Clone of chromium aad1ce808763f59c7a3753e08f1500a104ecc6fd refs/remotes/origin/HEAD
diff --git a/gpu/command_buffer/service/BUILD.gn b/gpu/command_buffer/service/BUILD.gn
new file mode 100644
index 0000000..e6c58b6
--- /dev/null
+++ b/gpu/command_buffer/service/BUILD.gn
@@ -0,0 +1,159 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/ui.gni")
+import("//third_party/protobuf/proto_library.gni")
+
+source_set("service") {
+ sources = [
+ "async_pixel_transfer_delegate.cc",
+ "async_pixel_transfer_delegate.h",
+ "async_pixel_transfer_manager_android.cc",
+ "async_pixel_transfer_manager_idle.cc",
+ "async_pixel_transfer_manager_idle.h",
+ "async_pixel_transfer_manager_linux.cc",
+ "async_pixel_transfer_manager_mac.cc",
+ "async_pixel_transfer_manager_share_group.cc",
+ "async_pixel_transfer_manager_share_group.h",
+ "async_pixel_transfer_manager_stub.cc",
+ "async_pixel_transfer_manager_stub.h",
+ "async_pixel_transfer_manager_sync.cc",
+ "async_pixel_transfer_manager_sync.h",
+ "async_pixel_transfer_manager_win.cc",
+ "async_pixel_transfer_manager.cc",
+ "async_pixel_transfer_manager.h",
+ "buffer_manager.h",
+ "buffer_manager.cc",
+ "cmd_buffer_engine.h",
+ "cmd_parser.cc",
+ "cmd_parser.h",
+ "command_buffer_service.cc",
+ "command_buffer_service.h",
+ "common_decoder.cc",
+ "common_decoder.h",
+ "context_group.h",
+ "context_group.cc",
+ "context_state.h",
+ "context_state_autogen.h",
+ "context_state_impl_autogen.h",
+ "context_state.cc",
+ "error_state.cc",
+ "error_state.h",
+ "feature_info.h",
+ "feature_info.cc",
+ "framebuffer_manager.h",
+ "framebuffer_manager.cc",
+ "gles2_cmd_copy_texture_chromium.cc",
+ "gles2_cmd_copy_texture_chromium.h",
+ "gles2_cmd_decoder.h",
+ "gles2_cmd_decoder_autogen.h",
+ "gles2_cmd_decoder.cc",
+ "gles2_cmd_validation.h",
+ "gles2_cmd_validation.cc",
+ "gles2_cmd_validation_autogen.h",
+ "gles2_cmd_validation_implementation_autogen.h",
+ "gl_context_virtual.cc",
+ "gl_context_virtual.h",
+ "gl_state_restorer_impl.cc",
+ "gl_state_restorer_impl.h",
+ "gl_utils.h",
+ "gpu_scheduler.cc",
+ "gpu_scheduler.h",
+ "gpu_scheduler_mock.h",
+ "gpu_state_tracer.cc",
+ "gpu_state_tracer.h",
+ "gpu_switches.cc",
+ "gpu_switches.h",
+ "gpu_tracer.cc",
+ "gpu_tracer.h",
+ "id_manager.h",
+ "id_manager.cc",
+ "image_manager.cc",
+ "image_manager.h",
+ "in_process_command_buffer.cc",
+ "in_process_command_buffer.h",
+ "logger.cc",
+ "logger.h",
+ "mailbox_manager.cc",
+ "mailbox_manager.h",
+ "mailbox_synchronizer.cc",
+ "mailbox_synchronizer.h",
+ "memory_program_cache.h",
+ "memory_program_cache.cc",
+ "mocks.h",
+    "program_cache.h",
+    "program_cache.cc",
+    "program_manager.h",
+    "program_manager.cc",
+    "query_manager.h",
+    "query_manager.cc",
+    "renderbuffer_manager.h",
+    "renderbuffer_manager.cc",
+ "shader_manager.h",
+ "shader_manager.cc",
+ "shader_translator.h",
+ "shader_translator.cc",
+ "shader_translator_cache.h",
+ "shader_translator_cache.cc",
+ "stream_texture_manager_in_process_android.h",
+ "stream_texture_manager_in_process_android.cc",
+ "texture_definition.h",
+ "texture_definition.cc",
+ "texture_manager.h",
+ "texture_manager.cc",
+ "transfer_buffer_manager.cc",
+ "transfer_buffer_manager.h",
+ "vertex_array_manager.h",
+ "vertex_array_manager.cc",
+ "vertex_attrib_manager.h",
+ "vertex_attrib_manager.cc",
+ ]
+
+ defines = [ "GPU_IMPLEMENTATION" ]
+
+ configs += [
+ "//third_party/khronos:khronos_headers",
+ ]
+
+ # Prefer mesa GL headers to system headers, which cause problems on Win.
+ include_dirs = [ "//third_party/mesa/src/include" ]
+
+ public_deps = [
+ "//gpu/command_buffer/common",
+ ]
+ deps = [
+ ":disk_cache_proto",
+ "//base",
+ "//base/third_party/dynamic_annotations",
+ "//crypto",
+ "//third_party/angle:translator",
+ "//third_party/protobuf:protobuf_lite",
+ "//third_party/re2",
+ "//third_party/smhasher:cityhash",
+ "//ui/gfx",
+ "//ui/gfx/geometry",
+ "//ui/gl",
+ ]
+
+ if (ui_compositor_image_transport) {
+ include_dirs += [ "//third_party/khronos" ]
+ }
+
+ if (is_win || is_android || (is_linux && use_x11)) {
+ sources += [
+ "async_pixel_transfer_manager_egl.cc",
+ "async_pixel_transfer_manager_egl.h",
+ ]
+ }
+
+ if (is_android && !is_debug) {
+ # On Android optimize more since this component can be a bottleneck.
+ configs -= [ "//build/config/compiler:optimize" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+}
+
+proto_library("disk_cache_proto") {
+ sources = [ "disk_cache_proto.proto" ]
+}
diff --git a/gpu/command_buffer/service/async_pixel_transfer_delegate.cc b/gpu/command_buffer/service/async_pixel_transfer_delegate.cc
new file mode 100644
index 0000000..201026b
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_delegate.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+
+namespace gpu {
+
+AsyncMemoryParams::AsyncMemoryParams(scoped_refptr<Buffer> buffer,
+ uint32 data_offset,
+ uint32 data_size)
+ : buffer_(buffer), data_offset_(data_offset), data_size_(data_size) {
+ DCHECK(buffer_.get());
+ DCHECK(buffer_->memory());
+}
+
+AsyncMemoryParams::~AsyncMemoryParams() {
+}
+
+AsyncPixelTransferUploadStats::AsyncPixelTransferUploadStats()
+ : texture_upload_count_(0) {}
+
+AsyncPixelTransferUploadStats::~AsyncPixelTransferUploadStats() {}
+
+void AsyncPixelTransferUploadStats::AddUpload(base::TimeDelta transfer_time) {
+ base::AutoLock scoped_lock(lock_);
+ texture_upload_count_++;
+ total_texture_upload_time_ += transfer_time;
+}
+
+int AsyncPixelTransferUploadStats::GetStats(
+ base::TimeDelta* total_texture_upload_time) {
+ base::AutoLock scoped_lock(lock_);
+ if (total_texture_upload_time)
+ *total_texture_upload_time = total_texture_upload_time_;
+ return texture_upload_count_;
+}
+
+AsyncPixelTransferDelegate::AsyncPixelTransferDelegate() {}
+
+AsyncPixelTransferDelegate::~AsyncPixelTransferDelegate() {}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_delegate.h b/gpu/command_buffer/service/async_pixel_transfer_delegate.h
new file mode 100644
index 0000000..b41bcd5
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_delegate.h
@@ -0,0 +1,117 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/buffer.h"
+#include "gpu/gpu_export.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace base {
+class SharedMemory;
+}
+
+namespace gpu {
+
+struct AsyncTexImage2DParams {
+ GLenum target;
+ GLint level;
+ GLenum internal_format;
+ GLsizei width;
+ GLsizei height;
+ GLint border;
+ GLenum format;
+ GLenum type;
+};
+
+struct AsyncTexSubImage2DParams {
+ GLenum target;
+ GLint level;
+ GLint xoffset;
+ GLint yoffset;
+ GLsizei width;
+ GLsizei height;
+ GLenum format;
+ GLenum type;
+};
+
+class AsyncMemoryParams {
+ public:
+ AsyncMemoryParams(scoped_refptr<Buffer> buffer,
+ uint32 data_offset,
+ uint32 data_size);
+ ~AsyncMemoryParams();
+
+ scoped_refptr<Buffer> buffer() const { return buffer_; }
+ uint32 data_size() const { return data_size_; }
+ uint32 data_offset() const { return data_offset_; }
+ void* GetDataAddress() const {
+ return buffer_->GetDataAddress(data_offset_, data_size_);
+ }
+
+ private:
+ scoped_refptr<Buffer> buffer_;
+ uint32 data_offset_;
+ uint32 data_size_;
+};
+
+class AsyncPixelTransferUploadStats
+ : public base::RefCountedThreadSafe<AsyncPixelTransferUploadStats> {
+ public:
+ AsyncPixelTransferUploadStats();
+
+ void AddUpload(base::TimeDelta transfer_time);
+ int GetStats(base::TimeDelta* total_texture_upload_time);
+
+ private:
+ friend class base::RefCountedThreadSafe<AsyncPixelTransferUploadStats>;
+
+ ~AsyncPixelTransferUploadStats();
+
+ int texture_upload_count_;
+ base::TimeDelta total_texture_upload_time_;
+ base::Lock lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferUploadStats);
+};
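+
+// Reading the totals back is a two-step query (sketch only; |stats| is an
+// illustrative scoped_refptr<AsyncPixelTransferUploadStats>):
+//   base::TimeDelta total_time;
+//   int upload_count = stats->GetStats(&total_time);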
+
+class GPU_EXPORT AsyncPixelTransferDelegate {
+ public:
+ virtual ~AsyncPixelTransferDelegate();
+
+ // The callback occurs on the caller thread, once the texture is
+ // safe/ready to be used.
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) = 0;
+
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) = 0;
+
+ // Returns true if there is a transfer in progress.
+ virtual bool TransferIsInProgress() = 0;
+
+ // Block until the specified transfer completes.
+ virtual void WaitForTransferCompletion() = 0;
+
+ protected:
+ AsyncPixelTransferDelegate();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegate);
+};
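+
+// A rough usage sketch implied by this interface (illustrative only; the
+// variable names below are not real code):
+//   delegate->AsyncTexImage2D(tex_params, mem_params, bind_callback);
+//   ...
+//   if (delegate->TransferIsInProgress())
+//     delegate->WaitForTransferCompletion();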
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_H_
+
diff --git a/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.cc b/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.cc
new file mode 100644
index 0000000..8e2d75f
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+
+namespace gpu {
+
+MockAsyncPixelTransferDelegate::MockAsyncPixelTransferDelegate() {
+}
+
+MockAsyncPixelTransferDelegate::~MockAsyncPixelTransferDelegate() {
+ Destroy();
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h b/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h
new file mode 100644
index 0000000..9d28730
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_MOCK
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_MOCK
+
+#include "base/basictypes.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+
+class MockAsyncPixelTransferDelegate : public AsyncPixelTransferDelegate {
+ public:
+ MockAsyncPixelTransferDelegate();
+ virtual ~MockAsyncPixelTransferDelegate();
+
+ // Called in ~MockAsyncPixelTransferDelegate.
+ MOCK_METHOD0(Destroy, void());
+
+ // Implement AsyncPixelTransferDelegate.
+ MOCK_METHOD3(AsyncTexImage2D,
+ void(const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback));
+ MOCK_METHOD2(AsyncTexSubImage2D,
+ void(const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params));
+ MOCK_METHOD0(TransferIsInProgress, bool());
+ MOCK_METHOD0(WaitForTransferCompletion, void());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAsyncPixelTransferDelegate);
+};
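+
+// In tests this is typically driven through gmock expectations, e.g.
+// (illustrative only):
+//   MockAsyncPixelTransferDelegate mock;
+//   EXPECT_CALL(mock, TransferIsInProgress())
+//       .WillOnce(testing::Return(false));
+//   EXPECT_CALL(mock, Destroy());  // Fired from the mock's destructor.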
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_MOCK
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager.cc b/gpu/command_buffer/service/async_pixel_transfer_manager.cc
new file mode 100644
index 0000000..efc893a
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager.cc
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+
+namespace gpu {
+
+AsyncPixelTransferCompletionObserver::AsyncPixelTransferCompletionObserver() {}
+
+AsyncPixelTransferCompletionObserver::~AsyncPixelTransferCompletionObserver() {}
+
+AsyncPixelTransferManager::AsyncPixelTransferManager() {}
+
+AsyncPixelTransferManager::~AsyncPixelTransferManager() {
+ if (manager_)
+ manager_->RemoveObserver(this);
+
+ for (TextureToDelegateMap::iterator ref = delegate_map_.begin();
+ ref != delegate_map_.end();
+ ref++) {
+ ref->first->RemoveObserver();
+ }
+}
+
+void AsyncPixelTransferManager::Initialize(gles2::TextureManager* manager) {
+ manager_ = manager;
+ manager_->AddObserver(this);
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManager::CreatePixelTransferDelegate(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ DCHECK(!GetPixelTransferDelegate(ref));
+ AsyncPixelTransferDelegate* delegate =
+ CreatePixelTransferDelegateImpl(ref, define_params);
+ delegate_map_[ref] = make_linked_ptr(delegate);
+ ref->AddObserver();
+ return delegate;
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManager::GetPixelTransferDelegate(
+ gles2::TextureRef* ref) {
+ TextureToDelegateMap::iterator it = delegate_map_.find(ref);
+ if (it == delegate_map_.end()) {
+ return NULL;
+ } else {
+ return it->second.get();
+ }
+}
+
+void AsyncPixelTransferManager::ClearPixelTransferDelegateForTest(
+ gles2::TextureRef* ref) {
+ TextureToDelegateMap::iterator it = delegate_map_.find(ref);
+ if (it != delegate_map_.end()) {
+ delegate_map_.erase(it);
+ ref->RemoveObserver();
+ }
+}
+
+bool AsyncPixelTransferManager::AsyncTransferIsInProgress(
+ gles2::TextureRef* ref) {
+ AsyncPixelTransferDelegate* delegate = GetPixelTransferDelegate(ref);
+ return delegate && delegate->TransferIsInProgress();
+}
+
+void AsyncPixelTransferManager::OnTextureManagerDestroying(
+ gles2::TextureManager* manager) {
+ // TextureManager should outlive AsyncPixelTransferManager.
+ NOTREACHED();
+ manager_ = NULL;
+}
+
+void AsyncPixelTransferManager::OnTextureRefDestroying(
+ gles2::TextureRef* texture) {
+ TextureToDelegateMap::iterator it = delegate_map_.find(texture);
+ if (it != delegate_map_.end()) {
+ delegate_map_.erase(it);
+ texture->RemoveObserver();
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager.h b/gpu/command_buffer/service/async_pixel_transfer_manager.h
new file mode 100644
index 0000000..1a818f3
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager.h
@@ -0,0 +1,122 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_H_
+
+#include <set>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/gpu_export.h"
+
+#if defined(COMPILER_GCC)
+namespace BASE_HASH_NAMESPACE {
+template <>
+ struct hash<gpu::gles2::TextureRef*> {
+ size_t operator()(gpu::gles2::TextureRef* ptr) const {
+ return hash<size_t>()(reinterpret_cast<size_t>(ptr));
+ }
+};
+} // namespace BASE_HASH_NAMESPACE
+#endif  // defined(COMPILER_GCC)
+
+namespace gfx {
+class GLContext;
+}
+
+namespace gpu {
+class AsyncPixelTransferDelegate;
+class AsyncMemoryParams;
+struct AsyncTexImage2DParams;
+
+class AsyncPixelTransferCompletionObserver
+ : public base::RefCountedThreadSafe<AsyncPixelTransferCompletionObserver> {
+ public:
+ AsyncPixelTransferCompletionObserver();
+
+ virtual void DidComplete(const AsyncMemoryParams& mem_params) = 0;
+
+ protected:
+ virtual ~AsyncPixelTransferCompletionObserver();
+
+ private:
+ friend class base::RefCountedThreadSafe<AsyncPixelTransferCompletionObserver>;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferCompletionObserver);
+};
+
+class GPU_EXPORT AsyncPixelTransferManager
+ : public gles2::TextureManager::DestructionObserver {
+ public:
+ static AsyncPixelTransferManager* Create(gfx::GLContext* context);
+
+ virtual ~AsyncPixelTransferManager();
+
+ void Initialize(gles2::TextureManager* texture_manager);
+
+ virtual void BindCompletedAsyncTransfers() = 0;
+
+  // There's no guarantee that the callback will run on the caller thread.
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) = 0;
+
+ virtual uint32 GetTextureUploadCount() = 0;
+ virtual base::TimeDelta GetTotalTextureUploadTime() = 0;
+
+ // ProcessMorePendingTransfers() will be called at a good time
+ // to process a small amount of pending transfer work while
+ // NeedsProcessMorePendingTransfers() returns true. Implementations
+ // that can't dispatch work to separate threads should use
+ // this to avoid blocking the caller thread inappropriately.
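+  // A caller might drain pending work during idle time roughly like this
+  // (a sketch only; HaveIdleTime() is a hypothetical stand-in for the
+  // caller's own idle check):
+  //   while (manager->NeedsProcessMorePendingTransfers() && HaveIdleTime())
+  //     manager->ProcessMorePendingTransfers();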
+ virtual void ProcessMorePendingTransfers() = 0;
+ virtual bool NeedsProcessMorePendingTransfers() = 0;
+
+ // Wait for all AsyncTex(Sub)Image2D uploads to finish before returning.
+ virtual void WaitAllAsyncTexImage2D() = 0;
+
+ AsyncPixelTransferDelegate* CreatePixelTransferDelegate(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params);
+
+ AsyncPixelTransferDelegate* GetPixelTransferDelegate(
+ gles2::TextureRef* ref);
+
+ void ClearPixelTransferDelegateForTest(gles2::TextureRef* ref);
+
+ bool AsyncTransferIsInProgress(gles2::TextureRef* ref);
+
+  // gles2::TextureManager::DestructionObserver implementation:
+ virtual void OnTextureManagerDestroying(gles2::TextureManager* manager)
+ OVERRIDE;
+ virtual void OnTextureRefDestroying(gles2::TextureRef* texture) OVERRIDE;
+
+ protected:
+ AsyncPixelTransferManager();
+
+ private:
+ gles2::TextureManager* manager_;
+
+ typedef base::hash_map<gles2::TextureRef*,
+ linked_ptr<AsyncPixelTransferDelegate> >
+ TextureToDelegateMap;
+ TextureToDelegateMap delegate_map_;
+
+  // A factory method called by CreatePixelTransferDelegate that is overridden
+  // by each implementation.
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManager);
+};
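+
+// Rough lifecycle implied by the interface above (a sketch, not authoritative
+// usage; the local names are illustrative):
+//   AsyncPixelTransferManager* manager =
+//       AsyncPixelTransferManager::Create(context);
+//   manager->Initialize(texture_manager);
+//   AsyncPixelTransferDelegate* delegate =
+//       manager->CreatePixelTransferDelegate(texture_ref, define_params);
+//   delegate->AsyncTexImage2D(tex_params, mem_params, bind_callback);
+//   manager->BindCompletedAsyncTransfers();  // Later, once transfers finish.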
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_android.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_android.cc
new file mode 100644
index 0000000..eadc34f
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_android.cc
@@ -0,0 +1,104 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/debug/trace_event.h"
+#include "base/sys_info.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_egl.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_idle.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_stub.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_sync.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace {
+
+enum GpuType {
+ GPU_BROADCOM,
+ GPU_IMAGINATION,
+ GPU_NVIDIA_ES31,
+ GPU_ADRENO_420,
+ GPU_OTHER,
+};
+
+std::string MakeString(const char* s) {
+ return std::string(s ? s : "");
+}
+
+GpuType GetGpuType() {
+ const std::string vendor = MakeString(
+ reinterpret_cast<const char*>(glGetString(GL_VENDOR)));
+ const std::string renderer = MakeString(
+ reinterpret_cast<const char*>(glGetString(GL_RENDERER)));
+ const std::string version = MakeString(
+ reinterpret_cast<const char*>(glGetString(GL_VERSION)));
+
+ if (vendor.find("Broadcom") != std::string::npos)
+ return GPU_BROADCOM;
+
+ if (vendor.find("Imagination") != std::string::npos)
+ return GPU_IMAGINATION;
+
+ if (vendor.find("NVIDIA") != std::string::npos &&
+ version.find("OpenGL ES 3.1") != std::string::npos) {
+ return GPU_NVIDIA_ES31;
+ }
+
+ if (vendor.find("Qualcomm") != std::string::npos &&
+ renderer.find("Adreno (TM) 420") != std::string::npos) {
+ return GPU_ADRENO_420;
+ }
+
+ return GPU_OTHER;
+}
+
+bool AllowTransferThreadForGpu() {
+ GpuType gpu = GetGpuType();
+ return gpu != GPU_BROADCOM && gpu != GPU_IMAGINATION &&
+ gpu != GPU_NVIDIA_ES31 && gpu != GPU_ADRENO_420;
+}
+
+}  // namespace
+
+// We only use threaded uploads when we can:
+// - Create EGLImages out of OpenGL textures (EGL_KHR_gl_texture_2D_image)
+// - Bind EGLImages to OpenGL textures (GL_OES_EGL_image)
+// - Use fences (to test for upload completion).
+// - The heap size is large enough.
+// TODO(kaanb|epenner): Remove the IsImagination() check pending the
+// resolution of crbug.com/249147
+// TODO(kaanb|epenner): Remove the IsLowEndDevice() check pending the
+// resolution of crbug.com/271929
+AsyncPixelTransferManager* AsyncPixelTransferManager::Create(
+ gfx::GLContext* context) {
+ DCHECK(context->IsCurrent(NULL));
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationEGLGLES2:
+ DCHECK(context);
+ if (!base::SysInfo::IsLowEndDevice() &&
+ context->HasExtension("EGL_KHR_fence_sync") &&
+ context->HasExtension("EGL_KHR_image") &&
+ context->HasExtension("EGL_KHR_image_base") &&
+ context->HasExtension("EGL_KHR_gl_texture_2D_image") &&
+ context->HasExtension("GL_OES_EGL_image") &&
+ AllowTransferThreadForGpu()) {
+ TRACE_EVENT0("gpu", "AsyncPixelTransferManager_CreateWithThread");
+ return new AsyncPixelTransferManagerEGL;
+ }
+ return new AsyncPixelTransferManagerIdle;
+ case gfx::kGLImplementationOSMesaGL: {
+ TRACE_EVENT0("gpu", "AsyncPixelTransferManager_CreateIdle");
+ return new AsyncPixelTransferManagerIdle;
+ }
+ case gfx::kGLImplementationMockGL:
+ return new AsyncPixelTransferManagerStub;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc
new file mode 100644
index 0000000..e153617
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc
@@ -0,0 +1,752 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_egl.h"
+
+#include <list>
+#include <string>
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_synthetic_delay.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface_egl.h"
+#include "ui/gl/scoped_binders.h"
+
+namespace gpu {
+
+namespace {
+
+bool CheckErrors(const char* file, int line) {
+ EGLint eglerror;
+ GLenum glerror;
+ bool success = true;
+ while ((eglerror = eglGetError()) != EGL_SUCCESS) {
+ LOG(ERROR) << "Async transfer EGL error at "
+ << file << ":" << line << " " << eglerror;
+ success = false;
+ }
+ while ((glerror = glGetError()) != GL_NO_ERROR) {
+ LOG(ERROR) << "Async transfer OpenGL error at "
+ << file << ":" << line << " " << glerror;
+ success = false;
+ }
+ return success;
+}
+#define CHECK_GL() CheckErrors(__FILE__, __LINE__)
+
+const char kAsyncTransferThreadName[] = "AsyncTransferThread";
+
+// Regular glTexImage2D call.
+void DoTexImage2D(const AsyncTexImage2DParams& tex_params, void* data) {
+ glTexImage2D(
+ GL_TEXTURE_2D, tex_params.level, tex_params.internal_format,
+ tex_params.width, tex_params.height,
+ tex_params.border, tex_params.format, tex_params.type, data);
+}
+
+// Regular glTexSubImage2D call.
+void DoTexSubImage2D(const AsyncTexSubImage2DParams& tex_params, void* data) {
+ glTexSubImage2D(
+ GL_TEXTURE_2D, tex_params.level,
+ tex_params.xoffset, tex_params.yoffset,
+ tex_params.width, tex_params.height,
+ tex_params.format, tex_params.type, data);
+}
+
+// Full glTexSubImage2D call, from glTexImage2D params.
+void DoFullTexSubImage2D(const AsyncTexImage2DParams& tex_params, void* data) {
+ glTexSubImage2D(
+ GL_TEXTURE_2D, tex_params.level,
+ 0, 0, tex_params.width, tex_params.height,
+ tex_params.format, tex_params.type, data);
+}
+
+void SetGlParametersForEglImageTexture() {
+ // These params are needed for EGLImage creation to succeed on several
+ // Android devices. I couldn't find this requirement in the EGLImage
+ // extension spec, but several devices fail without it.
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+}
+
+void PerformNotifyCompletion(
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
+ TRACE_EVENT0("gpu", "PerformNotifyCompletion");
+ observer->DidComplete(mem_params);
+}
+
+class TransferThread : public base::Thread {
+ public:
+ TransferThread() : base::Thread(kAsyncTransferThreadName) {
+ Start();
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+ SetPriority(base::kThreadPriority_Background);
+#endif
+ }
+ virtual ~TransferThread() {
+ Stop();
+ }
+
+ virtual void Init() OVERRIDE {
+ gfx::GLShareGroup* share_group = NULL;
+ surface_ = new gfx::PbufferGLSurfaceEGL(gfx::Size(1, 1));
+ surface_->Initialize();
+ context_ = gfx::GLContext::CreateGLContext(
+ share_group, surface_.get(), gfx::PreferDiscreteGpu);
+ bool is_current = context_->MakeCurrent(surface_.get());
+ DCHECK(is_current);
+ }
+
+ virtual void CleanUp() OVERRIDE {
+ surface_ = NULL;
+ context_->ReleaseCurrent(surface_.get());
+ context_ = NULL;
+ }
+
+ private:
+ scoped_refptr<gfx::GLContext> context_;
+ scoped_refptr<gfx::GLSurface> surface_;
+
+ DISALLOW_COPY_AND_ASSIGN(TransferThread);
+};
+
+base::LazyInstance<TransferThread>
+ g_transfer_thread = LAZY_INSTANCE_INITIALIZER;
+
+base::MessageLoopProxy* transfer_message_loop_proxy() {
+ return g_transfer_thread.Pointer()->message_loop_proxy().get();
+}
+
+// Class which holds async pixel transfer state (EGLImage).
+// The EGLImage is accessed by either thread, but everything
+// else is accessed only on the main thread.
+class TransferStateInternal
+ : public base::RefCountedThreadSafe<TransferStateInternal> {
+ public:
+ TransferStateInternal(GLuint texture_id,
+ const AsyncTexImage2DParams& define_params,
+ bool wait_for_uploads,
+ bool wait_for_creation,
+ bool use_image_preserved)
+ : texture_id_(texture_id),
+ thread_texture_id_(0),
+ transfer_completion_(true, true),
+ egl_image_(EGL_NO_IMAGE_KHR),
+ wait_for_uploads_(wait_for_uploads),
+ wait_for_creation_(wait_for_creation),
+ use_image_preserved_(use_image_preserved) {
+ define_params_ = define_params;
+ }
+
+ bool TransferIsInProgress() {
+ return !transfer_completion_.IsSignaled();
+ }
+
+ void BindTransfer() {
+ TRACE_EVENT2("gpu", "BindAsyncTransfer glEGLImageTargetTexture2DOES",
+ "width", define_params_.width,
+ "height", define_params_.height);
+ DCHECK(texture_id_);
+ if (EGL_NO_IMAGE_KHR == egl_image_)
+ return;
+
+ glBindTexture(GL_TEXTURE_2D, texture_id_);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image_);
+ bind_callback_.Run();
+
+ DCHECK(CHECK_GL());
+ }
+
+ void CreateEglImage(GLuint texture_id) {
+ TRACE_EVENT0("gpu", "eglCreateImageKHR");
+ DCHECK(texture_id);
+ DCHECK_EQ(egl_image_, EGL_NO_IMAGE_KHR);
+
+ EGLDisplay egl_display = eglGetCurrentDisplay();
+ EGLContext egl_context = eglGetCurrentContext();
+ EGLenum egl_target = EGL_GL_TEXTURE_2D_KHR;
+ EGLClientBuffer egl_buffer =
+ reinterpret_cast<EGLClientBuffer>(texture_id);
+
+ EGLint image_preserved = use_image_preserved_ ? EGL_TRUE : EGL_FALSE;
+ EGLint egl_attrib_list[] = {
+ EGL_GL_TEXTURE_LEVEL_KHR, 0, // mip-level.
+ EGL_IMAGE_PRESERVED_KHR, image_preserved,
+ EGL_NONE
+ };
+ egl_image_ = eglCreateImageKHR(
+ egl_display,
+ egl_context,
+ egl_target,
+ egl_buffer,
+ egl_attrib_list);
+
+ DLOG_IF(ERROR, EGL_NO_IMAGE_KHR == egl_image_)
+ << "eglCreateImageKHR failed";
+ }
+
+ void CreateEglImageOnUploadThread() {
+ CreateEglImage(thread_texture_id_);
+ }
+
+ void CreateEglImageOnMainThreadIfNeeded() {
+ if (egl_image_ == EGL_NO_IMAGE_KHR) {
+ CreateEglImage(texture_id_);
+ if (wait_for_creation_) {
+ TRACE_EVENT0("gpu", "glFinish creation");
+ glFinish();
+ }
+ }
+ }
+
+ void WaitForLastUpload() {
+    // This glFinish is just a safeguard in case uploads have some
+    // GPU action that needs to occur. We could use fences and try
+    // to do this less often. However, on older drivers fences are
+    // not always reliable (e.g. Mali-400 just blocks forever).
+ if (wait_for_uploads_) {
+ TRACE_EVENT0("gpu", "glFinish");
+ glFinish();
+ }
+ }
+
+ void MarkAsTransferIsInProgress() {
+ TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
+ transfer_completion_.Reset();
+ }
+
+ void MarkAsCompleted() {
+ TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
+ transfer_completion_.Signal();
+ }
+
+ void WaitForTransferCompletion() {
+ TRACE_EVENT0("gpu", "WaitForTransferCompletion");
+ // TODO(backer): Deschedule the channel rather than blocking the main GPU
+ // thread (crbug.com/240265).
+ transfer_completion_.Wait();
+ }
+
+ void PerformAsyncTexImage2D(
+ AsyncTexImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
+ TRACE_EVENT2("gpu",
+ "PerformAsyncTexImage",
+ "width",
+ tex_params.width,
+ "height",
+ tex_params.height);
+ DCHECK(!thread_texture_id_);
+ DCHECK_EQ(0, tex_params.level);
+ if (EGL_NO_IMAGE_KHR != egl_image_) {
+ MarkAsCompleted();
+ return;
+ }
+
+ void* data = mem_params.GetDataAddress();
+
+ base::TimeTicks begin_time;
+ if (texture_upload_stats.get())
+ begin_time = base::TimeTicks::HighResNow();
+
+ {
+ TRACE_EVENT0("gpu", "glTexImage2D no data");
+ glGenTextures(1, &thread_texture_id_);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, thread_texture_id_);
+
+ SetGlParametersForEglImageTexture();
+
+ // If we need to use image_preserved, we pass the data with
+ // the allocation. Otherwise we use a NULL allocation to
+ // try to avoid any costs associated with creating the EGLImage.
+ if (use_image_preserved_)
+ DoTexImage2D(tex_params, data);
+ else
+ DoTexImage2D(tex_params, NULL);
+ }
+
+ CreateEglImageOnUploadThread();
+
+ {
+ TRACE_EVENT0("gpu", "glTexSubImage2D with data");
+
+ // If we didn't use image_preserved, we haven't uploaded
+ // the data yet, so we do this with a full texSubImage.
+ if (!use_image_preserved_)
+ DoFullTexSubImage2D(tex_params, data);
+ }
+
+ WaitForLastUpload();
+ MarkAsCompleted();
+
+ DCHECK(CHECK_GL());
+ if (texture_upload_stats.get()) {
+ texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
+ begin_time);
+ }
+ }
+
+ void PerformAsyncTexSubImage2D(
+ AsyncTexSubImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
+ TRACE_EVENT2("gpu",
+ "PerformAsyncTexSubImage2D",
+ "width",
+ tex_params.width,
+ "height",
+ tex_params.height);
+
+ DCHECK_NE(EGL_NO_IMAGE_KHR, egl_image_);
+ DCHECK_EQ(0, tex_params.level);
+
+ void* data = mem_params.GetDataAddress();
+
+ base::TimeTicks begin_time;
+ if (texture_upload_stats.get())
+ begin_time = base::TimeTicks::HighResNow();
+
+ if (!thread_texture_id_) {
+ TRACE_EVENT0("gpu", "glEGLImageTargetTexture2DOES");
+ glGenTextures(1, &thread_texture_id_);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, thread_texture_id_);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image_);
+ } else {
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, thread_texture_id_);
+ }
+ {
+ TRACE_EVENT0("gpu", "glTexSubImage2D");
+ DoTexSubImage2D(tex_params, data);
+ }
+ WaitForLastUpload();
+ MarkAsCompleted();
+
+ DCHECK(CHECK_GL());
+ if (texture_upload_stats.get()) {
+ texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
+ begin_time);
+ }
+ }
+
+ protected:
+ friend class base::RefCountedThreadSafe<TransferStateInternal>;
+ friend class gpu::AsyncPixelTransferDelegateEGL;
+
+ static void DeleteTexture(GLuint id) {
+ glDeleteTextures(1, &id);
+ }
+
+ virtual ~TransferStateInternal() {
+ if (egl_image_ != EGL_NO_IMAGE_KHR) {
+ EGLDisplay display = eglGetCurrentDisplay();
+ eglDestroyImageKHR(display, egl_image_);
+ }
+ if (thread_texture_id_) {
+ transfer_message_loop_proxy()->PostTask(FROM_HERE,
+ base::Bind(&DeleteTexture, thread_texture_id_));
+ }
+ }
+
+ // The 'real' texture.
+ GLuint texture_id_;
+
+ // The EGLImage sibling on the upload thread.
+ GLuint thread_texture_id_;
+
+ // Definition params for texture that needs binding.
+ AsyncTexImage2DParams define_params_;
+
+ // Indicates that an async transfer is in progress.
+ base::WaitableEvent transfer_completion_;
+
+ // It would be nice if we could just create a new EGLImage for
+ // every upload, but I found that didn't work, so this stores
+ // one for the lifetime of the texture.
+ EGLImageKHR egl_image_;
+
+ // Callback to invoke when AsyncTexImage2D is complete
+ // and the client can safely use the texture. This occurs
+ // during BindCompletedAsyncTransfers().
+ base::Closure bind_callback_;
+
+ // Customize when we block on fences (these are work-arounds).
+ bool wait_for_uploads_;
+ bool wait_for_creation_;
+ bool use_image_preserved_;
+};
+
+} // namespace
+
+// Class which handles async pixel transfers using EGLImageKHR and a
+// separate upload thread.
+class AsyncPixelTransferDelegateEGL
+ : public AsyncPixelTransferDelegate,
+ public base::SupportsWeakPtr<AsyncPixelTransferDelegateEGL> {
+ public:
+ AsyncPixelTransferDelegateEGL(
+ AsyncPixelTransferManagerEGL::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params);
+ virtual ~AsyncPixelTransferDelegateEGL();
+
+ void BindTransfer() { state_->BindTransfer(); }
+
+ // Implement AsyncPixelTransferDelegate:
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) OVERRIDE;
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) OVERRIDE;
+ virtual bool TransferIsInProgress() OVERRIDE;
+ virtual void WaitForTransferCompletion() OVERRIDE;
+
+ private:
+ // Returns true if a work-around was used.
+ bool WorkAroundAsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback);
+ bool WorkAroundAsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params);
+
+ // A raw pointer is safe because the SharedState is owned by the Manager,
+ // which owns this Delegate.
+ AsyncPixelTransferManagerEGL::SharedState* shared_state_;
+ scoped_refptr<TransferStateInternal> state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateEGL);
+};
+
+AsyncPixelTransferDelegateEGL::AsyncPixelTransferDelegateEGL(
+ AsyncPixelTransferManagerEGL::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params)
+ : shared_state_(shared_state) {
+  // We can't wait on uploads on Imagination (it can take 200ms+).
+  // In practice, they are complete when the CPU glTexSubImage2D completes.
+ bool wait_for_uploads = !shared_state_->is_imagination;
+
+  // Qualcomm runs into texture corruption problems if the same texture is
+  // uploaded with both async and normal uploads. Synchronize after EGLImage
+  // creation on the main thread as a work-around.
+ bool wait_for_creation = shared_state_->is_qualcomm;
+
+  // Qualcomm has a race when using image_preserved=FALSE,
+  // which can result in black textures even after the first upload.
+  // Using FALSE is mainly a performance optimization (to avoid layout
+  // changes), but Qualcomm doesn't seem to get any benefit from it, so we
+  // just use image_preserved=TRUE on Qualcomm as a work-around.
+ bool use_image_preserved =
+ shared_state_->is_qualcomm || shared_state_->is_imagination;
+
+ state_ = new TransferStateInternal(texture_id,
+ define_params,
+ wait_for_uploads,
+ wait_for_creation,
+ use_image_preserved);
+}
+
+AsyncPixelTransferDelegateEGL::~AsyncPixelTransferDelegateEGL() {}
+
+bool AsyncPixelTransferDelegateEGL::TransferIsInProgress() {
+ return state_->TransferIsInProgress();
+}
+
+void AsyncPixelTransferDelegateEGL::WaitForTransferCompletion() {
+ if (state_->TransferIsInProgress()) {
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+ g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Display);
+#endif
+
+ state_->WaitForTransferCompletion();
+ DCHECK(!state_->TransferIsInProgress());
+
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+ g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Background);
+#endif
+ }
+}
+
+void AsyncPixelTransferDelegateEGL::AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+ if (WorkAroundAsyncTexImage2D(tex_params, mem_params, bind_callback))
+ return;
+
+ DCHECK(!state_->TransferIsInProgress());
+ DCHECK_EQ(state_->egl_image_, EGL_NO_IMAGE_KHR);
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+ DCHECK_EQ(tex_params.level, 0);
+
+ // Mark the transfer in progress and save the late bind
+ // callback, so we can notify the client when it is bound.
+ shared_state_->pending_allocations.push_back(AsWeakPtr());
+ state_->bind_callback_ = bind_callback;
+
+ // Mark the transfer in progress.
+ state_->MarkAsTransferIsInProgress();
+
+ // Duplicate the shared memory so there is no way we can get
+ // a use-after-free of the raw pixels.
+ transfer_message_loop_proxy()->PostTask(FROM_HERE,
+ base::Bind(
+ &TransferStateInternal::PerformAsyncTexImage2D,
+ state_,
+ tex_params,
+ mem_params,
+ shared_state_->texture_upload_stats));
+
+ DCHECK(CHECK_GL());
+}
+
+void AsyncPixelTransferDelegateEGL::AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+ TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
+ "width", tex_params.width,
+ "height", tex_params.height);
+ if (WorkAroundAsyncTexSubImage2D(tex_params, mem_params))
+ return;
+ DCHECK(!state_->TransferIsInProgress());
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+ DCHECK_EQ(tex_params.level, 0);
+
+ // Mark the transfer in progress.
+ state_->MarkAsTransferIsInProgress();
+
+ // If this wasn't async allocated, we don't have an EGLImage yet.
+ // Create the EGLImage if it hasn't already been created.
+ state_->CreateEglImageOnMainThreadIfNeeded();
+
+  // Duplicate the shared memory so there is no way we can get
+  // a use-after-free of the raw pixels.
+ transfer_message_loop_proxy()->PostTask(FROM_HERE,
+ base::Bind(
+ &TransferStateInternal::PerformAsyncTexSubImage2D,
+ state_,
+ tex_params,
+ mem_params,
+ shared_state_->texture_upload_stats));
+
+ DCHECK(CHECK_GL());
+}
+
+namespace {
+bool IsPowerOfTwo(unsigned int x) {
+ return ((x != 0) && !(x & (x - 1)));
+}
+
+bool IsMultipleOfEight(unsigned int x) {
+ return (x & 7) == 0;
+}
+
+bool DimensionsSupportImgFastPath(int width, int height) {
+ // Multiple of eight, but not a power of two.
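+  // For example, 24x40 qualifies (both are multiples of eight and not both
+  // powers of two), while 64x64 and 30x20 do not.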
+ return IsMultipleOfEight(width) &&
+ IsMultipleOfEight(height) &&
+ !(IsPowerOfTwo(width) &&
+ IsPowerOfTwo(height));
+}
+} // namespace
+
+// It is very difficult to stream uploads on Imagination GPUs:
+// - glTexImage2D defers a swizzle/stall until draw-time
+// - glTexSubImage2D will sleep for 16ms on a good day, and 100ms
+// or longer if OpenGL is in heavy use by another thread.
+// The one combination that avoids these problems requires:
+// a.) Allocations/Uploads must occur on different threads/contexts.
+// b.) Texture size must be non-power-of-two.
+// When using a+b, uploads will be incorrect/corrupt unless:
+// c.) Texture size must be a multiple-of-eight.
+//
+// To achieve a.) we allocate synchronously on the main thread followed
+// by uploading on the upload thread. When b/c are not true we fall back
+// on purely synchronous allocation/upload on the main thread.
+
+bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+ if (!shared_state_->is_imagination)
+ return false;
+
+  // On Imagination we allocate synchronously all the time, even
+  // if the dimensions support fast uploads. This is for part a.)
+  // above, so that allocations occur on a different thread/context than
+  // uploads.
+ void* data = mem_params.GetDataAddress();
+ SetGlParametersForEglImageTexture();
+
+ {
+ TRACE_EVENT0("gpu", "glTexImage2D with data");
+ DoTexImage2D(tex_params, data);
+ }
+
+  // The allocation has already occurred, so mark it as finished
+  // and ready for binding.
+ CHECK(!state_->TransferIsInProgress());
+
+ // If the dimensions support fast async uploads, create the
+ // EGLImage for future uploads. The late bind should not
+ // be needed since the EGLImage was created from the main thread
+ // texture, but this is required to prevent an imagination driver crash.
+ if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height)) {
+ state_->CreateEglImageOnMainThreadIfNeeded();
+ shared_state_->pending_allocations.push_back(AsWeakPtr());
+ state_->bind_callback_ = bind_callback;
+ }
+
+ DCHECK(CHECK_GL());
+ return true;
+}
+
+bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+ if (!shared_state_->is_imagination)
+ return false;
+
+ // If the dimensions support fast async uploads, we can use the
+ // normal async upload path for uploads.
+ if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height))
+ return false;
+
+ // Fall back on a synchronous stub as we don't have a known fast path.
+ // Also, older ICS drivers crash when we do any glTexSubImage2D on the
+ // same thread. To work around this we do glTexImage2D instead. Since
+ // we didn't create an EGLImage for this texture (see above), this is
+ // okay, but it limits this API to full updates for now.
+ DCHECK(!state_->egl_image_);
+ DCHECK_EQ(tex_params.xoffset, 0);
+ DCHECK_EQ(tex_params.yoffset, 0);
+ DCHECK_EQ(state_->define_params_.width, tex_params.width);
+ DCHECK_EQ(state_->define_params_.height, tex_params.height);
+ DCHECK_EQ(state_->define_params_.level, tex_params.level);
+ DCHECK_EQ(state_->define_params_.format, tex_params.format);
+ DCHECK_EQ(state_->define_params_.type, tex_params.type);
+
+ void* data = mem_params.GetDataAddress();
+ base::TimeTicks begin_time;
+ if (shared_state_->texture_upload_stats.get())
+ begin_time = base::TimeTicks::HighResNow();
+ {
+ TRACE_EVENT0("gpu", "glTexSubImage2D");
+ // Note we use define_params_ instead of tex_params.
+ // The DCHECKs above verify this is always the same.
+ DoTexImage2D(state_->define_params_, data);
+ }
+ if (shared_state_->texture_upload_stats.get()) {
+ shared_state_->texture_upload_stats
+ ->AddUpload(base::TimeTicks::HighResNow() - begin_time);
+ }
+
+ DCHECK(CHECK_GL());
+ return true;
+}
+
+AsyncPixelTransferManagerEGL::SharedState::SharedState()
+ // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
+ : texture_upload_stats(new AsyncPixelTransferUploadStats) {
+ const char* vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR));
+ if (vendor) {
+ is_imagination =
+ std::string(vendor).find("Imagination") != std::string::npos;
+ is_qualcomm = std::string(vendor).find("Qualcomm") != std::string::npos;
+ }
+}
+
+AsyncPixelTransferManagerEGL::SharedState::~SharedState() {}
+
+AsyncPixelTransferManagerEGL::AsyncPixelTransferManagerEGL() {}
+
+AsyncPixelTransferManagerEGL::~AsyncPixelTransferManagerEGL() {}
+
+void AsyncPixelTransferManagerEGL::BindCompletedAsyncTransfers() {
+ scoped_ptr<gfx::ScopedTextureBinder> texture_binder;
+
+  while (!shared_state_.pending_allocations.empty()) {
+ if (!shared_state_.pending_allocations.front().get()) {
+ shared_state_.pending_allocations.pop_front();
+ continue;
+ }
+ AsyncPixelTransferDelegateEGL* delegate =
+ shared_state_.pending_allocations.front().get();
+ // Terminate early, as all transfers finish in order, currently.
+ if (delegate->TransferIsInProgress())
+ break;
+
+ if (!texture_binder)
+ texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));
+
+ // If the transfer is finished, bind it to the texture
+ // and remove it from pending list.
+ delegate->BindTransfer();
+ shared_state_.pending_allocations.pop_front();
+ }
+}
+
+void AsyncPixelTransferManagerEGL::AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) {
+ // Post a PerformNotifyCompletion task to the upload thread. This task
+ // will run after all async transfers are complete.
+ transfer_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&PerformNotifyCompletion,
+ mem_params,
+ make_scoped_refptr(observer)));
+}
+
+uint32 AsyncPixelTransferManagerEGL::GetTextureUploadCount() {
+ return shared_state_.texture_upload_stats->GetStats(NULL);
+}
+
+base::TimeDelta AsyncPixelTransferManagerEGL::GetTotalTextureUploadTime() {
+ base::TimeDelta total_texture_upload_time;
+ shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
+ return total_texture_upload_time;
+}
+
+void AsyncPixelTransferManagerEGL::ProcessMorePendingTransfers() {
+}
+
+bool AsyncPixelTransferManagerEGL::NeedsProcessMorePendingTransfers() {
+ return false;
+}
+
+void AsyncPixelTransferManagerEGL::WaitAllAsyncTexImage2D() {
+ if (shared_state_.pending_allocations.empty())
+ return;
+
+ AsyncPixelTransferDelegateEGL* delegate =
+ shared_state_.pending_allocations.back().get();
+ if (delegate)
+ delegate->WaitForTransferCompletion();
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManagerEGL::CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ return new AsyncPixelTransferDelegateEGL(
+ &shared_state_, ref->service_id(), define_params);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_egl.h b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.h
new file mode 100644
index 0000000..8f0c4b3
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.h
@@ -0,0 +1,58 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_EGL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_EGL_H_
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/memory/ref_counted.h"
+
+namespace gpu {
+class AsyncPixelTransferDelegateEGL;
+class AsyncPixelTransferUploadStats;
+
+class AsyncPixelTransferManagerEGL : public AsyncPixelTransferManager {
+ public:
+ AsyncPixelTransferManagerEGL();
+ virtual ~AsyncPixelTransferManagerEGL();
+
+ // AsyncPixelTransferManager implementation:
+ virtual void BindCompletedAsyncTransfers() OVERRIDE;
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) OVERRIDE;
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual void ProcessMorePendingTransfers() OVERRIDE;
+ virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
+
+ // State shared between Managers and Delegates.
+ struct SharedState {
+ SharedState();
+ ~SharedState();
+
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats;
+ bool is_imagination;
+ bool is_qualcomm;
+ typedef std::list<base::WeakPtr<AsyncPixelTransferDelegateEGL> >
+ TransferQueue;
+ TransferQueue pending_allocations;
+ };
+
+ private:
+ // AsyncPixelTransferManager implementation:
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) OVERRIDE;
+
+ SharedState shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManagerEGL);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_EGL_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc
new file mode 100644
index 0000000..40ec87f
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc
@@ -0,0 +1,324 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_idle.h"
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_synthetic_delay.h"
+#include "base/lazy_instance.h"
+#include "base/memory/weak_ptr.h"
+#include "ui/gl/scoped_binders.h"
+
+namespace gpu {
+
+namespace {
+
+static uint64 g_next_pixel_transfer_state_id = 1;
+
+void PerformNotifyCompletion(
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
+ TRACE_EVENT0("gpu", "PerformNotifyCompletion");
+ observer->DidComplete(mem_params);
+}
+
+} // namespace
+
+// Class which handles async pixel transfers in a platform
+// independent way.
+class AsyncPixelTransferDelegateIdle
+ : public AsyncPixelTransferDelegate,
+ public base::SupportsWeakPtr<AsyncPixelTransferDelegateIdle> {
+ public:
+ AsyncPixelTransferDelegateIdle(
+ AsyncPixelTransferManagerIdle::SharedState* state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params);
+ virtual ~AsyncPixelTransferDelegateIdle();
+
+ // Implement AsyncPixelTransferDelegate:
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) OVERRIDE;
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) OVERRIDE;
+ virtual bool TransferIsInProgress() OVERRIDE;
+ virtual void WaitForTransferCompletion() OVERRIDE;
+
+ private:
+ void PerformAsyncTexImage2D(AsyncTexImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ const base::Closure& bind_callback);
+ void PerformAsyncTexSubImage2D(AsyncTexSubImage2DParams tex_params,
+ AsyncMemoryParams mem_params);
+
+ uint64 id_;
+ GLuint texture_id_;
+ bool transfer_in_progress_;
+ AsyncTexImage2DParams define_params_;
+
+ // Safe to hold a raw pointer because SharedState is owned by the Manager
+ // which owns the Delegate.
+ AsyncPixelTransferManagerIdle::SharedState* shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateIdle);
+};
+
+AsyncPixelTransferDelegateIdle::AsyncPixelTransferDelegateIdle(
+ AsyncPixelTransferManagerIdle::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params)
+ : id_(g_next_pixel_transfer_state_id++),
+ texture_id_(texture_id),
+ transfer_in_progress_(false),
+ define_params_(define_params),
+ shared_state_(shared_state) {}
+
+AsyncPixelTransferDelegateIdle::~AsyncPixelTransferDelegateIdle() {}
+
+void AsyncPixelTransferDelegateIdle::AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+ TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+
+ shared_state_->tasks.push_back(AsyncPixelTransferManagerIdle::Task(
+ id_,
+ this,
+ base::Bind(&AsyncPixelTransferDelegateIdle::PerformAsyncTexImage2D,
+ AsWeakPtr(),
+ tex_params,
+ mem_params,
+ bind_callback)));
+
+ transfer_in_progress_ = true;
+}
+
+void AsyncPixelTransferDelegateIdle::AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+ TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+
+ shared_state_->tasks.push_back(AsyncPixelTransferManagerIdle::Task(
+ id_,
+ this,
+ base::Bind(&AsyncPixelTransferDelegateIdle::PerformAsyncTexSubImage2D,
+ AsWeakPtr(),
+ tex_params,
+ mem_params)));
+
+ transfer_in_progress_ = true;
+}
+
+bool AsyncPixelTransferDelegateIdle::TransferIsInProgress() {
+ return transfer_in_progress_;
+}
+
+void AsyncPixelTransferDelegateIdle::WaitForTransferCompletion() {
+ for (std::list<AsyncPixelTransferManagerIdle::Task>::iterator iter =
+ shared_state_->tasks.begin();
+ iter != shared_state_->tasks.end();
+ ++iter) {
+ if (iter->transfer_id != id_)
+ continue;
+
+ (*iter).task.Run();
+ shared_state_->tasks.erase(iter);
+ break;
+ }
+
+ shared_state_->ProcessNotificationTasks();
+}
+
+void AsyncPixelTransferDelegateIdle::PerformAsyncTexImage2D(
+ AsyncTexImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ const base::Closure& bind_callback) {
+ TRACE_EVENT2("gpu", "PerformAsyncTexImage2D",
+ "width", tex_params.width,
+ "height", tex_params.height);
+
+ void* data = mem_params.GetDataAddress();
+
+ base::TimeTicks begin_time(base::TimeTicks::HighResNow());
+ gfx::ScopedTextureBinder texture_binder(tex_params.target, texture_id_);
+
+ {
+ TRACE_EVENT0("gpu", "glTexImage2D");
+ glTexImage2D(
+ tex_params.target,
+ tex_params.level,
+ tex_params.internal_format,
+ tex_params.width,
+ tex_params.height,
+ tex_params.border,
+ tex_params.format,
+ tex_params.type,
+ data);
+ }
+
+ TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
+ transfer_in_progress_ = false;
+ shared_state_->texture_upload_count++;
+ shared_state_->total_texture_upload_time +=
+ base::TimeTicks::HighResNow() - begin_time;
+
+ // The texture is already fully bound so just call it now.
+ bind_callback.Run();
+}
+
+void AsyncPixelTransferDelegateIdle::PerformAsyncTexSubImage2D(
+ AsyncTexSubImage2DParams tex_params,
+ AsyncMemoryParams mem_params) {
+ TRACE_EVENT2("gpu", "PerformAsyncTexSubImage2D",
+ "width", tex_params.width,
+ "height", tex_params.height);
+
+ void* data = mem_params.GetDataAddress();
+
+ base::TimeTicks begin_time(base::TimeTicks::HighResNow());
+ gfx::ScopedTextureBinder texture_binder(tex_params.target, texture_id_);
+
+ // If it's a full texture update, use glTexImage2D as it's faster.
+ // TODO(epenner): Make this configurable (http://crbug.com/259924)
+ if (tex_params.xoffset == 0 &&
+ tex_params.yoffset == 0 &&
+ tex_params.target == define_params_.target &&
+ tex_params.level == define_params_.level &&
+ tex_params.width == define_params_.width &&
+ tex_params.height == define_params_.height) {
+ TRACE_EVENT0("gpu", "glTexImage2D");
+ glTexImage2D(
+ define_params_.target,
+ define_params_.level,
+ define_params_.internal_format,
+ define_params_.width,
+ define_params_.height,
+ define_params_.border,
+ tex_params.format,
+ tex_params.type,
+ data);
+ } else {
+ TRACE_EVENT0("gpu", "glTexSubImage2D");
+ glTexSubImage2D(
+ tex_params.target,
+ tex_params.level,
+ tex_params.xoffset,
+ tex_params.yoffset,
+ tex_params.width,
+ tex_params.height,
+ tex_params.format,
+ tex_params.type,
+ data);
+ }
+
+ TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
+ transfer_in_progress_ = false;
+ shared_state_->texture_upload_count++;
+ shared_state_->total_texture_upload_time +=
+ base::TimeTicks::HighResNow() - begin_time;
+}
+
+AsyncPixelTransferManagerIdle::Task::Task(
+ uint64 transfer_id,
+ AsyncPixelTransferDelegate* delegate,
+ const base::Closure& task)
+ : transfer_id(transfer_id),
+ delegate(delegate),
+ task(task) {
+}
+
+AsyncPixelTransferManagerIdle::Task::~Task() {}
+
+AsyncPixelTransferManagerIdle::SharedState::SharedState()
+ : texture_upload_count(0) {}
+
+AsyncPixelTransferManagerIdle::SharedState::~SharedState() {}
+
+void AsyncPixelTransferManagerIdle::SharedState::ProcessNotificationTasks() {
+ while (!tasks.empty()) {
+ // Stop when we reach a pixel transfer task.
+ if (tasks.front().transfer_id)
+ return;
+
+ tasks.front().task.Run();
+ tasks.pop_front();
+ }
+}
+
+AsyncPixelTransferManagerIdle::AsyncPixelTransferManagerIdle()
+ : shared_state_() {
+}
+
+AsyncPixelTransferManagerIdle::~AsyncPixelTransferManagerIdle() {}
+
+void AsyncPixelTransferManagerIdle::BindCompletedAsyncTransfers() {
+ // Everything is already bound.
+}
+
+void AsyncPixelTransferManagerIdle::AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) {
+ if (shared_state_.tasks.empty()) {
+ observer->DidComplete(mem_params);
+ return;
+ }
+
+ shared_state_.tasks.push_back(
+ Task(0, // 0 transfer_id for notification tasks.
+ NULL,
+ base::Bind(
+ &PerformNotifyCompletion,
+ mem_params,
+ make_scoped_refptr(observer))));
+}
+
+uint32 AsyncPixelTransferManagerIdle::GetTextureUploadCount() {
+ return shared_state_.texture_upload_count;
+}
+
+base::TimeDelta AsyncPixelTransferManagerIdle::GetTotalTextureUploadTime() {
+ return shared_state_.total_texture_upload_time;
+}
+
+void AsyncPixelTransferManagerIdle::ProcessMorePendingTransfers() {
+ if (shared_state_.tasks.empty())
+ return;
+
+ // First task should always be a pixel transfer task.
+ DCHECK(shared_state_.tasks.front().transfer_id);
+ shared_state_.tasks.front().task.Run();
+ shared_state_.tasks.pop_front();
+
+ shared_state_.ProcessNotificationTasks();
+}
+
+bool AsyncPixelTransferManagerIdle::NeedsProcessMorePendingTransfers() {
+ return !shared_state_.tasks.empty();
+}
+
+void AsyncPixelTransferManagerIdle::WaitAllAsyncTexImage2D() {
+ if (shared_state_.tasks.empty())
+ return;
+
+ const Task& task = shared_state_.tasks.back();
+ if (task.delegate)
+ task.delegate->WaitForTransferCompletion();
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManagerIdle::CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ return new AsyncPixelTransferDelegateIdle(&shared_state_,
+ ref->service_id(),
+ define_params);
+}
+
+} // namespace gpu
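A minimal sketch of how a scheduler might drain the queue implemented above between GPU commands, using only the interface declared in async_pixel_transfer_manager_idle.h; the PerformIdleWork wrapper and its call site are illustrative assumptions, not part of this patch.

  // Illustrative only. Each call to ProcessMorePendingTransfers() runs one
  // queued pixel transfer plus any notification tasks (transfer_id == 0)
  // queued immediately behind it.
  void PerformIdleWork(gpu::AsyncPixelTransferManager* manager) {
    while (manager->NeedsProcessMorePendingTransfers())
      manager->ProcessMorePendingTransfers();
  }

A real scheduler would more likely run a single task per idle slice rather than draining the whole queue, so that other idle work is not starved.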
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_idle.h b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.h
new file mode 100644
index 0000000..af3262f
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.h
@@ -0,0 +1,68 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_IDLE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_IDLE_H_
+
+#include <list>
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+namespace gpu {
+
+class AsyncPixelTransferManagerIdle : public AsyncPixelTransferManager {
+ public:
+ AsyncPixelTransferManagerIdle();
+ virtual ~AsyncPixelTransferManagerIdle();
+
+ // AsyncPixelTransferManager implementation:
+ virtual void BindCompletedAsyncTransfers() OVERRIDE;
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) OVERRIDE;
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual void ProcessMorePendingTransfers() OVERRIDE;
+ virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
+
+ struct Task {
+ Task(uint64 transfer_id,
+ AsyncPixelTransferDelegate* delegate,
+ const base::Closure& task);
+ ~Task();
+
+    // Non-zero for pixel transfer tasks; zero for notification tasks.
+ uint64 transfer_id;
+
+ AsyncPixelTransferDelegate* delegate;
+
+ base::Closure task;
+ };
+
+ // State shared between Managers and Delegates.
+ struct SharedState {
+ SharedState();
+ ~SharedState();
+ void ProcessNotificationTasks();
+
+ int texture_upload_count;
+ base::TimeDelta total_texture_upload_time;
+ std::list<Task> tasks;
+ };
+
+ private:
+ // AsyncPixelTransferManager implementation:
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) OVERRIDE;
+
+ SharedState shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManagerIdle);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_IDLE_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_linux.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_linux.cc
new file mode 100644
index 0000000..8d25f00
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_linux.cc
@@ -0,0 +1,40 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/command_line.h"
+#include "base/debug/trace_event.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_idle.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_stub.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+
+AsyncPixelTransferManager* AsyncPixelTransferManager::Create(
+ gfx::GLContext* context) {
+ TRACE_EVENT0("gpu", "AsyncPixelTransferManager::Create");
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableShareGroupAsyncTextureUpload)) {
+ DCHECK(context);
+ return static_cast<AsyncPixelTransferManager*> (
+ new AsyncPixelTransferManagerShareGroup(context));
+ }
+
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationOSMesaGL:
+ case gfx::kGLImplementationDesktopGL:
+ case gfx::kGLImplementationEGLGLES2:
+ return new AsyncPixelTransferManagerIdle;
+ case gfx::kGLImplementationMockGL:
+ return new AsyncPixelTransferManagerStub;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+}
+
+} // namespace gpu
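A hedged usage sketch for the factory above; the scoped_ptr ownership and the context variable are assumptions for illustration, not taken from the patch.

  // Illustrative only. With the kEnableShareGroupAsyncTextureUpload switch the
  // share-group manager is returned; otherwise the idle (or stub) manager is
  // chosen based on the GL implementation.
  scoped_ptr<gpu::AsyncPixelTransferManager> manager(
      gpu::AsyncPixelTransferManager::Create(context.get()));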
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_mac.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_mac.cc
new file mode 100644
index 0000000..8c19b57
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_mac.cc
@@ -0,0 +1,30 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/debug/trace_event.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_idle.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_stub.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+
+AsyncPixelTransferManager* AsyncPixelTransferManager::Create(
+ gfx::GLContext* context) {
+ TRACE_EVENT0("gpu", "AsyncPixelTransferManager::Create");
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationOSMesaGL:
+ case gfx::kGLImplementationDesktopGL:
+ case gfx::kGLImplementationAppleGL:
+ return new AsyncPixelTransferManagerIdle;
+ case gfx::kGLImplementationMockGL:
+ return new AsyncPixelTransferManagerStub;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_mock.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_mock.cc
new file mode 100644
index 0000000..84e95e3
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_mock.cc
@@ -0,0 +1,15 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+MockAsyncPixelTransferManager::MockAsyncPixelTransferManager() {}
+
+MockAsyncPixelTransferManager::~MockAsyncPixelTransferManager() {}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_mock.h b/gpu/command_buffer/service/async_pixel_transfer_manager_mock.h
new file mode 100644
index 0000000..3bc8b6b
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_mock.h
@@ -0,0 +1,39 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_MOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_MOCK_H_
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+
+class MockAsyncPixelTransferManager : public AsyncPixelTransferManager {
+ public:
+ MockAsyncPixelTransferManager();
+ virtual ~MockAsyncPixelTransferManager();
+
+ // AsyncPixelTransferManager implementation:
+ MOCK_METHOD0(BindCompletedAsyncTransfers, void());
+ MOCK_METHOD2(AsyncNotifyCompletion,
+ void(const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer));
+ MOCK_METHOD0(GetTextureUploadCount, uint32());
+ MOCK_METHOD0(GetTotalTextureUploadTime, base::TimeDelta());
+ MOCK_METHOD0(ProcessMorePendingTransfers, void());
+ MOCK_METHOD0(NeedsProcessMorePendingTransfers, bool());
+ MOCK_METHOD0(WaitAllAsyncTexImage2D, void());
+ MOCK_METHOD2(
+ CreatePixelTransferDelegateImpl,
+ AsyncPixelTransferDelegate*(gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAsyncPixelTransferManager);
+};
+
+} // namespace gpu
+
+#endif  // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_MOCK_H_
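A short gmock-based sketch of how this mock might be driven in a unit test; the test name and the expectation are illustrative, not taken from the Chromium test suite.

  #include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"

  #include "testing/gmock/include/gmock/gmock.h"
  #include "testing/gtest/include/gtest/gtest.h"

  using ::testing::Return;

  TEST(MockAsyncPixelTransferManagerTest, ReportsNoPendingTransfers) {
    gpu::MockAsyncPixelTransferManager manager;
    // Stub the scheduling query so the code under test takes the idle path.
    EXPECT_CALL(manager, NeedsProcessMorePendingTransfers())
        .WillOnce(Return(false));
    EXPECT_FALSE(manager.NeedsProcessMorePendingTransfers());
  }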
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc
new file mode 100644
index 0000000..99103b8
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc
@@ -0,0 +1,555 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h"
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_synthetic_delay.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/cancellation_flag.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/gpu_preference.h"
+#include "ui/gl/scoped_binders.h"
+
+namespace gpu {
+
+namespace {
+
+const char kAsyncTransferThreadName[] = "AsyncTransferThread";
+
+void PerformNotifyCompletion(
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
+ TRACE_EVENT0("gpu", "PerformNotifyCompletion");
+ observer->DidComplete(mem_params);
+}
+
+// TODO(backer): Factor out common thread scheduling logic from the EGL and
+// ShareGroup implementations. http://crbug.com/239889
+class TransferThread : public base::Thread {
+ public:
+ TransferThread()
+ : base::Thread(kAsyncTransferThreadName),
+ initialized_(false) {
+ Start();
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+ SetPriority(base::kThreadPriority_Background);
+#endif
+ }
+
+ virtual ~TransferThread() {
+ // The only instance of this class was declared leaky.
+ NOTREACHED();
+ }
+
+ void InitializeOnMainThread(gfx::GLContext* parent_context) {
+ TRACE_EVENT0("gpu", "TransferThread::InitializeOnMainThread");
+ if (initialized_)
+ return;
+
+ base::WaitableEvent wait_for_init(true, false);
+ message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&TransferThread::InitializeOnTransferThread,
+ base::Unretained(this),
+ base::Unretained(parent_context),
+ &wait_for_init));
+ wait_for_init.Wait();
+ }
+
+ virtual void CleanUp() OVERRIDE {
+ surface_ = NULL;
+ context_ = NULL;
+ }
+
+ private:
+ bool initialized_;
+
+ scoped_refptr<gfx::GLSurface> surface_;
+ scoped_refptr<gfx::GLContext> context_;
+
+ void InitializeOnTransferThread(gfx::GLContext* parent_context,
+ base::WaitableEvent* caller_wait) {
+ TRACE_EVENT0("gpu", "InitializeOnTransferThread");
+
+ if (!parent_context) {
+ LOG(ERROR) << "No parent context provided.";
+ caller_wait->Signal();
+ return;
+ }
+
+ surface_ = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1));
+ if (!surface_.get()) {
+ LOG(ERROR) << "Unable to create GLSurface";
+ caller_wait->Signal();
+ return;
+ }
+
+ // TODO(backer): This is coded for integrated GPUs. For discrete GPUs
+ // we would probably want to use a PBO texture upload for a true async
+ // upload (that would hopefully be optimized as a DMA transfer by the
+ // driver).
+ context_ = gfx::GLContext::CreateGLContext(parent_context->share_group(),
+ surface_.get(),
+ gfx::PreferIntegratedGpu);
+ if (!context_.get()) {
+ LOG(ERROR) << "Unable to create GLContext.";
+ caller_wait->Signal();
+ return;
+ }
+
+ context_->MakeCurrent(surface_.get());
+ initialized_ = true;
+ caller_wait->Signal();
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(TransferThread);
+};
+
+base::LazyInstance<TransferThread>::Leaky
+ g_transfer_thread = LAZY_INSTANCE_INITIALIZER;
+
+base::MessageLoopProxy* transfer_message_loop_proxy() {
+ return g_transfer_thread.Pointer()->message_loop_proxy().get();
+}
+
+class PendingTask : public base::RefCountedThreadSafe<PendingTask> {
+ public:
+ explicit PendingTask(const base::Closure& task)
+ : task_(task), task_pending_(true, false) {}
+
+ bool TryRun() {
+ // This is meant to be called on the main thread where the texture
+ // is already bound.
+ DCHECK(checker_.CalledOnValidThread());
+ if (task_lock_.Try()) {
+ // Only run once.
+ if (!task_.is_null())
+ task_.Run();
+ task_.Reset();
+
+ task_lock_.Release();
+ task_pending_.Signal();
+ return true;
+ }
+ return false;
+ }
+
+ void BindAndRun(GLuint texture_id) {
+ // This is meant to be called on the upload thread where we don't have to
+ // restore the previous texture binding.
+ DCHECK(!checker_.CalledOnValidThread());
+ base::AutoLock locked(task_lock_);
+ if (!task_.is_null()) {
+ glBindTexture(GL_TEXTURE_2D, texture_id);
+ task_.Run();
+ task_.Reset();
+ glBindTexture(GL_TEXTURE_2D, 0);
+ // Flush for synchronization between threads.
+ glFlush();
+ task_pending_.Signal();
+ }
+ }
+
+ void Cancel() {
+ base::AutoLock locked(task_lock_);
+ task_.Reset();
+ task_pending_.Signal();
+ }
+
+ bool TaskIsInProgress() {
+ return !task_pending_.IsSignaled();
+ }
+
+ void WaitForTask() {
+ task_pending_.Wait();
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<PendingTask>;
+
+ virtual ~PendingTask() {}
+
+ base::ThreadChecker checker_;
+
+ base::Lock task_lock_;
+ base::Closure task_;
+ base::WaitableEvent task_pending_;
+
+ DISALLOW_COPY_AND_ASSIGN(PendingTask);
+};
+
+// Class which holds async pixel transfer state.
+// The texture_id is accessed by either thread, but everything
+// else is accessed only on the main thread.
+class TransferStateInternal
+ : public base::RefCountedThreadSafe<TransferStateInternal> {
+ public:
+ TransferStateInternal(GLuint texture_id,
+ const AsyncTexImage2DParams& define_params)
+ : texture_id_(texture_id), define_params_(define_params) {}
+
+ bool TransferIsInProgress() {
+ return pending_upload_task_.get() &&
+ pending_upload_task_->TaskIsInProgress();
+ }
+
+ void BindTransfer() {
+ TRACE_EVENT2("gpu", "BindAsyncTransfer",
+ "width", define_params_.width,
+ "height", define_params_.height);
+ DCHECK(texture_id_);
+
+ glBindTexture(GL_TEXTURE_2D, texture_id_);
+ bind_callback_.Run();
+ }
+
+ void WaitForTransferCompletion() {
+ TRACE_EVENT0("gpu", "WaitForTransferCompletion");
+ DCHECK(pending_upload_task_.get());
+ if (!pending_upload_task_->TryRun()) {
+ pending_upload_task_->WaitForTask();
+ }
+ pending_upload_task_ = NULL;
+ }
+
+ void CancelUpload() {
+ TRACE_EVENT0("gpu", "CancelUpload");
+ if (pending_upload_task_.get())
+ pending_upload_task_->Cancel();
+ pending_upload_task_ = NULL;
+ }
+
+ void ScheduleAsyncTexImage2D(
+ const AsyncTexImage2DParams tex_params,
+ const AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats,
+ const base::Closure& bind_callback) {
+ TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
+ pending_upload_task_ = new PendingTask(base::Bind(
+ &TransferStateInternal::PerformAsyncTexImage2D,
+ this,
+ tex_params,
+ mem_params,
+ texture_upload_stats));
+ transfer_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &PendingTask::BindAndRun, pending_upload_task_, texture_id_));
+
+ // Save the late bind callback, so we can notify the client when it is
+ // bound.
+ bind_callback_ = bind_callback;
+ }
+
+ void ScheduleAsyncTexSubImage2D(
+ AsyncTexSubImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
+ TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
+ pending_upload_task_ = new PendingTask(base::Bind(
+ &TransferStateInternal::PerformAsyncTexSubImage2D,
+ this,
+ tex_params,
+ mem_params,
+ texture_upload_stats));
+ transfer_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &PendingTask::BindAndRun, pending_upload_task_, texture_id_));
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<TransferStateInternal>;
+
+ virtual ~TransferStateInternal() {
+ }
+
+ void PerformAsyncTexImage2D(
+ AsyncTexImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
+ TRACE_EVENT2("gpu",
+ "PerformAsyncTexImage",
+ "width",
+ tex_params.width,
+ "height",
+ tex_params.height);
+ DCHECK_EQ(0, tex_params.level);
+
+ base::TimeTicks begin_time;
+ if (texture_upload_stats.get())
+ begin_time = base::TimeTicks::HighResNow();
+
+ void* data = mem_params.GetDataAddress();
+
+ {
+ TRACE_EVENT0("gpu", "glTexImage2D");
+ glTexImage2D(GL_TEXTURE_2D,
+ tex_params.level,
+ tex_params.internal_format,
+ tex_params.width,
+ tex_params.height,
+ tex_params.border,
+ tex_params.format,
+ tex_params.type,
+ data);
+ TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
+ }
+
+ if (texture_upload_stats.get()) {
+ texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
+ begin_time);
+ }
+ }
+
+ void PerformAsyncTexSubImage2D(
+ AsyncTexSubImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
+ TRACE_EVENT2("gpu",
+ "PerformAsyncTexSubImage2D",
+ "width",
+ tex_params.width,
+ "height",
+ tex_params.height);
+ DCHECK_EQ(0, tex_params.level);
+
+ base::TimeTicks begin_time;
+ if (texture_upload_stats.get())
+ begin_time = base::TimeTicks::HighResNow();
+
+ void* data = mem_params.GetDataAddress();
+ {
+ TRACE_EVENT0("gpu", "glTexSubImage2D");
+ glTexSubImage2D(GL_TEXTURE_2D,
+ tex_params.level,
+ tex_params.xoffset,
+ tex_params.yoffset,
+ tex_params.width,
+ tex_params.height,
+ tex_params.format,
+ tex_params.type,
+ data);
+ TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
+ }
+
+ if (texture_upload_stats.get()) {
+ texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
+ begin_time);
+ }
+ }
+
+ scoped_refptr<PendingTask> pending_upload_task_;
+
+ GLuint texture_id_;
+
+ // Definition params for texture that needs binding.
+ AsyncTexImage2DParams define_params_;
+
+ // Callback to invoke when AsyncTexImage2D is complete
+ // and the client can safely use the texture. This occurs
+ // during BindCompletedAsyncTransfers().
+ base::Closure bind_callback_;
+};
+
+} // namespace
+
+class AsyncPixelTransferDelegateShareGroup
+ : public AsyncPixelTransferDelegate,
+ public base::SupportsWeakPtr<AsyncPixelTransferDelegateShareGroup> {
+ public:
+ AsyncPixelTransferDelegateShareGroup(
+ AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params);
+ virtual ~AsyncPixelTransferDelegateShareGroup();
+
+ void BindTransfer() { state_->BindTransfer(); }
+
+ // Implement AsyncPixelTransferDelegate:
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) OVERRIDE;
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) OVERRIDE;
+ virtual bool TransferIsInProgress() OVERRIDE;
+ virtual void WaitForTransferCompletion() OVERRIDE;
+
+ private:
+ // A raw pointer is safe because the SharedState is owned by the Manager,
+ // which owns this Delegate.
+ AsyncPixelTransferManagerShareGroup::SharedState* shared_state_;
+ scoped_refptr<TransferStateInternal> state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateShareGroup);
+};
+
+AsyncPixelTransferDelegateShareGroup::AsyncPixelTransferDelegateShareGroup(
+ AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params)
+ : shared_state_(shared_state),
+ state_(new TransferStateInternal(texture_id, define_params)) {}
+
+AsyncPixelTransferDelegateShareGroup::~AsyncPixelTransferDelegateShareGroup() {
+  TRACE_EVENT0("gpu", "~AsyncPixelTransferDelegateShareGroup");
+ state_->CancelUpload();
+}
+
+bool AsyncPixelTransferDelegateShareGroup::TransferIsInProgress() {
+ return state_->TransferIsInProgress();
+}
+
+void AsyncPixelTransferDelegateShareGroup::WaitForTransferCompletion() {
+ if (state_->TransferIsInProgress()) {
+ state_->WaitForTransferCompletion();
+ DCHECK(!state_->TransferIsInProgress());
+ }
+
+ // Fast track the BindTransfer, if applicable.
+ for (AsyncPixelTransferManagerShareGroup::SharedState::TransferQueue::iterator
+ iter = shared_state_->pending_allocations.begin();
+ iter != shared_state_->pending_allocations.end();
+ ++iter) {
+ if (iter->get() != this)
+ continue;
+
+ shared_state_->pending_allocations.erase(iter);
+ BindTransfer();
+ break;
+ }
+}
+
+void AsyncPixelTransferDelegateShareGroup::AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+ DCHECK(!state_->TransferIsInProgress());
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+ DCHECK_EQ(tex_params.level, 0);
+
+ shared_state_->pending_allocations.push_back(AsWeakPtr());
+ state_->ScheduleAsyncTexImage2D(tex_params,
+ mem_params,
+ shared_state_->texture_upload_stats,
+ bind_callback);
+}
+
+void AsyncPixelTransferDelegateShareGroup::AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+ TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
+ "width", tex_params.width,
+ "height", tex_params.height);
+ DCHECK(!state_->TransferIsInProgress());
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+ DCHECK_EQ(tex_params.level, 0);
+
+ state_->ScheduleAsyncTexSubImage2D(
+ tex_params, mem_params, shared_state_->texture_upload_stats);
+}
+
+AsyncPixelTransferManagerShareGroup::SharedState::SharedState()
+ // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
+ : texture_upload_stats(new AsyncPixelTransferUploadStats) {}
+
+AsyncPixelTransferManagerShareGroup::SharedState::~SharedState() {}
+
+AsyncPixelTransferManagerShareGroup::AsyncPixelTransferManagerShareGroup(
+ gfx::GLContext* context) {
+ g_transfer_thread.Pointer()->InitializeOnMainThread(context);
+}
+
+AsyncPixelTransferManagerShareGroup::~AsyncPixelTransferManagerShareGroup() {}
+
+void AsyncPixelTransferManagerShareGroup::BindCompletedAsyncTransfers() {
+ scoped_ptr<gfx::ScopedTextureBinder> texture_binder;
+
+ while (!shared_state_.pending_allocations.empty()) {
+ if (!shared_state_.pending_allocations.front().get()) {
+ shared_state_.pending_allocations.pop_front();
+ continue;
+ }
+ AsyncPixelTransferDelegateShareGroup* delegate =
+ shared_state_.pending_allocations.front().get();
+    // Transfers complete in order, so stop at the first one in progress.
+ if (delegate->TransferIsInProgress())
+ break;
+
+ if (!texture_binder)
+ texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));
+
+    // The upload has finished: bind the texture and run the bind callback
+    // so the gles2 cmd decoder can update its texture info.
+ delegate->BindTransfer();
+
+ shared_state_.pending_allocations.pop_front();
+ }
+}
+
+void AsyncPixelTransferManagerShareGroup::AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) {
+ // Post a PerformNotifyCompletion task to the upload thread. This task
+ // will run after all async transfers are complete.
+ transfer_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&PerformNotifyCompletion,
+ mem_params,
+ make_scoped_refptr(observer)));
+}
+
+uint32 AsyncPixelTransferManagerShareGroup::GetTextureUploadCount() {
+ return shared_state_.texture_upload_stats->GetStats(NULL);
+}
+
+base::TimeDelta
+AsyncPixelTransferManagerShareGroup::GetTotalTextureUploadTime() {
+ base::TimeDelta total_texture_upload_time;
+ shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
+ return total_texture_upload_time;
+}
+
+void AsyncPixelTransferManagerShareGroup::ProcessMorePendingTransfers() {
+}
+
+bool AsyncPixelTransferManagerShareGroup::NeedsProcessMorePendingTransfers() {
+ return false;
+}
+
+void AsyncPixelTransferManagerShareGroup::WaitAllAsyncTexImage2D() {
+ if (shared_state_.pending_allocations.empty())
+ return;
+
+ AsyncPixelTransferDelegateShareGroup* delegate =
+ shared_state_.pending_allocations.back().get();
+ if (delegate)
+ delegate->WaitForTransferCompletion();
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManagerShareGroup::CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ return new AsyncPixelTransferDelegateShareGroup(
+ &shared_state_, ref->service_id(), define_params);
+}
+
+} // namespace gpu
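The PendingTask class above lets an upload run on whichever thread reaches it first, but at most once: the upload thread takes the lock unconditionally in BindAndRun(), while the main thread uses a try-lock in TryRun() so it never blocks behind an in-flight upload. Below is a standalone sketch of that core idea using std::mutex and std::function in place of base::Lock and base::Closure; the WaitableEvent signalling and the texture binding are deliberately omitted.

  #include <functional>
  #include <mutex>
  #include <utility>

  class RunOnceTask {
   public:
    explicit RunOnceTask(std::function<void()> task)
        : task_(std::move(task)) {}

    // Main thread: run the task now unless another thread holds the lock.
    bool TryRun() {
      std::unique_lock<std::mutex> lock(mutex_, std::try_to_lock);
      if (!lock.owns_lock())
        return false;  // The upload thread is (or was) running it.
      RunIfPending();
      return true;
    }

    // Upload thread: always acquires the lock and runs the task if it has
    // not already been consumed by TryRun().
    void Run() {
      std::lock_guard<std::mutex> lock(mutex_);
      RunIfPending();
    }

   private:
    void RunIfPending() {
      if (task_) {
        task_();
        task_ = nullptr;  // Guarantees at-most-once execution.
      }
    }

    std::mutex mutex_;
    std::function<void()> task_;
  };

In the real class a base::WaitableEvent is also signalled after the closure runs so that WaitForTransferCompletion() can block until the upload has finished.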
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h
new file mode 100644
index 0000000..64daffe
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h
@@ -0,0 +1,60 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SHARE_GROUP_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SHARE_GROUP_H_
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/memory/ref_counted.h"
+
+namespace gfx {
+class GLContext;
+}
+
+namespace gpu {
+class AsyncPixelTransferDelegateShareGroup;
+class AsyncPixelTransferUploadStats;
+
+class AsyncPixelTransferManagerShareGroup : public AsyncPixelTransferManager {
+ public:
+ explicit AsyncPixelTransferManagerShareGroup(gfx::GLContext* context);
+ virtual ~AsyncPixelTransferManagerShareGroup();
+
+ // AsyncPixelTransferManager implementation:
+ virtual void BindCompletedAsyncTransfers() OVERRIDE;
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) OVERRIDE;
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual void ProcessMorePendingTransfers() OVERRIDE;
+ virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
+
+ // State shared between Managers and Delegates.
+ struct SharedState {
+ SharedState();
+ ~SharedState();
+
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats;
+ typedef std::list<base::WeakPtr<AsyncPixelTransferDelegateShareGroup> >
+ TransferQueue;
+ TransferQueue pending_allocations;
+ };
+
+ private:
+ // AsyncPixelTransferManager implementation:
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) OVERRIDE;
+
+ SharedState shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManagerShareGroup);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SHARE_GROUP_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc
new file mode 100644
index 0000000..d5f96b0
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc
@@ -0,0 +1,91 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_stub.h"
+
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+
+namespace gpu {
+
+class AsyncPixelTransferDelegateStub : public AsyncPixelTransferDelegate {
+ public:
+ AsyncPixelTransferDelegateStub();
+ virtual ~AsyncPixelTransferDelegateStub();
+
+ // Implement AsyncPixelTransferDelegate:
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) OVERRIDE;
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) OVERRIDE;
+ virtual bool TransferIsInProgress() OVERRIDE;
+ virtual void WaitForTransferCompletion() OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateStub);
+};
+
+AsyncPixelTransferDelegateStub::AsyncPixelTransferDelegateStub() {}
+
+AsyncPixelTransferDelegateStub::~AsyncPixelTransferDelegateStub() {}
+
+void AsyncPixelTransferDelegateStub::AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+ bind_callback.Run();
+}
+
+void AsyncPixelTransferDelegateStub::AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+}
+
+bool AsyncPixelTransferDelegateStub::TransferIsInProgress() {
+ return false;
+}
+
+void AsyncPixelTransferDelegateStub::WaitForTransferCompletion() {}
+
+AsyncPixelTransferManagerStub::AsyncPixelTransferManagerStub() {}
+
+AsyncPixelTransferManagerStub::~AsyncPixelTransferManagerStub() {}
+
+void AsyncPixelTransferManagerStub::BindCompletedAsyncTransfers() {
+}
+
+void AsyncPixelTransferManagerStub::AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) {
+ observer->DidComplete(mem_params);
+}
+
+uint32 AsyncPixelTransferManagerStub::GetTextureUploadCount() {
+ return 0;
+}
+
+base::TimeDelta AsyncPixelTransferManagerStub::GetTotalTextureUploadTime() {
+ return base::TimeDelta();
+}
+
+void AsyncPixelTransferManagerStub::ProcessMorePendingTransfers() {
+}
+
+bool AsyncPixelTransferManagerStub::NeedsProcessMorePendingTransfers() {
+ return false;
+}
+
+void AsyncPixelTransferManagerStub::WaitAllAsyncTexImage2D() {
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManagerStub::CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ return new AsyncPixelTransferDelegateStub();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_stub.h b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.h
new file mode 100644
index 0000000..a93ce94
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.h
@@ -0,0 +1,39 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_STUB_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_STUB_H_
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+namespace gpu {
+
+class AsyncPixelTransferManagerStub : public AsyncPixelTransferManager {
+ public:
+ AsyncPixelTransferManagerStub();
+ virtual ~AsyncPixelTransferManagerStub();
+
+ // AsyncPixelTransferManager implementation:
+ virtual void BindCompletedAsyncTransfers() OVERRIDE;
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) OVERRIDE;
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual void ProcessMorePendingTransfers() OVERRIDE;
+ virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
+
+ private:
+ // AsyncPixelTransferManager implementation:
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) OVERRIDE;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManagerStub);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_STUB_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc
new file mode 100644
index 0000000..cd7d087
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc
@@ -0,0 +1,141 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_sync.h"
+
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+
+namespace gpu {
+
+// Class which handles async pixel transfers synchronously.
+class AsyncPixelTransferDelegateSync : public AsyncPixelTransferDelegate {
+ public:
+ explicit AsyncPixelTransferDelegateSync(
+ AsyncPixelTransferManagerSync::SharedState* shared_state);
+ virtual ~AsyncPixelTransferDelegateSync();
+
+ // Implement AsyncPixelTransferDelegate:
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) OVERRIDE;
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) OVERRIDE;
+ virtual bool TransferIsInProgress() OVERRIDE;
+ virtual void WaitForTransferCompletion() OVERRIDE;
+
+ private:
+ // Safe to hold a raw pointer because SharedState is owned by the Manager
+ // which owns the Delegate.
+ AsyncPixelTransferManagerSync::SharedState* shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateSync);
+};
+
+AsyncPixelTransferDelegateSync::AsyncPixelTransferDelegateSync(
+ AsyncPixelTransferManagerSync::SharedState* shared_state)
+ : shared_state_(shared_state) {}
+
+AsyncPixelTransferDelegateSync::~AsyncPixelTransferDelegateSync() {}
+
+void AsyncPixelTransferDelegateSync::AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+  // The upload happens synchronously, so there is no deferred binding of
+  // the transfer texture; it is ready as soon as glTexImage2D returns.
+ void* data = mem_params.GetDataAddress();
+ base::TimeTicks begin_time(base::TimeTicks::HighResNow());
+ glTexImage2D(
+ tex_params.target,
+ tex_params.level,
+ tex_params.internal_format,
+ tex_params.width,
+ tex_params.height,
+ tex_params.border,
+ tex_params.format,
+ tex_params.type,
+ data);
+ shared_state_->texture_upload_count++;
+ shared_state_->total_texture_upload_time +=
+ base::TimeTicks::HighResNow() - begin_time;
+  // The texture is already fully bound, so run the bind callback now.
+ bind_callback.Run();
+}
+
+void AsyncPixelTransferDelegateSync::AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+ void* data = mem_params.GetDataAddress();
+ base::TimeTicks begin_time(base::TimeTicks::HighResNow());
+ glTexSubImage2D(
+ tex_params.target,
+ tex_params.level,
+ tex_params.xoffset,
+ tex_params.yoffset,
+ tex_params.width,
+ tex_params.height,
+ tex_params.format,
+ tex_params.type,
+ data);
+ shared_state_->texture_upload_count++;
+ shared_state_->total_texture_upload_time +=
+ base::TimeTicks::HighResNow() - begin_time;
+}
+
+bool AsyncPixelTransferDelegateSync::TransferIsInProgress() {
+ // Already done.
+ return false;
+}
+
+void AsyncPixelTransferDelegateSync::WaitForTransferCompletion() {
+ // Already done.
+}
+
+AsyncPixelTransferManagerSync::SharedState::SharedState()
+ : texture_upload_count(0) {}
+
+AsyncPixelTransferManagerSync::SharedState::~SharedState() {}
+
+AsyncPixelTransferManagerSync::AsyncPixelTransferManagerSync() {}
+
+AsyncPixelTransferManagerSync::~AsyncPixelTransferManagerSync() {}
+
+void AsyncPixelTransferManagerSync::BindCompletedAsyncTransfers() {
+ // Everything is already bound.
+}
+
+void AsyncPixelTransferManagerSync::AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) {
+ observer->DidComplete(mem_params);
+}
+
+uint32 AsyncPixelTransferManagerSync::GetTextureUploadCount() {
+ return shared_state_.texture_upload_count;
+}
+
+base::TimeDelta AsyncPixelTransferManagerSync::GetTotalTextureUploadTime() {
+ return shared_state_.total_texture_upload_time;
+}
+
+void AsyncPixelTransferManagerSync::ProcessMorePendingTransfers() {
+}
+
+bool AsyncPixelTransferManagerSync::NeedsProcessMorePendingTransfers() {
+ return false;
+}
+
+void AsyncPixelTransferManagerSync::WaitAllAsyncTexImage2D() {
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManagerSync::CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ return new AsyncPixelTransferDelegateSync(&shared_state_);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_sync.h b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.h
new file mode 100644
index 0000000..7d0b8b6
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.h
@@ -0,0 +1,50 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SYNC_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SYNC_H_
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+namespace gpu {
+
+class AsyncPixelTransferManagerSync : public AsyncPixelTransferManager {
+ public:
+ AsyncPixelTransferManagerSync();
+ virtual ~AsyncPixelTransferManagerSync();
+
+ // AsyncPixelTransferManager implementation:
+ virtual void BindCompletedAsyncTransfers() OVERRIDE;
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) OVERRIDE;
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual void ProcessMorePendingTransfers() OVERRIDE;
+ virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
+
+ // State shared between Managers and Delegates.
+ struct SharedState {
+ SharedState();
+ ~SharedState();
+
+ int texture_upload_count;
+ base::TimeDelta total_texture_upload_time;
+ };
+
+ private:
+ // AsyncPixelTransferManager implementation:
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) OVERRIDE;
+
+ SharedState shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManagerSync);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SYNC_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_win.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_win.cc
new file mode 100644
index 0000000..6955885
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_win.cc
@@ -0,0 +1,30 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/debug/trace_event.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_idle.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_stub.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+
+AsyncPixelTransferManager* AsyncPixelTransferManager::Create(
+ gfx::GLContext* context) {
+ TRACE_EVENT0("gpu", "AsyncPixelTransferManager::Create");
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationOSMesaGL:
+ case gfx::kGLImplementationDesktopGL:
+ case gfx::kGLImplementationEGLGLES2:
+ return new AsyncPixelTransferManagerIdle;
+ case gfx::kGLImplementationMockGL:
+ return new AsyncPixelTransferManagerStub;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/buffer_manager.cc b/gpu/command_buffer/service/buffer_manager.cc
new file mode 100644
index 0000000..7b1c90d
--- /dev/null
+++ b/gpu/command_buffer/service/buffer_manager.cc
@@ -0,0 +1,407 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include <limits>
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+
+BufferManager::BufferManager(
+ MemoryTracker* memory_tracker,
+ FeatureInfo* feature_info)
+ : memory_tracker_(
+ new MemoryTypeTracker(memory_tracker, MemoryTracker::kManaged)),
+ feature_info_(feature_info),
+ allow_buffers_on_multiple_targets_(false),
+ buffer_count_(0),
+ have_context_(true),
+ use_client_side_arrays_for_stream_buffers_(
+ feature_info ? feature_info->workarounds(
+ ).use_client_side_arrays_for_stream_buffers : 0) {
+}
+
+BufferManager::~BufferManager() {
+ DCHECK(buffers_.empty());
+ CHECK_EQ(buffer_count_, 0u);
+}
+
+void BufferManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ buffers_.clear();
+ DCHECK_EQ(0u, memory_tracker_->GetMemRepresented());
+}
+
+void BufferManager::CreateBuffer(GLuint client_id, GLuint service_id) {
+ scoped_refptr<Buffer> buffer(new Buffer(this, service_id));
+ std::pair<BufferMap::iterator, bool> result =
+ buffers_.insert(std::make_pair(client_id, buffer));
+ DCHECK(result.second);
+}
+
+Buffer* BufferManager::GetBuffer(
+ GLuint client_id) {
+ BufferMap::iterator it = buffers_.find(client_id);
+ return it != buffers_.end() ? it->second.get() : NULL;
+}
+
+void BufferManager::RemoveBuffer(GLuint client_id) {
+ BufferMap::iterator it = buffers_.find(client_id);
+ if (it != buffers_.end()) {
+ Buffer* buffer = it->second.get();
+ buffer->MarkAsDeleted();
+ buffers_.erase(it);
+ }
+}
+
+void BufferManager::StartTracking(Buffer* /* buffer */) {
+ ++buffer_count_;
+}
+
+void BufferManager::StopTracking(Buffer* buffer) {
+ memory_tracker_->TrackMemFree(buffer->size());
+ --buffer_count_;
+}
+
+Buffer::Buffer(BufferManager* manager, GLuint service_id)
+ : manager_(manager),
+ size_(0),
+ deleted_(false),
+ shadowed_(false),
+ is_client_side_array_(false),
+ service_id_(service_id),
+ target_(0),
+ usage_(GL_STATIC_DRAW) {
+ manager_->StartTracking(this);
+}
+
+Buffer::~Buffer() {
+ if (manager_) {
+ if (manager_->have_context_) {
+ GLuint id = service_id();
+ glDeleteBuffersARB(1, &id);
+ }
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+void Buffer::SetInfo(
+ GLsizeiptr size, GLenum usage, bool shadow, const GLvoid* data,
+ bool is_client_side_array) {
+ usage_ = usage;
+ is_client_side_array_ = is_client_side_array;
+ ClearCache();
+ if (size != size_ || shadow != shadowed_) {
+ shadowed_ = shadow;
+ size_ = size;
+ if (shadowed_) {
+ shadow_.reset(new int8[size]);
+ } else {
+ shadow_.reset();
+ }
+ }
+ if (shadowed_) {
+ if (data) {
+ memcpy(shadow_.get(), data, size);
+ } else {
+ memset(shadow_.get(), 0, size);
+ }
+ }
+}
+
+bool Buffer::CheckRange(
+ GLintptr offset, GLsizeiptr size) const {
+ int32 end = 0;
+ return offset >= 0 && size >= 0 &&
+ offset <= std::numeric_limits<int32>::max() &&
+ size <= std::numeric_limits<int32>::max() &&
+ SafeAddInt32(offset, size, &end) && end <= size_;
+}
+
+bool Buffer::SetRange(
+ GLintptr offset, GLsizeiptr size, const GLvoid * data) {
+ if (!CheckRange(offset, size)) {
+ return false;
+ }
+ if (shadowed_) {
+ memcpy(shadow_.get() + offset, data, size);
+ ClearCache();
+ }
+ return true;
+}
+
+const void* Buffer::GetRange(
+ GLintptr offset, GLsizeiptr size) const {
+ if (!shadowed_) {
+ return NULL;
+ }
+ if (!CheckRange(offset, size)) {
+ return NULL;
+ }
+ return shadow_.get() + offset;
+}
+
+void Buffer::ClearCache() {
+ range_set_.clear();
+}
+
+template <typename T>
+GLuint GetMaxValue(const void* data, GLuint offset, GLsizei count) {
+ GLuint max_value = 0;
+ const T* element = reinterpret_cast<const T*>(
+ static_cast<const int8*>(data) + offset);
+ const T* end = element + count;
+ for (; element < end; ++element) {
+ if (*element > max_value) {
+ max_value = *element;
+ }
+ }
+ return max_value;
+}
+
+bool Buffer::GetMaxValueForRange(
+ GLuint offset, GLsizei count, GLenum type, GLuint* max_value) {
+ Range range(offset, count, type);
+ RangeToMaxValueMap::iterator it = range_set_.find(range);
+ if (it != range_set_.end()) {
+ *max_value = it->second;
+ return true;
+ }
+
+ uint32 size;
+ if (!SafeMultiplyUint32(
+ count, GLES2Util::GetGLTypeSizeForTexturesAndBuffers(type), &size)) {
+ return false;
+ }
+
+ if (!SafeAddUint32(offset, size, &size)) {
+ return false;
+ }
+
+ if (size > static_cast<uint32>(size_)) {
+ return false;
+ }
+
+ if (!shadowed_) {
+ return false;
+ }
+
+  // Scan the range for the max value and cache the result.
+ GLuint max_v = 0;
+ switch (type) {
+ case GL_UNSIGNED_BYTE:
+ max_v = GetMaxValue<uint8>(shadow_.get(), offset, count);
+ break;
+ case GL_UNSIGNED_SHORT:
+ // Check we are not accessing an odd byte for a 2 byte value.
+ if ((offset & 1) != 0) {
+ return false;
+ }
+ max_v = GetMaxValue<uint16>(shadow_.get(), offset, count);
+ break;
+ case GL_UNSIGNED_INT:
+      // Check we are not accessing an unaligned address for a 4 byte value.
+ if ((offset & 3) != 0) {
+ return false;
+ }
+ max_v = GetMaxValue<uint32>(shadow_.get(), offset, count);
+ break;
+ default:
+ NOTREACHED(); // should never get here by validation.
+ break;
+ }
+ range_set_.insert(std::make_pair(range, max_v));
+ *max_value = max_v;
+ return true;
+}
+
+bool BufferManager::GetClientId(GLuint service_id, GLuint* client_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (BufferMap::const_iterator it = buffers_.begin();
+ it != buffers_.end(); ++it) {
+ if (it->second->service_id() == service_id) {
+ *client_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool BufferManager::IsUsageClientSideArray(GLenum usage) {
+ return usage == GL_STREAM_DRAW && use_client_side_arrays_for_stream_buffers_;
+}
+
+bool BufferManager::UseNonZeroSizeForClientSideArrayBuffer() {
+ return feature_info_.get() &&
+ feature_info_->workarounds()
+ .use_non_zero_size_for_client_side_stream_buffers;
+}
+
+void BufferManager::SetInfo(
+ Buffer* buffer, GLsizeiptr size, GLenum usage, const GLvoid* data) {
+ DCHECK(buffer);
+ memory_tracker_->TrackMemFree(buffer->size());
+ bool is_client_side_array = IsUsageClientSideArray(usage);
+ bool shadow = buffer->target() == GL_ELEMENT_ARRAY_BUFFER ||
+ allow_buffers_on_multiple_targets_ ||
+ is_client_side_array;
+ buffer->SetInfo(size, usage, shadow, data, is_client_side_array);
+ memory_tracker_->TrackMemAlloc(buffer->size());
+}
+
+void BufferManager::ValidateAndDoBufferData(
+ ContextState* context_state, GLenum target, GLsizeiptr size,
+ const GLvoid * data, GLenum usage) {
+ ErrorState* error_state = context_state->GetErrorState();
+ if (!feature_info_->validators()->buffer_target.IsValid(target)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, "glBufferData", target, "target");
+ return;
+ }
+ if (!feature_info_->validators()->buffer_usage.IsValid(usage)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, "glBufferData", usage, "usage");
+ return;
+ }
+ if (size < 0) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_VALUE, "glBufferData", "size < 0");
+ return;
+ }
+
+ Buffer* buffer = GetBufferInfoForTarget(context_state, target);
+ if (!buffer) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_VALUE, "glBufferData", "unknown buffer");
+ return;
+ }
+
+ if (!memory_tracker_->EnsureGPUMemoryAvailable(size)) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_OUT_OF_MEMORY, "glBufferData", "out of memory");
+ return;
+ }
+
+ DoBufferData(error_state, buffer, size, usage, data);
+}
+
+
+void BufferManager::DoBufferData(
+ ErrorState* error_state,
+ Buffer* buffer,
+ GLsizeiptr size,
+ GLenum usage,
+ const GLvoid* data) {
+ // Clear the buffer to 0 if no initial data was passed in.
+ scoped_ptr<int8[]> zero;
+ if (!data) {
+ zero.reset(new int8[size]);
+ memset(zero.get(), 0, size);
+ data = zero.get();
+ }
+
+ ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state, "glBufferData");
+ if (IsUsageClientSideArray(usage)) {
+ GLsizei empty_size = UseNonZeroSizeForClientSideArrayBuffer() ? 1 : 0;
+ glBufferData(buffer->target(), empty_size, NULL, usage);
+ } else {
+ glBufferData(buffer->target(), size, data, usage);
+ }
+ GLenum error = ERRORSTATE_PEEK_GL_ERROR(error_state, "glBufferData");
+ if (error == GL_NO_ERROR) {
+ SetInfo(buffer, size, usage, data);
+ } else {
+ SetInfo(buffer, 0, usage, NULL);
+ }
+}
+
+void BufferManager::ValidateAndDoBufferSubData(
+ ContextState* context_state, GLenum target, GLintptr offset, GLsizeiptr size,
+ const GLvoid * data) {
+ ErrorState* error_state = context_state->GetErrorState();
+ Buffer* buffer = GetBufferInfoForTarget(context_state, target);
+ if (!buffer) {
+ ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_VALUE, "glBufferSubData",
+ "unknown buffer");
+ return;
+ }
+
+ DoBufferSubData(error_state, buffer, offset, size, data);
+}
+
+void BufferManager::DoBufferSubData(
+ ErrorState* error_state,
+ Buffer* buffer,
+ GLintptr offset,
+ GLsizeiptr size,
+ const GLvoid* data) {
+ if (!buffer->SetRange(offset, size, data)) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_VALUE, "glBufferSubData", "out of range");
+ return;
+ }
+
+ if (!buffer->IsClientSideArray()) {
+ glBufferSubData(buffer->target(), offset, size, data);
+ }
+}
+
+void BufferManager::ValidateAndDoGetBufferParameteriv(
+ ContextState* context_state, GLenum target, GLenum pname, GLint* params) {
+ Buffer* buffer = GetBufferInfoForTarget(context_state, target);
+ if (!buffer) {
+ ERRORSTATE_SET_GL_ERROR(
+ context_state->GetErrorState(), GL_INVALID_OPERATION,
+ "glGetBufferParameteriv", "no buffer bound for target");
+ return;
+ }
+ switch (pname) {
+ case GL_BUFFER_SIZE:
+ *params = buffer->size();
+ break;
+ case GL_BUFFER_USAGE:
+ *params = buffer->usage();
+ break;
+ default:
+ NOTREACHED();
+ }
+}
+
+bool BufferManager::SetTarget(Buffer* buffer, GLenum target) {
+ // Check that we are not trying to bind it to a different target.
+ if (buffer->target() != 0 && buffer->target() != target &&
+ !allow_buffers_on_multiple_targets_) {
+ return false;
+ }
+ if (buffer->target() == 0) {
+ buffer->set_target(target);
+ }
+ return true;
+}
+
+// Since one BufferManager can be shared by multiple decoders, ContextState is
+// passed in each time and not just passed in during initialization.
+Buffer* BufferManager::GetBufferInfoForTarget(
+ ContextState* state, GLenum target) {
+ DCHECK(target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER);
+ if (target == GL_ARRAY_BUFFER) {
+ return state->bound_array_buffer.get();
+ } else {
+ return state->vertex_attrib_manager->element_array_buffer();
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
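A hedged sketch of the typical consumer of Buffer::GetMaxValueForRange(): validating a glDrawElements-style draw by checking that the largest index in the bound element array buffer fits in the number of vertices available. The helper and its parameter names are illustrative and assume the same GL typedefs and buffer_manager.h include as the file above; the real check lives in the GLES2 decoder, not in this file.

  // Illustrative only. Returns true if every index in the given range of the
  // (shadowed) element array buffer refers to an available vertex.
  bool IndicesAreInRange(gpu::gles2::Buffer* element_array_buffer,
                         GLuint offset, GLsizei count, GLenum type,
                         GLuint available_vertex_count) {
    GLuint max_index = 0;
    // Fails if the range is out of bounds, misaligned for |type|, or the
    // buffer is not shadowed; results are cached per (offset, count, type).
    if (!element_array_buffer->GetMaxValueForRange(offset, count, type,
                                                   &max_index)) {
      return false;
    }
    return max_index < available_vertex_count;
  }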
diff --git a/gpu/command_buffer/service/buffer_manager.h b/gpu/command_buffer/service/buffer_manager.h
new file mode 100644
index 0000000..cc23f01
--- /dev/null
+++ b/gpu/command_buffer/service/buffer_manager.h
@@ -0,0 +1,286 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_BUFFER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_BUFFER_MANAGER_H_
+
+#include <map>
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class BufferManager;
+struct ContextState;
+class ErrorState;
+class FeatureInfo;
+class TestHelper;
+
+// Info about Buffers currently in the system.
+class GPU_EXPORT Buffer : public base::RefCounted<Buffer> {
+ public:
+ Buffer(BufferManager* manager, GLuint service_id);
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ GLenum target() const {
+ return target_;
+ }
+
+ GLsizeiptr size() const {
+ return size_;
+ }
+
+ GLenum usage() const {
+ return usage_;
+ }
+
+ // Gets the maximum value in the buffer for the given range interpreted as
+ // the given type. Returns false if offset and count are out of range.
+ // offset is in bytes.
+ // count is in elements of type.
+ bool GetMaxValueForRange(GLuint offset, GLsizei count, GLenum type,
+ GLuint* max_value);
+
+ // Returns a pointer to shadowed data.
+ const void* GetRange(GLintptr offset, GLsizeiptr size) const;
+
+ bool IsDeleted() const {
+ return deleted_;
+ }
+
+ bool IsValid() const {
+ return target() && !IsDeleted();
+ }
+
+ bool IsClientSideArray() const {
+ return is_client_side_array_;
+ }
+
+ private:
+ friend class BufferManager;
+ friend class BufferManagerTestBase;
+ friend class base::RefCounted<Buffer>;
+
+ // Represents a range in a buffer.
+ class Range {
+ public:
+ Range(GLuint offset, GLsizei count, GLenum type)
+ : offset_(offset),
+ count_(count),
+ type_(type) {
+ }
+
+ // A less functor provided for std::map so it can find ranges.
+ struct Less {
+ bool operator() (const Range& lhs, const Range& rhs) const {
+ if (lhs.offset_ != rhs.offset_) {
+ return lhs.offset_ < rhs.offset_;
+ }
+ if (lhs.count_ != rhs.count_) {
+ return lhs.count_ < rhs.count_;
+ }
+ return lhs.type_ < rhs.type_;
+ }
+ };
+
+ private:
+ GLuint offset_;
+ GLsizei count_;
+ GLenum type_;
+ };
+
+ ~Buffer();
+
+ void set_target(GLenum target) {
+ DCHECK_EQ(target_, 0u); // you can only set this once.
+ target_ = target;
+ }
+
+ bool shadowed() const {
+ return shadowed_;
+ }
+
+ void MarkAsDeleted() {
+ deleted_ = true;
+ }
+
+ // Sets the size, usage and initial data of a buffer.
+  // If shadow is true and data is NULL, the buffer is initialized to 0.
+ void SetInfo(
+ GLsizeiptr size, GLenum usage, bool shadow, const GLvoid* data,
+ bool is_client_side_array);
+
+ // Sets a range of data for this buffer. Returns false if the offset or size
+ // is out of range.
+ bool SetRange(
+ GLintptr offset, GLsizeiptr size, const GLvoid * data);
+
+ // Clears any cache of index ranges.
+ void ClearCache();
+
+ // Check if an offset, size range is valid for the current buffer.
+ bool CheckRange(GLintptr offset, GLsizeiptr size) const;
+
+ // The manager that owns this Buffer.
+ BufferManager* manager_;
+
+  // A copy of the data in the buffer. This data is only kept if shadowed_
+  // is true.
+ scoped_ptr<int8[]> shadow_;
+
+ // Size of buffer.
+ GLsizeiptr size_;
+
+ // True if deleted.
+ bool deleted_;
+
+ // Whether or not the data is shadowed.
+ bool shadowed_;
+
+ // Whether this Buffer is kept only in local memory as a client side array
+ // rather than being uploaded to the GPU.
+ bool is_client_side_array_;
+
+ // Service side buffer id.
+ GLuint service_id_;
+
+ // The type of buffer. 0 = unset, GL_ARRAY_BUFFER = vertex data,
+ // GL_ELEMENT_ARRAY_BUFFER = index data.
+ // Once set, a buffer cannot be used for anything else.
+ GLenum target_;
+
+ // Usage of buffer.
+ GLenum usage_;
+
+ // A map of ranges to the highest value in that range of a certain type.
+ typedef std::map<Range, GLuint, Range::Less> RangeToMaxValueMap;
+ RangeToMaxValueMap range_set_;
+};
+
+// This class keeps track of the buffers and their sizes so we can do
+// bounds checking.
+//
+// NOTE: To support shared resources an instance of this class will need to be
+// shared by multiple GLES2Decoders.
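+//
+// A minimal usage sketch (decoder side; the variable names here are
+// illustrative, not part of this header):
+//
+//   BufferManager manager(memory_tracker, feature_info);
+//   manager.CreateBuffer(client_id, service_id);
+//   Buffer* buffer = manager.GetBuffer(client_id);
+//   if (manager.SetTarget(buffer, GL_ARRAY_BUFFER)) {
+//     manager.ValidateAndDoBufferData(
+//         context_state, GL_ARRAY_BUFFER, size, data, GL_STATIC_DRAW);
+//   }
+//   manager.Destroy(have_context);  // Must be called before destruction.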
+class GPU_EXPORT BufferManager {
+ public:
+ BufferManager(MemoryTracker* memory_tracker, FeatureInfo* feature_info);
+ ~BufferManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a Buffer for the given client and service buffer ids.
+ void CreateBuffer(GLuint client_id, GLuint service_id);
+
+ // Gets the Buffer for the given client buffer id.
+ Buffer* GetBuffer(GLuint client_id);
+
+ // Removes the Buffer for the given client buffer id.
+ void RemoveBuffer(GLuint client_id);
+
+ // Gets a client id for a given service id.
+ bool GetClientId(GLuint service_id, GLuint* client_id) const;
+
+ // Validates a glBufferSubData, and then calls DoBufferSubData if validation
+ // was successful.
+ void ValidateAndDoBufferSubData(
+ ContextState* context_state, GLenum target, GLintptr offset,
+ GLsizeiptr size, const GLvoid * data);
+
+ // Validates a glBufferData, and then calls DoBufferData if validation was
+ // successful.
+ void ValidateAndDoBufferData(
+ ContextState* context_state, GLenum target, GLsizeiptr size,
+ const GLvoid * data, GLenum usage);
+
+ // Validates a glGetBufferParameteriv, and then calls GetBufferParameteriv if
+ // validation was successful.
+ void ValidateAndDoGetBufferParameteriv(
+ ContextState* context_state, GLenum target, GLenum pname, GLint* params);
+
+ // Sets the target of a buffer. Returns false if the target can not be set.
+ bool SetTarget(Buffer* buffer, GLenum target);
+
+ void set_allow_buffers_on_multiple_targets(bool allow) {
+ allow_buffers_on_multiple_targets_ = allow;
+ }
+
+ size_t mem_represented() const {
+ return memory_tracker_->GetMemRepresented();
+ }
+
+ // Returns whether a buffer with the given usage would be a client side
+ // array.
+ bool IsUsageClientSideArray(GLenum usage);
+
+ // Tells whether a buffer that is emulated using client-side arrays should be
+ // set to a non-zero size.
+ bool UseNonZeroSizeForClientSideArrayBuffer();
+
+ private:
+ friend class Buffer;
+ friend class TestHelper; // Needs access to DoBufferData.
+ friend class BufferManagerTestBase; // Needs access to DoBufferSubData.
+ void StartTracking(Buffer* buffer);
+ void StopTracking(Buffer* buffer);
+
+ Buffer* GetBufferInfoForTarget(ContextState* state, GLenum target);
+
+ // Does a glBufferSubData and updates the appropriate accounting.
+ // Assumes the values have already been validated.
+ void DoBufferSubData(
+ ErrorState* error_state,
+ Buffer* buffer,
+ GLintptr offset,
+ GLsizeiptr size,
+ const GLvoid* data);
+
+ // Does a glBufferData and updates the appropriate accounting.
+ // Assumes the values have already been validated.
+ void DoBufferData(
+ ErrorState* error_state,
+ Buffer* buffer,
+ GLsizeiptr size,
+ GLenum usage,
+ const GLvoid* data);
+
+ // Sets the size, usage and initial data of a buffer.
+ // If data is NULL buffer will be initialized to 0 if shadowed.
+ void SetInfo(
+ Buffer* buffer, GLsizeiptr size, GLenum usage, const GLvoid* data);
+
+ scoped_ptr<MemoryTypeTracker> memory_tracker_;
+ scoped_refptr<FeatureInfo> feature_info_;
+
+ // Info for each buffer in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<Buffer> > BufferMap;
+ BufferMap buffers_;
+
+ // Whether or not buffers can be bound to multiple targets.
+ bool allow_buffers_on_multiple_targets_;
+
+ // Counts the number of Buffer objects allocated with 'this' as their
+ // manager. Allows checking that no Buffer outlives this manager.
+ unsigned int buffer_count_;
+
+ bool have_context_;
+ bool use_client_side_arrays_for_stream_buffers_;
+
+ DISALLOW_COPY_AND_ASSIGN(BufferManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_BUFFER_MANAGER_H_
diff --git a/gpu/command_buffer/service/buffer_manager_unittest.cc b/gpu/command_buffer/service/buffer_manager_unittest.cc
new file mode 100644
index 0000000..77f32dc
--- /dev/null
+++ b/gpu/command_buffer/service/buffer_manager_unittest.cc
@@ -0,0 +1,423 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::Return;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+class BufferManagerTestBase : public GpuServiceTest {
+ protected:
+ void SetUpBase(
+ MemoryTracker* memory_tracker,
+ FeatureInfo* feature_info,
+ const char* extensions) {
+ GpuServiceTest::SetUp();
+ if (feature_info) {
+ TestHelper::SetupFeatureInfoInitExpectations(gl_.get(), extensions);
+ feature_info->Initialize();
+ }
+ error_state_.reset(new MockErrorState());
+ manager_.reset(new BufferManager(memory_tracker, feature_info));
+ }
+
+ virtual void TearDown() {
+ manager_->Destroy(false);
+ manager_.reset();
+ error_state_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ GLenum GetTarget(const Buffer* buffer) const {
+ return buffer->target();
+ }
+
+ void DoBufferData(
+ Buffer* buffer, GLsizeiptr size, GLenum usage, const GLvoid* data,
+ GLenum error) {
+ TestHelper::DoBufferData(
+ gl_.get(), error_state_.get(), manager_.get(),
+ buffer, size, usage, data, error);
+ }
+
+ bool DoBufferSubData(
+ Buffer* buffer, GLintptr offset, GLsizeiptr size,
+ const GLvoid* data) {
+ bool success = true;
+ if (!buffer->CheckRange(offset, size)) {
+ EXPECT_CALL(*error_state_, SetGLError(_, _, GL_INVALID_VALUE, _, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ success = false;
+ } else if (!buffer->IsClientSideArray()) {
+ EXPECT_CALL(*gl_, BufferSubData(
+ buffer->target(), offset, size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ manager_->DoBufferSubData(
+ error_state_.get(), buffer, offset, size, data);
+ return success;
+ }
+
+ scoped_ptr<BufferManager> manager_;
+ scoped_ptr<MockErrorState> error_state_;
+};
+
+class BufferManagerTest : public BufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ SetUpBase(NULL, NULL, "");
+ }
+};
+
+class BufferManagerMemoryTrackerTest : public BufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ mock_memory_tracker_ = new StrictMock<MockMemoryTracker>();
+ SetUpBase(mock_memory_tracker_.get(), NULL, "");
+ }
+
+ scoped_refptr<MockMemoryTracker> mock_memory_tracker_;
+};
+
+class BufferManagerClientSideArraysTest : public BufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ feature_info_ = new FeatureInfo();
+ feature_info_->workarounds_.use_client_side_arrays_for_stream_buffers =
+ true;
+ SetUpBase(NULL, feature_info_.get(), "");
+ }
+
+ scoped_refptr<FeatureInfo> feature_info_;
+};
+
+#define EXPECT_MEMORY_ALLOCATION_CHANGE(old_size, new_size, pool) \
+ EXPECT_CALL(*mock_memory_tracker_.get(), \
+ TrackMemoryAllocatedChange(old_size, new_size, pool)) \
+ .Times(1).RetiresOnSaturation()
+
+TEST_F(BufferManagerTest, Basic) {
+ const GLuint kClientBuffer1Id = 1;
+ const GLuint kServiceBuffer1Id = 11;
+ const GLsizeiptr kBuffer1Size = 123;
+ const GLuint kClientBuffer2Id = 2;
+ // Check we can create buffer.
+ manager_->CreateBuffer(kClientBuffer1Id, kServiceBuffer1Id);
+ // Check buffer got created.
+ Buffer* buffer1 = manager_->GetBuffer(kClientBuffer1Id);
+ ASSERT_TRUE(buffer1 != NULL);
+ EXPECT_EQ(0u, GetTarget(buffer1));
+ EXPECT_EQ(0, buffer1->size());
+ EXPECT_EQ(static_cast<GLenum>(GL_STATIC_DRAW), buffer1->usage());
+ EXPECT_FALSE(buffer1->IsDeleted());
+ EXPECT_FALSE(buffer1->IsClientSideArray());
+ EXPECT_EQ(kServiceBuffer1Id, buffer1->service_id());
+ GLuint client_id = 0;
+ EXPECT_TRUE(manager_->GetClientId(buffer1->service_id(), &client_id));
+ EXPECT_EQ(kClientBuffer1Id, client_id);
+ manager_->SetTarget(buffer1, GL_ELEMENT_ARRAY_BUFFER);
+ EXPECT_EQ(static_cast<GLenum>(GL_ELEMENT_ARRAY_BUFFER), GetTarget(buffer1));
+ // Check we can set its size.
+ DoBufferData(buffer1, kBuffer1Size, GL_DYNAMIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_EQ(kBuffer1Size, buffer1->size());
+ EXPECT_EQ(static_cast<GLenum>(GL_DYNAMIC_DRAW), buffer1->usage());
+ // Check we get nothing for a non-existent buffer.
+ EXPECT_TRUE(manager_->GetBuffer(kClientBuffer2Id) == NULL);
+ // Check trying to remove a non-existent buffer does not crash.
+ manager_->RemoveBuffer(kClientBuffer2Id);
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, ::testing::Pointee(kServiceBuffer1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Check we can't get the buffer after we remove it.
+ manager_->RemoveBuffer(kClientBuffer1Id);
+ EXPECT_TRUE(manager_->GetBuffer(kClientBuffer1Id) == NULL);
+}
+
+TEST_F(BufferManagerMemoryTrackerTest, Basic) {
+ const GLuint kClientBuffer1Id = 1;
+ const GLuint kServiceBuffer1Id = 11;
+ const GLsizeiptr kBuffer1Size1 = 123;
+ const GLsizeiptr kBuffer1Size2 = 456;
+ // Check we can create buffer.
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kManaged);
+ manager_->CreateBuffer(kClientBuffer1Id, kServiceBuffer1Id);
+ // Check buffer got created.
+ Buffer* buffer1 = manager_->GetBuffer(kClientBuffer1Id);
+ ASSERT_TRUE(buffer1 != NULL);
+ manager_->SetTarget(buffer1, GL_ELEMENT_ARRAY_BUFFER);
+ // Check we can set its size.
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, kBuffer1Size1, MemoryTracker::kManaged);
+ DoBufferData(buffer1, kBuffer1Size1, GL_DYNAMIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(kBuffer1Size1, 0, MemoryTracker::kManaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, kBuffer1Size2, MemoryTracker::kManaged);
+ DoBufferData(buffer1, kBuffer1Size2, GL_DYNAMIC_DRAW, NULL, GL_NO_ERROR);
+ // On delete it will get freed.
+ EXPECT_MEMORY_ALLOCATION_CHANGE(kBuffer1Size2, 0, MemoryTracker::kManaged);
+}
+
+TEST_F(BufferManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create buffer.
+ manager_->CreateBuffer(kClient1Id, kService1Id);
+ // Check buffer got created.
+ Buffer* buffer1 = manager_->GetBuffer(kClient1Id);
+ ASSERT_TRUE(buffer1 != NULL);
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_->Destroy(true);
+ // Check the resources were released.
+ buffer1 = manager_->GetBuffer(kClient1Id);
+ ASSERT_TRUE(buffer1 == NULL);
+}
+
+TEST_F(BufferManagerTest, DoBufferSubData) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint8 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_TRUE(DoBufferSubData(buffer, 0, sizeof(data), data));
+ EXPECT_TRUE(DoBufferSubData(buffer, sizeof(data), 0, data));
+ EXPECT_FALSE(DoBufferSubData(buffer, sizeof(data), 1, data));
+ EXPECT_FALSE(DoBufferSubData(buffer, 0, sizeof(data) + 1, data));
+ EXPECT_FALSE(DoBufferSubData(buffer, -1, sizeof(data), data));
+ EXPECT_FALSE(DoBufferSubData(buffer, 0, -1, data));
+ DoBufferData(buffer, 1, GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ const int size = 0x20000;
+ scoped_ptr<uint8[]> temp(new uint8[size]);
+ EXPECT_FALSE(DoBufferSubData(buffer, 0 - size, size, temp.get()));
+ EXPECT_FALSE(DoBufferSubData(buffer, 1, size / 2, temp.get()));
+}
+
+TEST_F(BufferManagerTest, GetRange) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint8 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ const char* buf =
+ static_cast<const char*>(buffer->GetRange(0, sizeof(data)));
+ ASSERT_TRUE(buf != NULL);
+ const char* buf1 =
+ static_cast<const char*>(buffer->GetRange(1, sizeof(data) - 1));
+ EXPECT_EQ(buf + 1, buf1);
+ EXPECT_TRUE(buffer->GetRange(sizeof(data), 1) == NULL);
+ EXPECT_TRUE(buffer->GetRange(0, sizeof(data) + 1) == NULL);
+ EXPECT_TRUE(buffer->GetRange(-1, sizeof(data)) == NULL);
+ EXPECT_TRUE(buffer->GetRange(-0, -1) == NULL);
+ const int size = 0x20000;
+ DoBufferData(buffer, size / 2, GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_TRUE(buffer->GetRange(0 - size, size) == NULL);
+ EXPECT_TRUE(buffer->GetRange(1, size / 2) == NULL);
+}
+
+TEST_F(BufferManagerTest, GetMaxValueForRangeUint8) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint8 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ const uint8 new_data[] = {100, 120, 110};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_TRUE(DoBufferSubData(buffer, 0, sizeof(data), data));
+ GLuint max_value;
+ // Check entire range succeeds.
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 0, 10, GL_UNSIGNED_BYTE, &max_value));
+ EXPECT_EQ(10u, max_value);
+ // Check sub range succeeds.
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 4, 3, GL_UNSIGNED_BYTE, &max_value));
+ EXPECT_EQ(6u, max_value);
+ // Check changing sub range succeeds.
+ EXPECT_TRUE(DoBufferSubData(buffer, 4, sizeof(new_data), new_data));
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 4, 3, GL_UNSIGNED_BYTE, &max_value));
+ EXPECT_EQ(120u, max_value);
+ max_value = 0;
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 0, 10, GL_UNSIGNED_BYTE, &max_value));
+ EXPECT_EQ(120u, max_value);
+ // Check out of range fails.
+ EXPECT_FALSE(buffer->GetMaxValueForRange(
+ 0, 11, GL_UNSIGNED_BYTE, &max_value));
+ EXPECT_FALSE(buffer->GetMaxValueForRange(
+ 10, 1, GL_UNSIGNED_BYTE, &max_value));
+}
+
+TEST_F(BufferManagerTest, GetMaxValueForRangeUint16) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint16 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ const uint16 new_data[] = {100, 120, 110};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_TRUE(DoBufferSubData(buffer, 0, sizeof(data), data));
+ GLuint max_value;
+ // Check entire range succeeds.
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 0, 10, GL_UNSIGNED_SHORT, &max_value));
+ EXPECT_EQ(10u, max_value);
+ // Check odd offset fails for GL_UNSIGNED_SHORT.
+ EXPECT_FALSE(buffer->GetMaxValueForRange(
+ 1, 10, GL_UNSIGNED_SHORT, &max_value));
+ // Check sub range succeeds.
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 8, 3, GL_UNSIGNED_SHORT, &max_value));
+ EXPECT_EQ(6u, max_value);
+ // Check changing sub range succeeds.
+ EXPECT_TRUE(DoBufferSubData(buffer, 8, sizeof(new_data), new_data));
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 8, 3, GL_UNSIGNED_SHORT, &max_value));
+ EXPECT_EQ(120u, max_value);
+ max_value = 0;
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 0, 10, GL_UNSIGNED_SHORT, &max_value));
+ EXPECT_EQ(120u, max_value);
+ // Check out of range fails.
+ EXPECT_FALSE(buffer->GetMaxValueForRange(
+ 0, 11, GL_UNSIGNED_SHORT, &max_value));
+ EXPECT_FALSE(buffer->GetMaxValueForRange(
+ 20, 1, GL_UNSIGNED_SHORT, &max_value));
+}
+
+TEST_F(BufferManagerTest, GetMaxValueForRangeUint32) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint32 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ const uint32 new_data[] = {100, 120, 110};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_TRUE(DoBufferSubData(buffer, 0, sizeof(data), data));
+ GLuint max_value;
+ // Check entire range succeeds.
+ EXPECT_TRUE(
+ buffer->GetMaxValueForRange(0, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(10u, max_value);
+ // Check non-aligned offsets fail for GL_UNSIGNED_INT.
+ EXPECT_FALSE(
+ buffer->GetMaxValueForRange(1, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_FALSE(
+ buffer->GetMaxValueForRange(2, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_FALSE(
+ buffer->GetMaxValueForRange(3, 10, GL_UNSIGNED_INT, &max_value));
+ // Check sub range succeeds.
+ EXPECT_TRUE(buffer->GetMaxValueForRange(16, 3, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(6u, max_value);
+ // Check changing sub range succeeds.
+ EXPECT_TRUE(DoBufferSubData(buffer, 16, sizeof(new_data), new_data));
+ EXPECT_TRUE(buffer->GetMaxValueForRange(16, 3, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(120u, max_value);
+ max_value = 0;
+ EXPECT_TRUE(buffer->GetMaxValueForRange(0, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(120u, max_value);
+ // Check out of range fails.
+ EXPECT_FALSE(
+ buffer->GetMaxValueForRange(0, 11, GL_UNSIGNED_INT, &max_value));
+ EXPECT_FALSE(
+ buffer->GetMaxValueForRange(40, 1, GL_UNSIGNED_INT, &max_value));
+}
+
+TEST_F(BufferManagerTest, UseDeletedBuffer) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint32 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ scoped_refptr<Buffer> buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer.get() != NULL);
+ manager_->SetTarget(buffer.get(), GL_ARRAY_BUFFER);
+ // Remove buffer
+ manager_->RemoveBuffer(kClientBufferId);
+ // Use it after removing
+ DoBufferData(buffer.get(), sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, ::testing::Pointee(kServiceBufferId)))
+ .Times(1)
+ .RetiresOnSaturation();
+ buffer = NULL;
+}
+
+// Test buffers get shadowed when they are supposed to be.
+TEST_F(BufferManagerClientSideArraysTest, StreamBuffersAreShadowed) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ static const uint32 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STREAM_DRAW, data, GL_NO_ERROR);
+ EXPECT_TRUE(buffer->IsClientSideArray());
+ EXPECT_EQ(0, memcmp(data, buffer->GetRange(0, sizeof(data)), sizeof(data)));
+ DoBufferData(buffer, sizeof(data), GL_DYNAMIC_DRAW, data, GL_NO_ERROR);
+ EXPECT_FALSE(buffer->IsClientSideArray());
+}
+
+TEST_F(BufferManagerTest, MaxValueCacheClearedCorrectly) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint32 data1[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ const uint32 data2[] = {11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
+ const uint32 data3[] = {30, 29, 28};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ GLuint max_value;
+ // Load the buffer with some initial data, and then get the maximum value for
+ // a range, which has the side effect of caching it.
+ DoBufferData(buffer, sizeof(data1), GL_STATIC_DRAW, data1, GL_NO_ERROR);
+ EXPECT_TRUE(
+ buffer->GetMaxValueForRange(0, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(10u, max_value);
+ // Check that any cached values are invalidated if the buffer is reloaded
+ // with the same amount of data (but different content).
+ ASSERT_EQ(sizeof(data2), sizeof(data1));
+ DoBufferData(buffer, sizeof(data2), GL_STATIC_DRAW, data2, GL_NO_ERROR);
+ EXPECT_TRUE(
+ buffer->GetMaxValueForRange(0, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(20u, max_value);
+ // Check that any cached values are invalidated if the buffer is reloaded
+ // with entirely different content.
+ ASSERT_NE(sizeof(data3), sizeof(data1));
+ DoBufferData(buffer, sizeof(data3), GL_STATIC_DRAW, data3, GL_NO_ERROR);
+ EXPECT_TRUE(
+ buffer->GetMaxValueForRange(0, 3, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(30u, max_value);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/cmd_buffer_engine.h b/gpu/command_buffer/service/cmd_buffer_engine.h
new file mode 100644
index 0000000..75e6069
--- /dev/null
+++ b/gpu/command_buffer/service/cmd_buffer_engine.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines the CommandBufferEngine class, which provides the main
+// loop for the service, exposes the RPC API, and manages the command parser.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CMD_BUFFER_ENGINE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CMD_BUFFER_ENGINE_H_
+
+#include "base/basictypes.h"
+#include "gpu/command_buffer/common/buffer.h"
+
+namespace gpu {
+
+class CommandBufferEngine {
+ public:
+ CommandBufferEngine() {
+ }
+
+ virtual ~CommandBufferEngine() {
+ }
+
+ // Gets the registered shared memory buffer for the given identifier.
+ // Parameters:
+ // shm_id: the identifier for the shared memory buffer.
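+ //
+ // For example (illustrative; mirrors CommonDecoder::GetAddressAndCheckSize):
+ //   scoped_refptr<gpu::Buffer> buffer = GetSharedMemoryBuffer(shm_id);
+ //   void* address =
+ //       buffer.get() ? buffer->GetDataAddress(offset, size) : NULL;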
+ virtual scoped_refptr<gpu::Buffer> GetSharedMemoryBuffer(int32 shm_id) = 0;
+
+ // Sets the token value.
+ virtual void set_token(int32 token) = 0;
+
+ // Sets the shared memory buffer used for commands.
+ virtual bool SetGetBuffer(int32 transfer_buffer_id) = 0;
+
+ // Sets the "get" pointer. Returns false if the offset is out of range.
+ virtual bool SetGetOffset(int32 offset) = 0;
+
+ // Gets the "get" pointer.
+ virtual int32 GetGetOffset() = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CommandBufferEngine);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_CMD_BUFFER_ENGINE_H_
diff --git a/gpu/command_buffer/service/cmd_parser.cc b/gpu/command_buffer/service/cmd_parser.cc
new file mode 100644
index 0000000..ffcdfff
--- /dev/null
+++ b/gpu/command_buffer/service/cmd_parser.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of the command parser.
+
+#include "gpu/command_buffer/service/cmd_parser.h"
+
+#include "base/logging.h"
+#include "base/debug/trace_event.h"
+
+namespace gpu {
+
+CommandParser::CommandParser(AsyncAPIInterface* handler)
+ : get_(0),
+ put_(0),
+ buffer_(NULL),
+ entry_count_(0),
+ handler_(handler) {
+}
+
+void CommandParser::SetBuffer(
+ void* shm_address,
+ size_t shm_size,
+ ptrdiff_t offset,
+ size_t size) {
+ // check proper alignments.
+ DCHECK_EQ(0, (reinterpret_cast<intptr_t>(shm_address)) % 4);
+ DCHECK_EQ(0, offset % 4);
+ DCHECK_EQ(0u, size % 4);
+ // check that the command buffer fits into the memory buffer.
+ DCHECK_GE(shm_size, offset + size);
+ get_ = 0;
+ put_ = 0;
+ char* buffer_begin = static_cast<char*>(shm_address) + offset;
+ buffer_ = reinterpret_cast<CommandBufferEntry*>(buffer_begin);
+ entry_count_ = size / 4;
+}
+
+// Processes up to num_commands commands by handing the contiguous block of
+// entries between get_ and put_ (or the end of the buffer) to the handler.
+// Note that:
+// - validation needs to happen on a copy of the data (to avoid race
+// conditions); this function leaves header and argument validation to the
+// handler, which receives a pointer directly into the command buffer.
+// - get_ is modified *after* the commands have been executed, and wraps back
+// to 0 when it reaches the end of the buffer.
+error::Error CommandParser::ProcessCommands(int num_commands) {
+ int num_entries = put_ < get_ ? entry_count_ - get_ : put_ - get_;
+ int entries_processed = 0;
+
+ error::Error result = handler_->DoCommands(
+ num_commands, buffer_ + get_, num_entries, &entries_processed);
+
+ get_ += entries_processed;
+ if (get_ == entry_count_)
+ get_ = 0;
+
+ return result;
+}
+
+// Processes commands while the buffer is not empty. Stops if an error is
+// encountered.
+error::Error CommandParser::ProcessAllCommands() {
+ while (!IsEmpty()) {
+ error::Error error = ProcessCommands(kParseCommandsSlice);
+ if (error)
+ return error;
+ }
+ return error::kNoError;
+}
+
+// Decodes multiple commands and dispatches each one to DoCommand.
+// NOTE: buffer is a pointer into the command buffer. As such, it could be
+// changed by a (malicious) client at any time, so if validation has to happen,
+// it should operate on a copy of the data.
+error::Error AsyncAPIInterface::DoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ int commands_to_process = num_commands;
+ error::Error result = error::kNoError;
+ const CommandBufferEntry* cmd_data =
+ static_cast<const CommandBufferEntry*>(buffer);
+ int process_pos = 0;
+
+ while (process_pos < num_entries && result == error::kNoError &&
+ commands_to_process--) {
+ CommandHeader header = cmd_data->value_header;
+ if (header.size == 0) {
+ DVLOG(1) << "Error: zero sized command in command buffer";
+ return error::kInvalidSize;
+ }
+
+ if (static_cast<int>(header.size) + process_pos > num_entries) {
+ DVLOG(1) << "Error: get offset out of bounds";
+ return error::kOutOfBounds;
+ }
+
+ const unsigned int command = header.command;
+ const unsigned int arg_count = header.size - 1;
+
+ result = DoCommand(command, arg_count, cmd_data);
+
+ if (result != error::kDeferCommandUntilLater) {
+ process_pos += header.size;
+ cmd_data += header.size;
+ }
+ }
+
+ if (entries_processed)
+ *entries_processed = process_pos;
+
+ return result;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/cmd_parser.h b/gpu/command_buffer/service/cmd_parser.h
new file mode 100644
index 0000000..ac52d86
--- /dev/null
+++ b/gpu/command_buffer/service/cmd_parser.h
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the command parser class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CMD_PARSER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CMD_PARSER_H_
+
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class AsyncAPIInterface;
+
+// Command parser class. This class parses commands from a shared memory
+// buffer, implementing an asynchronous RPC mechanism.
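+//
+// A minimal usage sketch (illustrative only; 'decoder' stands for any
+// AsyncAPIInterface implementation):
+//
+//   CommandParser parser(&decoder);
+//   parser.SetBuffer(shm_address, shm_size, offset, size);
+//   parser.set_put(put_offset);
+//   while (!parser.IsEmpty()) {
+//     error::Error error =
+//         parser.ProcessCommands(CommandParser::kParseCommandsSlice);
+//     if (error != error::kNoError)
+//       break;
+//   }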
+class GPU_EXPORT CommandParser {
+ public:
+ static const int kParseCommandsSlice = 20;
+
+ explicit CommandParser(AsyncAPIInterface* handler);
+
+ // Sets the buffer to read commands from.
+ void SetBuffer(
+ void* shm_address,
+ size_t shm_size,
+ ptrdiff_t offset,
+ size_t size);
+
+ // Gets the "get" pointer. The get pointer is an index into the command
+ // buffer considered as an array of CommandBufferEntry.
+ CommandBufferOffset get() const { return get_; }
+
+ // Sets the "get" pointer. The get pointer is an index into the command buffer
+ // considered as an array of CommandBufferEntry.
+ bool set_get(CommandBufferOffset get) {
+ if (get >= 0 && get < entry_count_) {
+ get_ = get;
+ return true;
+ }
+ return false;
+ }
+
+ // Sets the "put" pointer. The put pointer is an index into the command
+ // buffer considered as an array of CommandBufferEntry.
+ void set_put(CommandBufferOffset put) { put_ = put; }
+
+ // Gets the "put" pointer. The put pointer is an index into the command
+ // buffer considered as an array of CommandBufferEntry.
+ CommandBufferOffset put() const { return put_; }
+
+ // Checks whether there are commands to process.
+ bool IsEmpty() const { return put_ == get_; }
+
+ // Processes up to num_commands commands, updating the get pointer as
+ // commands are consumed.
+ error::Error ProcessCommands(int num_commands);
+
+ // Processes all commands until get == put.
+ error::Error ProcessAllCommands();
+
+ private:
+ CommandBufferOffset get_;
+ CommandBufferOffset put_;
+ CommandBufferEntry* buffer_;
+ int32 entry_count_;
+ AsyncAPIInterface* handler_;
+};
+
+// This class defines the interface for an asynchronous API handler, that
+// is responsible for de-multiplexing commands and their arguments.
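+//
+// A concrete handler typically switches on the command id; a sketch
+// (the command id and helper below are hypothetical, not part of this header):
+//
+//   error::Error MyDecoder::DoCommand(unsigned int command,
+//                                     unsigned int arg_count,
+//                                     const void* cmd_data) {
+//     switch (command) {
+//       case kMyDrawCommand:
+//         return HandleDraw(arg_count, cmd_data);
+//       default:
+//         return error::kUnknownCommand;
+//     }
+//   }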
+class GPU_EXPORT AsyncAPIInterface {
+ public:
+ AsyncAPIInterface() {}
+ virtual ~AsyncAPIInterface() {}
+
+ // Executes a single command.
+ // Parameters:
+ // command: the command index.
+ // arg_count: the number of CommandBufferEntry arguments.
+ // cmd_data: the command data.
+ // Returns:
+ // error::kNoError if no error was found, one of
+ // error::Error otherwise.
+ virtual error::Error DoCommand(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data) = 0;
+
+ // Executes multiple commands.
+ // Parameters:
+ // num_commands: maximum number of commands to execute from buffer.
+ // buffer: pointer to first command entry to process.
+ // num_entries: number of sequential command buffer entries in buffer.
+ // entries_processed: if not 0, is set to the number of entries processed.
+ virtual error::Error DoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed);
+
+ // Returns a name for a command. Useful for logging / debugging.
+ virtual const char* GetCommandName(unsigned int command_id) const = 0;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_CMD_PARSER_H_
diff --git a/gpu/command_buffer/service/cmd_parser_test.cc b/gpu/command_buffer/service/cmd_parser_test.cc
new file mode 100644
index 0000000..d880830
--- /dev/null
+++ b/gpu/command_buffer/service/cmd_parser_test.cc
@@ -0,0 +1,313 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the command parser.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/cmd_parser.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+using testing::_;
+using testing::Invoke;
+using testing::Mock;
+using testing::Return;
+using testing::Sequence;
+using testing::SetArgPointee;
+using testing::Truly;
+
+// Test fixture for CommandParser test - Creates a mock AsyncAPIInterface, and
+// a fixed size memory buffer. Also provides a simple API to create a
+// CommandParser.
+class CommandParserTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock(false));
+ buffer_entry_count_ = 20;
+ buffer_.reset(new CommandBufferEntry[buffer_entry_count_]);
+ }
+ virtual void TearDown() {}
+
+ void AddDoCommandsExpect(error::Error _return,
+ unsigned int num_commands,
+ int num_entries,
+ int num_processed) {
+ EXPECT_CALL(*api_mock_, DoCommands(num_commands, _, num_entries, _))
+ .InSequence(sequence_)
+ .WillOnce(DoAll(SetArgPointee<3>(num_processed), Return(_return)));
+ }
+
+ // Creates a parser, with a buffer of the specified size (in entries).
+ CommandParser *MakeParser(unsigned int entry_count) {
+ size_t shm_size = buffer_entry_count_ *
+ sizeof(CommandBufferEntry); // NOLINT
+ size_t command_buffer_size = entry_count *
+ sizeof(CommandBufferEntry); // NOLINT
+ DCHECK_LE(command_buffer_size, shm_size);
+ CommandParser* parser = new CommandParser(api_mock());
+
+ parser->SetBuffer(buffer(), shm_size, 0, command_buffer_size);
+ return parser;
+ }
+
+ unsigned int buffer_entry_count() { return 20; }
+ AsyncAPIMock *api_mock() { return api_mock_.get(); }
+ CommandBufferEntry *buffer() { return buffer_.get(); }
+ private:
+ unsigned int buffer_entry_count_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<CommandBufferEntry[]> buffer_;
+ Sequence sequence_;
+};
+
+// Tests initialization conditions.
+TEST_F(CommandParserTest, TestInit) {
+ scoped_ptr<CommandParser> parser(MakeParser(10));
+ EXPECT_EQ(0, parser->get());
+ EXPECT_EQ(0, parser->put());
+ EXPECT_TRUE(parser->IsEmpty());
+}
+
+// Tests simple commands.
+TEST_F(CommandParserTest, TestSimple) {
+ scoped_ptr<CommandParser> parser(MakeParser(10));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+ // add a single command, no args
+ header.size = 1;
+ header.command = 123;
+ buffer()[put++].value_header = header;
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ AddDoCommandsExpect(error::kNoError, 1, 1, 1);
+ EXPECT_EQ(error::kNoError, parser->ProcessCommands(1));
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ // add a single command, 2 args
+ header.size = 3;
+ header.command = 456;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 2134;
+ buffer()[put++].value_float = 1.f;
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ AddDoCommandsExpect(error::kNoError, 1, 3, 3);
+ EXPECT_EQ(error::kNoError, parser->ProcessCommands(1));
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+}
+
+// Tests having multiple commands in the buffer.
+TEST_F(CommandParserTest, TestMultipleCommands) {
+ scoped_ptr<CommandParser> parser(MakeParser(10));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+ // add 2 commands, test with single ProcessCommands()
+ header.size = 2;
+ header.command = 789;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 5151;
+
+ CommandBufferOffset put_cmd2 = put;
+ header.size = 2;
+ header.command = 876;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 3434;
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ // Process up to 1 command. 4 entries remaining.
+ AddDoCommandsExpect(error::kNoError, 1, 4, 2);
+ EXPECT_EQ(error::kNoError, parser->ProcessCommands(1));
+ EXPECT_EQ(put_cmd2, parser->get());
+
+ // Process up to 1 command. 2 entries remaining.
+ AddDoCommandsExpect(error::kNoError, 1, 2, 2);
+ EXPECT_EQ(error::kNoError, parser->ProcessCommands(1));
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ // add 2 commands again, test with ProcessAllCommands()
+ header.size = 2;
+ header.command = 123;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 5656;
+
+ header.size = 2;
+ header.command = 321;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 7878;
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ // 4 entries remaining.
+ AddDoCommandsExpect(
+ error::kNoError, CommandParser::kParseCommandsSlice, 4, 4);
+ EXPECT_EQ(error::kNoError, parser->ProcessAllCommands());
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+}
+
+// Tests that the parser will wrap correctly at the end of the buffer.
+TEST_F(CommandParserTest, TestWrap) {
+ scoped_ptr<CommandParser> parser(MakeParser(5));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+ // add 3 commands with no args (1 word each)
+ for (unsigned int i = 0; i < 3; ++i) {
+ header.size = 1;
+ header.command = i;
+ buffer()[put++].value_header = header;
+ }
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ // Process up to 10 commands. 3 entries remaining to put.
+ AddDoCommandsExpect(error::kNoError, 10, 3, 3);
+ EXPECT_EQ(error::kNoError, parser->ProcessCommands(10));
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ // add 1 command with 1 arg (2 words). That should put us at the end of the
+ // buffer.
+ header.size = 2;
+ header.command = 3;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 5;
+
+ DCHECK_EQ(5, put);
+ put = 0;
+
+ // add 1 command with 1 arg (2 words).
+ header.size = 2;
+ header.command = 4;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 6;
+
+ // 2 entries remaining to end of buffer.
+ AddDoCommandsExpect(
+ error::kNoError, CommandParser::kParseCommandsSlice, 2, 2);
+ // 2 entries remaining to put.
+ AddDoCommandsExpect(
+ error::kNoError, CommandParser::kParseCommandsSlice, 2, 2);
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ EXPECT_EQ(error::kNoError, parser->ProcessAllCommands());
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+}
+
+// Tests error conditions.
+TEST_F(CommandParserTest, TestError) {
+ const unsigned int kNumEntries = 5;
+ scoped_ptr<CommandParser> parser(MakeParser(kNumEntries));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+ EXPECT_FALSE(parser->set_get(-1));
+ EXPECT_FALSE(parser->set_get(kNumEntries));
+
+ // Generate a command with size 0.
+ header.size = 0;
+ header.command = 3;
+ buffer()[put++].value_header = header;
+
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ AddDoCommandsExpect(
+ error::kInvalidSize, CommandParser::kParseCommandsSlice, 1, 0);
+ EXPECT_EQ(error::kInvalidSize,
+ parser->ProcessAllCommands());
+ // check that no DoCommand call was made.
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ parser.reset(MakeParser(5));
+ put = parser->put();
+
+ // Generate a command with size 6, extends beyond the end of the buffer.
+ header.size = 6;
+ header.command = 3;
+ buffer()[put++].value_header = header;
+
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ AddDoCommandsExpect(
+ error::kOutOfBounds, CommandParser::kParseCommandsSlice, 1, 0);
+ EXPECT_EQ(error::kOutOfBounds,
+ parser->ProcessAllCommands());
+ // check that no DoCommand call was made.
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ parser.reset(MakeParser(5));
+ put = parser->put();
+
+ // Generates 2 commands.
+ header.size = 1;
+ header.command = 3;
+ buffer()[put++].value_header = header;
+ CommandBufferOffset put_post_fail = put;
+ header.size = 1;
+ header.command = 4;
+ buffer()[put++].value_header = header;
+
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+ // have the first command fail to parse.
+ AddDoCommandsExpect(
+ error::kUnknownCommand, CommandParser::kParseCommandsSlice, 2, 1);
+ EXPECT_EQ(error::kUnknownCommand,
+ parser->ProcessAllCommands());
+ // check that only one command was executed, and that get reflects that
+ // correctly.
+ EXPECT_EQ(put_post_fail, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+ // make the second one succeed, and check that the parser recovered fine.
+ AddDoCommandsExpect(
+ error::kNoError, CommandParser::kParseCommandsSlice, 1, 1);
+ EXPECT_EQ(error::kNoError, parser->ProcessAllCommands());
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+}
+
+TEST_F(CommandParserTest, SetBuffer) {
+ scoped_ptr<CommandParser> parser(MakeParser(3));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+ // add a single command, 1 arg
+ header.size = 2;
+ header.command = 123;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 456;
+ parser->set_put(put);
+
+ AddDoCommandsExpect(
+ error::kNoError, CommandParser::kParseCommandsSlice, 2, 2);
+ EXPECT_EQ(error::kNoError, parser->ProcessAllCommands());
+ // We should have advanced 2 entries
+ EXPECT_EQ(2, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ scoped_ptr<CommandBufferEntry[]> buffer2(new CommandBufferEntry[2]);
+ parser->SetBuffer(
+ buffer2.get(), sizeof(CommandBufferEntry) * 2, 0,
+ sizeof(CommandBufferEntry) * 2);
+ // The put and get should have reset to 0.
+ EXPECT_EQ(0, parser->get());
+ EXPECT_EQ(0, parser->put());
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/command_buffer_service.cc b/gpu/command_buffer/service/command_buffer_service.cc
new file mode 100644
index 0000000..2c732c6
--- /dev/null
+++ b/gpu/command_buffer/service/command_buffer_service.cc
@@ -0,0 +1,192 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/command_buffer_service.h"
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/debug/trace_event.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/common/command_buffer_shared.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+
+using ::base::SharedMemory;
+
+namespace gpu {
+
+CommandBufferService::CommandBufferService(
+ TransferBufferManagerInterface* transfer_buffer_manager)
+ : ring_buffer_id_(-1),
+ shared_state_(NULL),
+ num_entries_(0),
+ get_offset_(0),
+ put_offset_(0),
+ transfer_buffer_manager_(transfer_buffer_manager),
+ token_(0),
+ generation_(0),
+ error_(error::kNoError),
+ context_lost_reason_(error::kUnknown) {
+}
+
+CommandBufferService::~CommandBufferService() {
+}
+
+bool CommandBufferService::Initialize() {
+ return true;
+}
+
+CommandBufferService::State CommandBufferService::GetLastState() {
+ State state;
+ state.num_entries = num_entries_;
+ state.get_offset = get_offset_;
+ state.put_offset = put_offset_;
+ state.token = token_;
+ state.error = error_;
+ state.context_lost_reason = context_lost_reason_;
+ state.generation = ++generation_;
+
+ return state;
+}
+
+int32 CommandBufferService::GetLastToken() {
+ return GetLastState().token;
+}
+
+void CommandBufferService::UpdateState() {
+ if (shared_state_) {
+ CommandBufferService::State state = GetLastState();
+ shared_state_->Write(state);
+ }
+}
+
+void CommandBufferService::WaitForTokenInRange(int32 start, int32 end) {
+ DCHECK(error_ != error::kNoError || InRange(start, end, token_));
+}
+
+void CommandBufferService::WaitForGetOffsetInRange(int32 start, int32 end) {
+ DCHECK(error_ != error::kNoError || InRange(start, end, get_offset_));
+}
+
+void CommandBufferService::Flush(int32 put_offset) {
+ if (put_offset < 0 || put_offset > num_entries_) {
+ error_ = gpu::error::kOutOfBounds;
+ return;
+ }
+
+ put_offset_ = put_offset;
+
+ if (!put_offset_change_callback_.is_null())
+ put_offset_change_callback_.Run();
+}
+
+void CommandBufferService::SetGetBuffer(int32 transfer_buffer_id) {
+ DCHECK_EQ(-1, ring_buffer_id_);
+ DCHECK_EQ(put_offset_, get_offset_); // Only if it's empty.
+ // If the buffer is invalid we handle it gracefully.
+ // This means ring_buffer_ can be NULL.
+ ring_buffer_ = GetTransferBuffer(transfer_buffer_id);
+ ring_buffer_id_ = transfer_buffer_id;
+ int32 size = ring_buffer_.get() ? ring_buffer_->size() : 0;
+ num_entries_ = size / sizeof(CommandBufferEntry);
+ put_offset_ = 0;
+ SetGetOffset(0);
+ if (!get_buffer_change_callback_.is_null()) {
+ get_buffer_change_callback_.Run(ring_buffer_id_);
+ }
+
+ UpdateState();
+}
+
+void CommandBufferService::SetSharedStateBuffer(
+ scoped_ptr<BufferBacking> shared_state_buffer) {
+ shared_state_buffer_ = shared_state_buffer.Pass();
+ DCHECK(shared_state_buffer_->GetSize() >= sizeof(*shared_state_));
+
+ shared_state_ =
+ static_cast<CommandBufferSharedState*>(shared_state_buffer_->GetMemory());
+
+ UpdateState();
+}
+
+void CommandBufferService::SetGetOffset(int32 get_offset) {
+ DCHECK(get_offset >= 0 && get_offset < num_entries_);
+ get_offset_ = get_offset;
+}
+
+scoped_refptr<Buffer> CommandBufferService::CreateTransferBuffer(size_t size,
+ int32* id) {
+ *id = -1;
+
+ scoped_ptr<SharedMemory> shared_memory(new SharedMemory());
+ if (!shared_memory->CreateAndMapAnonymous(size))
+ return NULL;
+
+ static int32 next_id = 1;
+ *id = next_id++;
+
+ if (!RegisterTransferBuffer(
+ *id, MakeBackingFromSharedMemory(shared_memory.Pass(), size))) {
+ *id = -1;
+ return NULL;
+ }
+
+ return GetTransferBuffer(*id);
+}
+
+void CommandBufferService::DestroyTransferBuffer(int32 id) {
+ transfer_buffer_manager_->DestroyTransferBuffer(id);
+ if (id == ring_buffer_id_) {
+ ring_buffer_id_ = -1;
+ ring_buffer_ = NULL;
+ num_entries_ = 0;
+ get_offset_ = 0;
+ put_offset_ = 0;
+ }
+}
+
+scoped_refptr<Buffer> CommandBufferService::GetTransferBuffer(int32 id) {
+ return transfer_buffer_manager_->GetTransferBuffer(id);
+}
+
+bool CommandBufferService::RegisterTransferBuffer(
+ int32 id,
+ scoped_ptr<BufferBacking> buffer) {
+ return transfer_buffer_manager_->RegisterTransferBuffer(id, buffer.Pass());
+}
+
+void CommandBufferService::SetToken(int32 token) {
+ token_ = token;
+ UpdateState();
+}
+
+void CommandBufferService::SetParseError(error::Error error) {
+ if (error_ == error::kNoError) {
+ error_ = error;
+ if (!parse_error_callback_.is_null())
+ parse_error_callback_.Run();
+ }
+}
+
+void CommandBufferService::SetContextLostReason(
+ error::ContextLostReason reason) {
+ context_lost_reason_ = reason;
+}
+
+void CommandBufferService::SetPutOffsetChangeCallback(
+ const base::Closure& callback) {
+ put_offset_change_callback_ = callback;
+}
+
+void CommandBufferService::SetGetBufferChangeCallback(
+ const GetBufferChangedCallback& callback) {
+ get_buffer_change_callback_ = callback;
+}
+
+void CommandBufferService::SetParseErrorCallback(
+ const base::Closure& callback) {
+ parse_error_callback_ = callback;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/command_buffer_service.h b/gpu/command_buffer/service/command_buffer_service.h
new file mode 100644
index 0000000..ac23301
--- /dev/null
+++ b/gpu/command_buffer/service/command_buffer_service.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_COMMAND_BUFFER_SERVICE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_COMMAND_BUFFER_SERVICE_H_
+
+#include "base/callback.h"
+#include "base/memory/shared_memory.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/common/command_buffer_shared.h"
+
+namespace gpu {
+
+class TransferBufferManagerInterface;
+
+class GPU_EXPORT CommandBufferServiceBase : public CommandBuffer {
+ public:
+ // Sets the current get offset. This can be called from any thread.
+ virtual void SetGetOffset(int32 get_offset) = 0;
+
+ // Gets the transfer buffer associated with an ID. Returns a null buffer
+ // for ID 0.
+ virtual scoped_refptr<gpu::Buffer> GetTransferBuffer(int32 id) = 0;
+
+ // Allows the reader to update the current token value.
+ virtual void SetToken(int32 token) = 0;
+
+ // Allows the reader to set the current parse error.
+ virtual void SetParseError(error::Error) = 0;
+
+ // Allows the reader to set the current context lost reason.
+ // NOTE: if calling this in conjunction with SetParseError,
+ // call this first.
+ virtual void SetContextLostReason(error::ContextLostReason) = 0;
+};
+
+// An object that implements a shared memory command buffer and a synchronous
+// API to manage the put and get pointers.
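+//
+// A minimal usage sketch (error handling omitted; kRingBufferSize and
+// put_offset are illustrative names, not part of this header):
+//
+//   CommandBufferService command_buffer(transfer_buffer_manager);
+//   command_buffer.Initialize();
+//   int32 ring_buffer_id = -1;
+//   command_buffer.CreateTransferBuffer(kRingBufferSize, &ring_buffer_id);
+//   command_buffer.SetGetBuffer(ring_buffer_id);
+//   command_buffer.Flush(put_offset);  // Reader is notified via callback.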
+class GPU_EXPORT CommandBufferService : public CommandBufferServiceBase {
+ public:
+ typedef base::Callback<bool(int32)> GetBufferChangedCallback;
+ explicit CommandBufferService(
+ TransferBufferManagerInterface* transfer_buffer_manager);
+ virtual ~CommandBufferService();
+
+ // CommandBuffer implementation:
+ virtual bool Initialize() OVERRIDE;
+ virtual State GetLastState() OVERRIDE;
+ virtual int32 GetLastToken() OVERRIDE;
+ virtual void Flush(int32 put_offset) OVERRIDE;
+ virtual void WaitForTokenInRange(int32 start, int32 end) OVERRIDE;
+ virtual void WaitForGetOffsetInRange(int32 start, int32 end) OVERRIDE;
+ virtual void SetGetBuffer(int32 transfer_buffer_id) OVERRIDE;
+ virtual scoped_refptr<Buffer> CreateTransferBuffer(size_t size,
+ int32* id) OVERRIDE;
+ virtual void DestroyTransferBuffer(int32 id) OVERRIDE;
+
+ // CommandBufferServiceBase implementation:
+ virtual void SetGetOffset(int32 get_offset) OVERRIDE;
+ virtual scoped_refptr<Buffer> GetTransferBuffer(int32 id) OVERRIDE;
+ virtual void SetToken(int32 token) OVERRIDE;
+ virtual void SetParseError(error::Error error) OVERRIDE;
+ virtual void SetContextLostReason(error::ContextLostReason) OVERRIDE;
+
+ // Sets a callback that is called whenever the put offset is changed. When
+ // processing synchronously, the callback must not return until some progress
+ // has been made (unless the command buffer is empty), i.e. the get offset
+ // must have changed. It need not process the entire command buffer though.
+ // This allows concurrency between the writer and the reader while giving the
+ // writer a means of waiting for the reader to make some progress before
+ // attempting to write more to the command buffer.
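+ //
+ // For example (illustrative only; 'scheduler' and its PutChanged method are
+ // placeholders for whatever object processes the commands):
+ //   command_buffer->SetPutOffsetChangeCallback(
+ //       base::Bind(&Scheduler::PutChanged, base::Unretained(scheduler)));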
+ virtual void SetPutOffsetChangeCallback(const base::Closure& callback);
+ // Sets a callback that is called whenever the get buffer is changed.
+ virtual void SetGetBufferChangeCallback(
+ const GetBufferChangedCallback& callback);
+ virtual void SetParseErrorCallback(const base::Closure& callback);
+
+ // Sets up the shared memory that the shared state should be copied into.
+ void SetSharedStateBuffer(scoped_ptr<BufferBacking> shared_state_buffer);
+
+ // Copy the current state into the shared state transfer buffer.
+ void UpdateState();
+
+ // Registers an existing shared memory object and gets an ID that can be
+ // used to identify it in the command buffer.
+ bool RegisterTransferBuffer(int32 id, scoped_ptr<BufferBacking> buffer);
+
+ private:
+ int32 ring_buffer_id_;
+ scoped_refptr<Buffer> ring_buffer_;
+ scoped_ptr<BufferBacking> shared_state_buffer_;
+ CommandBufferSharedState* shared_state_;
+ int32 num_entries_;
+ int32 get_offset_;
+ int32 put_offset_;
+ base::Closure put_offset_change_callback_;
+ GetBufferChangedCallback get_buffer_change_callback_;
+ base::Closure parse_error_callback_;
+ TransferBufferManagerInterface* transfer_buffer_manager_;
+ int32 token_;
+ uint32 generation_;
+ error::Error error_;
+ error::ContextLostReason context_lost_reason_;
+
+ DISALLOW_COPY_AND_ASSIGN(CommandBufferService);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_COMMAND_BUFFER_SERVICE_H_
diff --git a/gpu/command_buffer/service/command_buffer_service_unittest.cc b/gpu/command_buffer/service/command_buffer_service_unittest.cc
new file mode 100644
index 0000000..229aafa
--- /dev/null
+++ b/gpu/command_buffer/service/command_buffer_service_unittest.cc
@@ -0,0 +1,156 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/threading/thread.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using base::SharedMemory;
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgumentPointee;
+using testing::StrictMock;
+
+namespace gpu {
+
+class CommandBufferServiceTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ EXPECT_TRUE(manager->Initialize());
+ }
+ command_buffer_.reset(
+ new CommandBufferService(transfer_buffer_manager_.get()));
+ EXPECT_TRUE(command_buffer_->Initialize());
+ }
+
+ int32 GetGetOffset() {
+ return command_buffer_->GetLastState().get_offset;
+ }
+
+ int32 GetPutOffset() {
+ return command_buffer_->GetLastState().put_offset;
+ }
+
+ int32 GetToken() {
+ return command_buffer_->GetLastState().token;
+ }
+
+ int32 GetError() {
+ return command_buffer_->GetLastState().error;
+ }
+
+ bool Initialize(size_t size) {
+ int32 id;
+ command_buffer_->CreateTransferBuffer(size, &id);
+ EXPECT_GT(id, 0);
+ command_buffer_->SetGetBuffer(id);
+ return true;
+ }
+
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<CommandBufferService> command_buffer_;
+};
+
+TEST_F(CommandBufferServiceTest, InitializesCommandBuffer) {
+ EXPECT_TRUE(Initialize(1024));
+ CommandBuffer::State state = command_buffer_->GetLastState();
+ EXPECT_EQ(0, state.get_offset);
+ EXPECT_EQ(0, state.put_offset);
+ EXPECT_EQ(0, state.token);
+ EXPECT_EQ(error::kNoError, state.error);
+}
+
+namespace {
+
+class CallbackTest {
+ public:
+ virtual void PutOffsetChanged() = 0;
+ virtual bool GetBufferChanged(int32 id) = 0;
+};
+
+class MockCallbackTest : public CallbackTest {
+ public:
+ MOCK_METHOD0(PutOffsetChanged, void());
+ MOCK_METHOD1(GetBufferChanged, bool(int32));
+};
+
+} // anonymous namespace
+
+TEST_F(CommandBufferServiceTest, CanSyncGetAndPutOffset) {
+ Initialize(1024);
+
+ scoped_ptr<StrictMock<MockCallbackTest> > change_callback(
+ new StrictMock<MockCallbackTest>);
+ command_buffer_->SetPutOffsetChangeCallback(
+ base::Bind(
+ &CallbackTest::PutOffsetChanged,
+ base::Unretained(change_callback.get())));
+
+ EXPECT_CALL(*change_callback, PutOffsetChanged());
+ command_buffer_->Flush(2);
+ EXPECT_EQ(0, GetGetOffset());
+ EXPECT_EQ(2, GetPutOffset());
+
+ EXPECT_CALL(*change_callback, PutOffsetChanged());
+ command_buffer_->Flush(4);
+ EXPECT_EQ(0, GetGetOffset());
+ EXPECT_EQ(4, GetPutOffset());
+
+ command_buffer_->SetGetOffset(2);
+ EXPECT_EQ(2, GetGetOffset());
+ EXPECT_CALL(*change_callback, PutOffsetChanged());
+ command_buffer_->Flush(6);
+
+ command_buffer_->Flush(-1);
+ EXPECT_NE(error::kNoError, GetError());
+ command_buffer_->Flush(1024);
+ EXPECT_NE(error::kNoError, GetError());
+}
+
+TEST_F(CommandBufferServiceTest, SetGetBuffer) {
+ int32 ring_buffer_id;
+ command_buffer_->CreateTransferBuffer(1024, &ring_buffer_id);
+ EXPECT_GT(ring_buffer_id, 0);
+
+ scoped_ptr<StrictMock<MockCallbackTest> > change_callback(
+ new StrictMock<MockCallbackTest>);
+ command_buffer_->SetGetBufferChangeCallback(
+ base::Bind(
+ &CallbackTest::GetBufferChanged,
+ base::Unretained(change_callback.get())));
+
+ EXPECT_CALL(*change_callback, GetBufferChanged(ring_buffer_id))
+ .WillOnce(Return(true));
+
+ command_buffer_->SetGetBuffer(ring_buffer_id);
+ EXPECT_EQ(0, GetGetOffset());
+}
+
+TEST_F(CommandBufferServiceTest, DefaultTokenIsZero) {
+ EXPECT_EQ(0, GetToken());
+}
+
+TEST_F(CommandBufferServiceTest, CanSetToken) {
+ command_buffer_->SetToken(7);
+ EXPECT_EQ(7, GetToken());
+}
+
+TEST_F(CommandBufferServiceTest, DefaultParseErrorIsNoError) {
+ EXPECT_EQ(0, GetError());
+}
+
+TEST_F(CommandBufferServiceTest, CanSetParseError) {
+ command_buffer_->SetParseError(error::kInvalidSize);
+ EXPECT_EQ(1, GetError());
+}
+} // namespace gpu
diff --git a/gpu/command_buffer/service/common_decoder.cc b/gpu/command_buffer/service/common_decoder.cc
new file mode 100644
index 0000000..86a37ba
--- /dev/null
+++ b/gpu/command_buffer/service/common_decoder.cc
@@ -0,0 +1,295 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/common_decoder.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+
+namespace gpu {
+
+CommonDecoder::Bucket::Bucket() : size_(0) {}
+
+CommonDecoder::Bucket::~Bucket() {}
+
+void* CommonDecoder::Bucket::GetData(size_t offset, size_t size) const {
+ if (OffsetSizeValid(offset, size)) {
+ return data_.get() + offset;
+ }
+ return NULL;
+}
+
+void CommonDecoder::Bucket::SetSize(size_t size) {
+ if (size != size_) {
+ data_.reset(size ? new int8[size] : NULL);
+ size_ = size;
+ memset(data_.get(), 0, size);
+ }
+}
+
+bool CommonDecoder::Bucket::SetData(
+ const void* src, size_t offset, size_t size) {
+ if (OffsetSizeValid(offset, size)) {
+ memcpy(data_.get() + offset, src, size);
+ return true;
+ }
+ return false;
+}
+
+void CommonDecoder::Bucket::SetFromString(const char* str) {
+ // Strings are passed NULL terminated to distinguish between empty string
+ // and no string.
+ if (!str) {
+ SetSize(0);
+ } else {
+ size_t size = strlen(str) + 1;
+ SetSize(size);
+ SetData(str, 0, size);
+ }
+}
+
+bool CommonDecoder::Bucket::GetAsString(std::string* str) {
+ DCHECK(str);
+ if (size_ == 0) {
+ return false;
+ }
+ str->assign(GetDataAs<const char*>(0, size_ - 1), size_ - 1);
+ return true;
+}
+
+CommonDecoder::CommonDecoder() : engine_(NULL) {}
+
+CommonDecoder::~CommonDecoder() {}
+
+void* CommonDecoder::GetAddressAndCheckSize(unsigned int shm_id,
+ unsigned int data_offset,
+ unsigned int data_size) {
+ CHECK(engine_);
+ scoped_refptr<gpu::Buffer> buffer = engine_->GetSharedMemoryBuffer(shm_id);
+ if (!buffer.get())
+ return NULL;
+ return buffer->GetDataAddress(data_offset, data_size);
+}
+
+scoped_refptr<gpu::Buffer> CommonDecoder::GetSharedMemoryBuffer(
+ unsigned int shm_id) {
+ return engine_->GetSharedMemoryBuffer(shm_id);
+}
+
+const char* CommonDecoder::GetCommonCommandName(
+ cmd::CommandId command_id) const {
+ return cmd::GetCommandName(command_id);
+}
+
+CommonDecoder::Bucket* CommonDecoder::GetBucket(uint32 bucket_id) const {
+ BucketMap::const_iterator iter(buckets_.find(bucket_id));
+ return iter != buckets_.end() ? &(*iter->second) : NULL;
+}
+
+CommonDecoder::Bucket* CommonDecoder::CreateBucket(uint32 bucket_id) {
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket) {
+ bucket = new Bucket();
+ buckets_[bucket_id] = linked_ptr<Bucket>(bucket);
+ }
+ return bucket;
+}
+
+namespace {
+
+// Returns the address of the first byte after a struct.
+template <typename T>
+const void* AddressAfterStruct(const T& pod) {
+ return reinterpret_cast<const uint8*>(&pod) + sizeof(pod);
+}
+
+// Returns a typed pointer to the first byte after the struct.
+template <typename RETURN_TYPE, typename COMMAND_TYPE>
+RETURN_TYPE GetImmediateDataAs(const COMMAND_TYPE& pod) {
+ return static_cast<RETURN_TYPE>(const_cast<void*>(AddressAfterStruct(pod)));
+}
+
+// TODO(vmiura): Looks like this g_command_info is duplicated in
+// common_decoder.cc and gles2_cmd_decoder.cc. Fix it!
+
+// A struct to hold info about each command.
+struct CommandInfo {
+ uint8 arg_flags; // How to handle the arguments for this command
+ uint8 cmd_flags; // How to handle this command
+ uint16 arg_count; // How many arguments are expected for this command.
+};
+
+// A table of CommandInfo for all the commands.
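+// Each entry's arg_count is the command's size in CommandBufferEntry units
+// minus one entry for the command header.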
+const CommandInfo g_command_info[] = {
+ #define COMMON_COMMAND_BUFFER_CMD_OP(name) { \
+ cmd::name::kArgFlags, \
+ cmd::name::cmd_flags, \
+ sizeof(cmd::name) / sizeof(CommandBufferEntry) - 1, }, /* NOLINT */
+
+ COMMON_COMMAND_BUFFER_CMDS(COMMON_COMMAND_BUFFER_CMD_OP)
+
+ #undef COMMON_COMMAND_BUFFER_CMD_OP
+};
+
+} // anonymous namespace.
+
+// Decode command with its arguments, and call the corresponding method.
+// Note: args is a pointer to the command buffer. As such, it could be changed
+// by a (malicious) client at any time, so if validation has to happen, it
+// should operate on a copy of them.
+error::Error CommonDecoder::DoCommonCommand(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data) {
+ if (command < arraysize(g_command_info)) {
+ const CommandInfo& info = g_command_info[command];
+ unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
+ if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
+ (info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
+ uint32 immediate_data_size =
+ (arg_count - info_arg_count) * sizeof(CommandBufferEntry); // NOLINT
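+      // Any entries beyond the fixed argument count (only possible for
+      // kAtLeastN commands) are immediate data that follows the command
+      // struct.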
+ switch (command) {
+ #define COMMON_COMMAND_BUFFER_CMD_OP(name) \
+ case cmd::name::kCmdId: \
+ return Handle ## name( \
+ immediate_data_size, \
+ *static_cast<const cmd::name*>(cmd_data)); \
+
+ COMMON_COMMAND_BUFFER_CMDS(COMMON_COMMAND_BUFFER_CMD_OP)
+
+ #undef COMMON_COMMAND_BUFFER_CMD_OP
+ }
+ } else {
+ return error::kInvalidArguments;
+ }
+ }
+ return error::kUnknownCommand;
+}
+
+error::Error CommonDecoder::HandleNoop(
+ uint32 immediate_data_size,
+ const cmd::Noop& args) {
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleSetToken(
+ uint32 immediate_data_size,
+ const cmd::SetToken& args) {
+ engine_->set_token(args.token);
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleSetBucketSize(
+ uint32 immediate_data_size,
+ const cmd::SetBucketSize& args) {
+ uint32 bucket_id = args.bucket_id;
+ uint32 size = args.size;
+
+ Bucket* bucket = CreateBucket(bucket_id);
+ bucket->SetSize(size);
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleSetBucketData(
+ uint32 immediate_data_size,
+ const cmd::SetBucketData& args) {
+ uint32 bucket_id = args.bucket_id;
+ uint32 offset = args.offset;
+ uint32 size = args.size;
+ const void* data = GetSharedMemoryAs<const void*>(
+ args.shared_memory_id, args.shared_memory_offset, size);
+ if (!data) {
+ return error::kInvalidArguments;
+ }
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ if (!bucket->SetData(data, offset, size)) {
+ return error::kInvalidArguments;
+ }
+
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleSetBucketDataImmediate(
+ uint32 immediate_data_size,
+ const cmd::SetBucketDataImmediate& args) {
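+  // For immediate commands the payload lives directly after the command
+  // struct in the command buffer, so take its address rather than reading
+  // from a shared memory offset.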
+ const void* data = GetImmediateDataAs<const void*>(args);
+ uint32 bucket_id = args.bucket_id;
+ uint32 offset = args.offset;
+ uint32 size = args.size;
+ if (size > immediate_data_size) {
+ return error::kInvalidArguments;
+ }
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ if (!bucket->SetData(data, offset, size)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleGetBucketStart(
+ uint32 immediate_data_size,
+ const cmd::GetBucketStart& args) {
+ uint32 bucket_id = args.bucket_id;
+ uint32* result = GetSharedMemoryAs<uint32*>(
+ args.result_memory_id, args.result_memory_offset, sizeof(*result));
+ int32 data_memory_id = args.data_memory_id;
+ uint32 data_memory_offset = args.data_memory_offset;
+ uint32 data_memory_size = args.data_memory_size;
+ uint8* data = NULL;
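+  // The destination buffer is optional: if all three data_* fields are zero,
+  // only the bucket size is returned; otherwise they must describe a valid
+  // shared memory region.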
+ if (data_memory_size != 0 || data_memory_id != 0 || data_memory_offset != 0) {
+ data = GetSharedMemoryAs<uint8*>(
+ args.data_memory_id, args.data_memory_offset, args.data_memory_size);
+ if (!data) {
+ return error::kInvalidArguments;
+ }
+ }
+ if (!result) {
+ return error::kInvalidArguments;
+ }
+ // Check that the client initialized the result.
+ if (*result != 0) {
+ return error::kInvalidArguments;
+ }
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ uint32 bucket_size = bucket->size();
+ *result = bucket_size;
+ if (data) {
+ uint32 size = std::min(data_memory_size, bucket_size);
+ memcpy(data, bucket->GetData(0, size), size);
+ }
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleGetBucketData(
+ uint32 immediate_data_size,
+ const cmd::GetBucketData& args) {
+ uint32 bucket_id = args.bucket_id;
+ uint32 offset = args.offset;
+ uint32 size = args.size;
+ void* data = GetSharedMemoryAs<void*>(
+ args.shared_memory_id, args.shared_memory_offset, size);
+ if (!data) {
+ return error::kInvalidArguments;
+ }
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ const void* src = bucket->GetData(offset, size);
+ if (!src) {
+ return error::kInvalidArguments;
+ }
+ memcpy(data, src, size);
+ return error::kNoError;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/common_decoder.h b/gpu/command_buffer/service/common_decoder.h
new file mode 100644
index 0000000..2132afb
--- /dev/null
+++ b/gpu/command_buffer/service/common_decoder.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_COMMON_DECODER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_COMMON_DECODER_H_
+
+#include <map>
+#include <stack>
+#include <string>
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/common/buffer.h"
+#include "gpu/command_buffer/service/cmd_parser.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class CommandBufferEngine;
+
+// This class is a helper base class for implementing the common parts of the
+// o3d/gl2 command buffer decoder.
+class GPU_EXPORT CommonDecoder : NON_EXPORTED_BASE(public AsyncAPIInterface) {
+ public:
+ typedef error::Error Error;
+
+ static const unsigned int kMaxStackDepth = 32;
+
+ // A bucket is a buffer to help collect memory across a command buffer. When
+ // creating a command buffer implementation of an existing API, sometimes that
+ // API has functions that take a pointer to data. A good example is OpenGL's
+ // glBufferData. Because the data is separated between client and service,
+  // there are two ways to get this data across. One is to put all the data in
+  // shared memory. The problem with this is that the data can be arbitrarily
+  // large and the host OS may not support that much shared memory. The other
+  // is to shuffle the memory across a little at a time, collecting it on the
+  // service side, and once it is all there call glBufferData. Buckets
+  // implement this second solution. Using the common commands SetBucketSize,
+  // SetBucketData, and SetBucketDataImmediate, the client can fill a bucket.
+  // It can then call a command that uses that bucket (like BufferDataBucket
+  // in the GLES2 command buffer implementation).
+ //
+ // If you are designing an API from scratch you can avoid this need for
+ // Buckets by making your API always take an offset and a size
+ // similar to glBufferSubData.
+ //
+ // Buckets also help pass strings to/from the service. To return a string of
+  // arbitrary size, the service puts the string in a bucket. The client can
+ // then query the size of a bucket and request sections of the bucket to
+ // be passed across shared memory.
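+  //
+  // A rough sketch of the sequence a client might issue (the first three are
+  // the real common commands; the final consuming command is illustrative):
+  //
+  //   SetBucketSize(bucket_id, total_size);
+  //   SetBucketData(bucket_id, 0, chunk_size, shm_id, shm_offset);
+  //   SetBucketData(bucket_id, chunk_size, chunk_size, shm_id, shm_offset);
+  //   ...  // repeat until total_size bytes have been transferred
+  //   BufferDataBucket(...);  // a command that consumes the bucket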
+ class GPU_EXPORT Bucket {
+ public:
+ Bucket();
+ ~Bucket();
+
+ size_t size() const {
+ return size_;
+ }
+
+    // Gets a pointer to a section of the bucket. Returns NULL if offset or
+    // size is out of range.
+ void* GetData(size_t offset, size_t size) const;
+
+ template <typename T>
+ T GetDataAs(size_t offset, size_t size) const {
+ return reinterpret_cast<T>(GetData(offset, size));
+ }
+
+ // Sets the size of the bucket.
+ void SetSize(size_t size);
+
+ // Sets a part of the bucket.
+ // Returns false if offset or size is out of range.
+ bool SetData(const void* src, size_t offset, size_t size);
+
+ // Sets the bucket data from a string. Strings are passed NULL terminated to
+ // distinguish between empty string and no string.
+ void SetFromString(const char* str);
+
+ // Gets the bucket data as a string. Strings are passed NULL terminated to
+    // distinguish between empty string and no string. Returns false if there
+ // is no string.
+ bool GetAsString(std::string* str);
+
+ private:
+ bool OffsetSizeValid(size_t offset, size_t size) const {
+ size_t temp = offset + size;
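+      // The second comparison below catches unsigned wrap-around of
+      // offset + size.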
+ return temp <= size_ && temp >= offset;
+ }
+
+ size_t size_;
+ ::scoped_ptr<int8[]> data_;
+
+ DISALLOW_COPY_AND_ASSIGN(Bucket);
+ };
+
+ CommonDecoder();
+ virtual ~CommonDecoder();
+
+  // Sets the engine, which is used to get shared memory buffers from and to
+  // set the token on.
+ void set_engine(CommandBufferEngine* engine) {
+ engine_ = engine;
+ }
+ CommandBufferEngine* engine() const { return engine_; }
+
+ // Creates a bucket. If the bucket already exists returns that bucket.
+ Bucket* CreateBucket(uint32 bucket_id);
+
+ // Gets a bucket. Returns NULL if the bucket does not exist.
+ Bucket* GetBucket(uint32 bucket_id) const;
+
+ // Gets the address of shared memory data, given a shared memory ID and an
+ // offset. Also checks that the size is consistent with the shared memory
+ // size.
+ // Parameters:
+ // shm_id: the id of the shared memory buffer.
+ // offset: the offset of the data in the shared memory buffer.
+ // size: the size of the data.
+ // Returns:
+ // NULL if shm_id isn't a valid shared memory buffer ID or if the size
+  //   check fails. Returns a pointer to the data otherwise.
+ void* GetAddressAndCheckSize(unsigned int shm_id,
+ unsigned int offset,
+ unsigned int size);
+
+ // Typed version of GetAddressAndCheckSize.
+ template <typename T>
+ T GetSharedMemoryAs(unsigned int shm_id, unsigned int offset,
+ unsigned int size) {
+ return static_cast<T>(GetAddressAndCheckSize(shm_id, offset, size));
+ }
+
+ // Get the actual shared memory buffer.
+ scoped_refptr<gpu::Buffer> GetSharedMemoryBuffer(unsigned int shm_id);
+
+ protected:
+ // Executes a common command.
+ // Parameters:
+ // command: the command index.
+ // arg_count: the number of CommandBufferEntry arguments.
+ // cmd_data: the command data.
+ // Returns:
+ // error::kNoError if no error was found, one of
+ // error::Error otherwise.
+ error::Error DoCommonCommand(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data);
+
+  // Gets the name of a common command.
+ const char* GetCommonCommandName(cmd::CommandId command_id) const;
+
+ private:
+ // Generate a member function prototype for each command in an automated and
+ // typesafe way.
+ #define COMMON_COMMAND_BUFFER_CMD_OP(name) \
+ error::Error Handle##name( \
+ uint32 immediate_data_size, \
+ const cmd::name& args); \
+
+ COMMON_COMMAND_BUFFER_CMDS(COMMON_COMMAND_BUFFER_CMD_OP)
+
+ #undef COMMON_COMMAND_BUFFER_CMD_OP
+
+ CommandBufferEngine* engine_;
+
+ typedef std::map<uint32, linked_ptr<Bucket> > BucketMap;
+ BucketMap buckets_;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_COMMON_DECODER_H_
+
diff --git a/gpu/command_buffer/service/common_decoder_unittest.cc b/gpu/command_buffer/service/common_decoder_unittest.cc
new file mode 100644
index 0000000..0faa8e9
--- /dev/null
+++ b/gpu/command_buffer/service/common_decoder_unittest.cc
@@ -0,0 +1,513 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/common_decoder.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+TEST(CommonDecoderBucket, Basic) {
+ CommonDecoder::Bucket bucket;
+ EXPECT_EQ(0u, bucket.size());
+ EXPECT_TRUE(NULL == bucket.GetData(0, 0));
+}
+
+TEST(CommonDecoderBucket, Size) {
+ CommonDecoder::Bucket bucket;
+ bucket.SetSize(24);
+ EXPECT_EQ(24u, bucket.size());
+ bucket.SetSize(12);
+ EXPECT_EQ(12u, bucket.size());
+}
+
+TEST(CommonDecoderBucket, GetData) {
+ CommonDecoder::Bucket bucket;
+
+ bucket.SetSize(24);
+ EXPECT_TRUE(NULL != bucket.GetData(0, 0));
+ EXPECT_TRUE(NULL != bucket.GetData(24, 0));
+ EXPECT_TRUE(NULL == bucket.GetData(25, 0));
+ EXPECT_TRUE(NULL != bucket.GetData(0, 24));
+ EXPECT_TRUE(NULL == bucket.GetData(0, 25));
+ bucket.SetSize(23);
+ EXPECT_TRUE(NULL == bucket.GetData(0, 24));
+}
+
+TEST(CommonDecoderBucket, SetData) {
+ CommonDecoder::Bucket bucket;
+ static const char data[] = "testing";
+
+ bucket.SetSize(10);
+ EXPECT_TRUE(bucket.SetData(data, 0, sizeof(data)));
+ EXPECT_EQ(0, memcmp(data, bucket.GetData(0, sizeof(data)), sizeof(data)));
+ EXPECT_TRUE(bucket.SetData(data, 2, sizeof(data)));
+ EXPECT_EQ(0, memcmp(data, bucket.GetData(2, sizeof(data)), sizeof(data)));
+ EXPECT_FALSE(bucket.SetData(data, 0, sizeof(data) * 2));
+ EXPECT_FALSE(bucket.SetData(data, 5, sizeof(data)));
+}
+
+class TestCommonDecoder : public CommonDecoder {
+ public:
+ // Overridden from AsyncAPIInterface
+ virtual const char* GetCommandName(unsigned int command_id) const OVERRIDE {
+ return GetCommonCommandName(static_cast<cmd::CommandId>(command_id));
+ }
+
+ // Overridden from AsyncAPIInterface
+ virtual error::Error DoCommand(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data) OVERRIDE {
+ return DoCommonCommand(command, arg_count, cmd_data);
+ }
+
+ CommonDecoder::Bucket* GetBucket(uint32 id) const {
+ return CommonDecoder::GetBucket(id);
+ }
+};
+
+class MockCommandBufferEngine : public CommandBufferEngine {
+ public:
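+  // kStartValidShmId and kValidShmId both resolve to the same backing buffer;
+  // kInvalidShmId is rejected by GetSharedMemoryBuffer().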
+ static const int32 kStartValidShmId = 1;
+ static const int32 kValidShmId = 2;
+ static const int32 kInvalidShmId = 3;
+ static const size_t kBufferSize = 1024;
+ static const int32 kValidOffset = kBufferSize / 2;
+ static const int32 kInvalidOffset = kBufferSize;
+
+ MockCommandBufferEngine()
+ : CommandBufferEngine(),
+ token_(),
+ get_offset_(0) {
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
+ shared_memory->CreateAndMapAnonymous(kBufferSize);
+ buffer_ = MakeBufferFromSharedMemory(shared_memory.Pass(), kBufferSize);
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual scoped_refptr<gpu::Buffer> GetSharedMemoryBuffer(int32 shm_id)
+ OVERRIDE {
+ if (IsValidSharedMemoryId(shm_id))
+ return buffer_;
+ return NULL;
+ }
+
+ template <typename T>
+ T GetSharedMemoryAs(uint32 offset) {
+ DCHECK_LT(offset, kBufferSize);
+ int8* buffer_memory = static_cast<int8*>(buffer_->memory());
+ return reinterpret_cast<T>(&buffer_memory[offset]);
+ }
+
+ int32 GetSharedMemoryOffset(const void* memory) {
+ int8* buffer_memory = static_cast<int8*>(buffer_->memory());
+ ptrdiff_t offset = static_cast<const int8*>(memory) - &buffer_memory[0];
+ DCHECK_GE(offset, 0);
+ DCHECK_LT(static_cast<size_t>(offset), kBufferSize);
+ return static_cast<int32>(offset);
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual void set_token(int32 token) OVERRIDE {
+ token_ = token;
+ }
+
+ int32 token() const {
+ return token_;
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual bool SetGetBuffer(int32 transfer_buffer_id) OVERRIDE {
+ NOTREACHED();
+ return false;
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual bool SetGetOffset(int32 offset) OVERRIDE {
+ if (static_cast<size_t>(offset) < kBufferSize) {
+ get_offset_ = offset;
+ return true;
+ }
+ return false;
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual int32 GetGetOffset() OVERRIDE {
+ return get_offset_;
+ }
+
+ private:
+ bool IsValidSharedMemoryId(int32 shm_id) {
+ return shm_id == kValidShmId || shm_id == kStartValidShmId;
+ }
+
+ scoped_refptr<gpu::Buffer> buffer_;
+ int32 token_;
+ int32 get_offset_;
+};
+
+const int32 MockCommandBufferEngine::kStartValidShmId;
+const int32 MockCommandBufferEngine::kValidShmId;
+const int32 MockCommandBufferEngine::kInvalidShmId;
+const size_t MockCommandBufferEngine::kBufferSize;
+const int32 MockCommandBufferEngine::kValidOffset;
+const int32 MockCommandBufferEngine::kInvalidOffset;
+
+class CommonDecoderTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ decoder_.set_engine(&engine_);
+ }
+
+ virtual void TearDown() {
+ }
+
+ template <typename T>
+ error::Error ExecuteCmd(const T& cmd) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
+ return decoder_.DoCommands(
+ 1, (const void*)&cmd, ComputeNumEntries(sizeof(cmd)), 0);
+ }
+
+ template <typename T>
+ error::Error ExecuteImmediateCmd(const T& cmd, size_t data_size) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ return decoder_.DoCommands(
+ 1, (const void*)&cmd, ComputeNumEntries(sizeof(cmd) + data_size), 0);
+ }
+
+ MockCommandBufferEngine engine_;
+ TestCommonDecoder decoder_;
+};
+
+TEST_F(CommonDecoderTest, Initialize) {
+ EXPECT_EQ(0, engine_.GetGetOffset());
+}
+
+TEST_F(CommonDecoderTest, DoCommonCommandInvalidCommand) {
+ EXPECT_EQ(error::kUnknownCommand, decoder_.DoCommand(999999, 0, NULL));
+}
+
+TEST_F(CommonDecoderTest, HandleNoop) {
+ cmd::Noop cmd;
+ const uint32 kSkipCount = 5;
+ cmd.Init(kSkipCount);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(
+ cmd, kSkipCount * kCommandBufferEntrySize));
+ const uint32 kSkipCount2 = 1;
+ cmd.Init(kSkipCount2);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(
+ cmd, kSkipCount2 * kCommandBufferEntrySize));
+}
+
+TEST_F(CommonDecoderTest, SetToken) {
+ cmd::SetToken cmd;
+ const int32 kTokenId = 123;
+ EXPECT_EQ(0, engine_.token());
+ cmd.Init(kTokenId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kTokenId, engine_.token());
+}
+
+TEST_F(CommonDecoderTest, SetBucketSize) {
+ cmd::SetBucketSize cmd;
+ const uint32 kBucketId = 123;
+ const uint32 kBucketLength1 = 1234;
+ const uint32 kBucketLength2 = 78;
+ // Check the bucket does not exist.
+ EXPECT_TRUE(NULL == decoder_.GetBucket(kBucketId));
+ // Check we can create one.
+ cmd.Init(kBucketId, kBucketLength1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ CommonDecoder::Bucket* bucket;
+ bucket = decoder_.GetBucket(kBucketId);
+ EXPECT_TRUE(NULL != bucket);
+ EXPECT_EQ(kBucketLength1, bucket->size());
+ // Check we can change it.
+ cmd.Init(kBucketId, kBucketLength2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ bucket = decoder_.GetBucket(kBucketId);
+ EXPECT_TRUE(NULL != bucket);
+ EXPECT_EQ(kBucketLength2, bucket->size());
+ // Check we can delete it.
+ cmd.Init(kBucketId, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ bucket = decoder_.GetBucket(kBucketId);
+ EXPECT_EQ(0u, bucket->size());
+}
+
+TEST_F(CommonDecoderTest, SetBucketData) {
+ cmd::SetBucketSize size_cmd;
+ cmd::SetBucketData cmd;
+
+ static const char kData[] = "1234567890123456789";
+
+ const uint32 kBucketId = 123;
+ const uint32 kInvalidBucketId = 124;
+
+ size_cmd.Init(kBucketId, sizeof(kData));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
+ CommonDecoder::Bucket* bucket = decoder_.GetBucket(kBucketId);
+ // Check the data is not there.
+ EXPECT_NE(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));
+
+ // Check we can set it.
+ const uint32 kSomeOffsetInSharedMemory = 50;
+ void* memory = engine_.GetSharedMemoryAs<void*>(kSomeOffsetInSharedMemory);
+ memcpy(memory, kData, sizeof(kData));
+ cmd.Init(kBucketId, 0, sizeof(kData),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));
+
+ // Check we can set it partially.
+ static const char kData2[] = "ABCEDFG";
+ const uint32 kSomeOffsetInBucket = 5;
+ memcpy(memory, kData2, sizeof(kData2));
+ cmd.Init(kBucketId, kSomeOffsetInBucket, sizeof(kData2),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, memcmp(bucket->GetData(kSomeOffsetInBucket, sizeof(kData2)),
+ kData2, sizeof(kData2)));
+ const char* bucket_data = bucket->GetDataAs<const char*>(0, sizeof(kData));
+ // Check that nothing was affected outside of updated area.
+ EXPECT_EQ(kData[kSomeOffsetInBucket - 1],
+ bucket_data[kSomeOffsetInBucket - 1]);
+ EXPECT_EQ(kData[kSomeOffsetInBucket + sizeof(kData2)],
+ bucket_data[kSomeOffsetInBucket + sizeof(kData2)]);
+
+ // Check that it fails if the bucket_id is invalid
+ cmd.Init(kInvalidBucketId, kSomeOffsetInBucket, sizeof(kData2),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the offset is out of range.
+ cmd.Init(kBucketId, bucket->size(), 1,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the size is out of range.
+ cmd.Init(kBucketId, 0, bucket->size() + 1,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_F(CommonDecoderTest, SetBucketDataImmediate) {
+ cmd::SetBucketSize size_cmd;
+ int8 buffer[1024];
+ cmd::SetBucketDataImmediate& cmd =
+ *reinterpret_cast<cmd::SetBucketDataImmediate*>(&buffer);
+
+ static const char kData[] = "1234567890123456789";
+
+ const uint32 kBucketId = 123;
+ const uint32 kInvalidBucketId = 124;
+
+ size_cmd.Init(kBucketId, sizeof(kData));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
+ CommonDecoder::Bucket* bucket = decoder_.GetBucket(kBucketId);
+ // Check the data is not there.
+ EXPECT_NE(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));
+
+ // Check we can set it.
+ void* memory = &buffer[0] + sizeof(cmd);
+ memcpy(memory, kData, sizeof(kData));
+ cmd.Init(kBucketId, 0, sizeof(kData));
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(kData)));
+ EXPECT_EQ(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));
+
+ // Check we can set it partially.
+ static const char kData2[] = "ABCEDFG";
+ const uint32 kSomeOffsetInBucket = 5;
+ memcpy(memory, kData2, sizeof(kData2));
+ cmd.Init(kBucketId, kSomeOffsetInBucket, sizeof(kData2));
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(kData2)));
+ EXPECT_EQ(0, memcmp(bucket->GetData(kSomeOffsetInBucket, sizeof(kData2)),
+ kData2, sizeof(kData2)));
+ const char* bucket_data = bucket->GetDataAs<const char*>(0, sizeof(kData));
+ // Check that nothing was affected outside of updated area.
+ EXPECT_EQ(kData[kSomeOffsetInBucket - 1],
+ bucket_data[kSomeOffsetInBucket - 1]);
+ EXPECT_EQ(kData[kSomeOffsetInBucket + sizeof(kData2)],
+ bucket_data[kSomeOffsetInBucket + sizeof(kData2)]);
+
+ // Check that it fails if the bucket_id is invalid
+ cmd.Init(kInvalidBucketId, kSomeOffsetInBucket, sizeof(kData2));
+ EXPECT_NE(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(kData2)));
+
+ // Check that it fails if the offset is out of range.
+ cmd.Init(kBucketId, bucket->size(), 1);
+ EXPECT_NE(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(kData2)));
+
+ // Check that it fails if the size is out of range.
+ cmd.Init(kBucketId, 0, bucket->size() + 1);
+ EXPECT_NE(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(kData2)));
+}
+
+TEST_F(CommonDecoderTest, GetBucketStart) {
+ cmd::SetBucketSize size_cmd;
+ cmd::SetBucketData set_cmd;
+ cmd::GetBucketStart cmd;
+
+ static const char kData[] = "1234567890123456789";
+ static const char zero[sizeof(kData)] = { 0, };
+
+ const uint32 kBucketSize = sizeof(kData);
+ const uint32 kBucketId = 123;
+ const uint32 kInvalidBucketId = 124;
+
+ // Put data in the bucket.
+ size_cmd.Init(kBucketId, sizeof(kData));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
+ const uint32 kSomeOffsetInSharedMemory = 50;
+ uint8* start = engine_.GetSharedMemoryAs<uint8*>(kSomeOffsetInSharedMemory);
+ memcpy(start, kData, sizeof(kData));
+ set_cmd.Init(kBucketId, 0, sizeof(kData),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(set_cmd));
+
+ // Check that the size is correct with no data buffer.
+ uint32* memory =
+ engine_.GetSharedMemoryAs<uint32*>(kSomeOffsetInSharedMemory);
+ *memory = 0x0;
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 0, 0, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kBucketSize, *memory);
+
+ // Check that the data is copied with data buffer.
+ const uint32 kDataOffsetInSharedMemory = 54;
+ uint8* data = engine_.GetSharedMemoryAs<uint8*>(kDataOffsetInSharedMemory);
+ *memory = 0x0;
+ memset(data, 0, sizeof(kData));
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ kBucketSize, MockCommandBufferEngine::kValidShmId,
+ kDataOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kBucketSize, *memory);
+ EXPECT_EQ(0, memcmp(data, kData, kBucketSize));
+
+ // Check that we can get a piece.
+ *memory = 0x0;
+ memset(data, 0, sizeof(kData));
+ const uint32 kPieceSize = kBucketSize / 2;
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ kPieceSize, MockCommandBufferEngine::kValidShmId,
+ kDataOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kBucketSize, *memory);
+ EXPECT_EQ(0, memcmp(data, kData, kPieceSize));
+ EXPECT_EQ(0, memcmp(data + kPieceSize, zero, sizeof(kData) - kPieceSize));
+
+ // Check that it fails if the result_id is invalid
+ cmd.Init(kInvalidBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 0, 0, 0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the data_id is invalid
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 1, MockCommandBufferEngine::kInvalidShmId, 0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the data_size is invalid
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 1, 0, 0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ MockCommandBufferEngine::kBufferSize + 1,
+ MockCommandBufferEngine::kValidShmId, 0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the data_offset is invalid
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 0, 0, 1);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ MockCommandBufferEngine::kBufferSize,
+ MockCommandBufferEngine::kValidShmId, 1);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the result size is not set to zero
+ *memory = 0x1;
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 0, 0, 0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_F(CommonDecoderTest, GetBucketData) {
+ cmd::SetBucketSize size_cmd;
+ cmd::SetBucketData set_cmd;
+ cmd::GetBucketData cmd;
+
+ static const char kData[] = "1234567890123456789";
+ static const char zero[sizeof(kData)] = { 0, };
+
+ const uint32 kBucketId = 123;
+ const uint32 kInvalidBucketId = 124;
+
+ size_cmd.Init(kBucketId, sizeof(kData));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
+ const uint32 kSomeOffsetInSharedMemory = 50;
+ uint8* memory = engine_.GetSharedMemoryAs<uint8*>(kSomeOffsetInSharedMemory);
+ memcpy(memory, kData, sizeof(kData));
+ set_cmd.Init(kBucketId, 0, sizeof(kData),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(set_cmd));
+
+ // Check we can get the whole thing.
+ memset(memory, 0, sizeof(kData));
+ cmd.Init(kBucketId, 0, sizeof(kData),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, memcmp(memory, kData, sizeof(kData)));
+
+ // Check we can get a piece.
+ const uint32 kSomeOffsetInBucket = 5;
+ const uint32 kLengthOfPiece = 6;
+ const uint8 kSentinel = 0xff;
+ memset(memory, 0, sizeof(kData));
+ memory[-1] = kSentinel;
+ cmd.Init(kBucketId, kSomeOffsetInBucket, kLengthOfPiece,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, memcmp(memory, kData + kSomeOffsetInBucket, kLengthOfPiece));
+ EXPECT_EQ(0, memcmp(memory + kLengthOfPiece, zero,
+ sizeof(kData) - kLengthOfPiece));
+ EXPECT_EQ(kSentinel, memory[-1]);
+
+ // Check that it fails if the bucket_id is invalid
+ cmd.Init(kInvalidBucketId, kSomeOffsetInBucket, sizeof(kData),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the offset is invalid
+ cmd.Init(kBucketId, sizeof(kData) + 1, 1,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the size is invalid
+ cmd.Init(kBucketId, 0, sizeof(kData) + 1,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/context_group.cc b/gpu/command_buffer/service/context_group.cc
new file mode 100644
index 0000000..fe692be
--- /dev/null
+++ b/gpu/command_buffer/service/context_group.cc
@@ -0,0 +1,380 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/context_group.h"
+
+#include <algorithm>
+#include <string>
+
+#include "base/command_line.h"
+#include "base/strings/string_util.h"
+#include "base/sys_info.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace gles2 {
+
+ContextGroup::ContextGroup(
+ const scoped_refptr<MailboxManager>& mailbox_manager,
+ const scoped_refptr<MemoryTracker>& memory_tracker,
+ const scoped_refptr<ShaderTranslatorCache>& shader_translator_cache,
+ const scoped_refptr<FeatureInfo>& feature_info,
+ bool bind_generates_resource)
+ : mailbox_manager_(mailbox_manager),
+ memory_tracker_(memory_tracker),
+ shader_translator_cache_(shader_translator_cache),
+ enforce_gl_minimums_(CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnforceGLMinimums)),
+ bind_generates_resource_(bind_generates_resource),
+ max_vertex_attribs_(0u),
+ max_texture_units_(0u),
+ max_texture_image_units_(0u),
+ max_vertex_texture_image_units_(0u),
+ max_fragment_uniform_vectors_(0u),
+ max_varying_vectors_(0u),
+ max_vertex_uniform_vectors_(0u),
+ max_color_attachments_(1u),
+ max_draw_buffers_(1u),
+ program_cache_(NULL),
+ feature_info_(feature_info),
+ draw_buffer_(GL_BACK) {
+ {
+ if (!mailbox_manager_.get())
+ mailbox_manager_ = new MailboxManager;
+ if (!feature_info.get())
+ feature_info_ = new FeatureInfo;
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ manager->Initialize();
+ }
+}
+
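+// glGetIntegerv writes a GLint, so read through a temporary before storing
+// into the unsigned caps fields.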
+static void GetIntegerv(GLenum pname, uint32* var) {
+ GLint value = 0;
+ glGetIntegerv(pname, &value);
+ *var = value;
+}
+
+bool ContextGroup::Initialize(
+ GLES2Decoder* decoder,
+ const DisallowedFeatures& disallowed_features) {
+ // If we've already initialized the group just add the context.
+ if (HaveContexts()) {
+ decoders_.push_back(base::AsWeakPtr<GLES2Decoder>(decoder));
+ return true;
+ }
+
+ if (!feature_info_->Initialize(disallowed_features)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because FeatureInfo "
+ << "initialization failed.";
+ return false;
+ }
+
+ const GLint kMinRenderbufferSize = 512; // GL says 1 pixel!
+ GLint max_renderbuffer_size = 0;
+ if (!QueryGLFeature(
+ GL_MAX_RENDERBUFFER_SIZE, kMinRenderbufferSize,
+ &max_renderbuffer_size)) {
+    LOG(ERROR) << "ContextGroup::Initialize failed because maximum "
+               << "renderbuffer size is too small.";
+ return false;
+ }
+ GLint max_samples = 0;
+ if (feature_info_->feature_flags().chromium_framebuffer_multisample ||
+ feature_info_->feature_flags().multisampled_render_to_texture) {
+ if (feature_info_->feature_flags(
+ ).use_img_for_multisampled_render_to_texture) {
+ glGetIntegerv(GL_MAX_SAMPLES_IMG, &max_samples);
+ } else {
+ glGetIntegerv(GL_MAX_SAMPLES, &max_samples);
+ }
+ }
+
+ if (feature_info_->feature_flags().ext_draw_buffers) {
+ GetIntegerv(GL_MAX_COLOR_ATTACHMENTS_EXT, &max_color_attachments_);
+ if (max_color_attachments_ < 1)
+ max_color_attachments_ = 1;
+ GetIntegerv(GL_MAX_DRAW_BUFFERS_ARB, &max_draw_buffers_);
+ if (max_draw_buffers_ < 1)
+ max_draw_buffers_ = 1;
+ draw_buffer_ = GL_BACK;
+ }
+
+ const bool depth24_supported = feature_info_->feature_flags().oes_depth24;
+
+ buffer_manager_.reset(
+ new BufferManager(memory_tracker_.get(), feature_info_.get()));
+ framebuffer_manager_.reset(
+ new FramebufferManager(max_draw_buffers_, max_color_attachments_));
+ renderbuffer_manager_.reset(new RenderbufferManager(
+ memory_tracker_.get(), max_renderbuffer_size, max_samples,
+ depth24_supported));
+ shader_manager_.reset(new ShaderManager());
+
+ // Lookup GL things we need to know.
+ const GLint kGLES2RequiredMinimumVertexAttribs = 8u;
+ if (!QueryGLFeatureU(
+ GL_MAX_VERTEX_ATTRIBS, kGLES2RequiredMinimumVertexAttribs,
+ &max_vertex_attribs_)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because too few "
+ << "vertex attributes supported.";
+ return false;
+ }
+
+ const GLuint kGLES2RequiredMinimumTextureUnits = 8u;
+ if (!QueryGLFeatureU(
+ GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, kGLES2RequiredMinimumTextureUnits,
+ &max_texture_units_)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because too few "
+ << "texture units supported.";
+ return false;
+ }
+
+ GLint max_texture_size = 0;
+ GLint max_cube_map_texture_size = 0;
+ const GLint kMinTextureSize = 2048; // GL actually says 64!?!?
+ const GLint kMinCubeMapSize = 256; // GL actually says 16!?!?
+ if (!QueryGLFeature(
+ GL_MAX_TEXTURE_SIZE, kMinTextureSize, &max_texture_size) ||
+ !QueryGLFeature(
+ GL_MAX_CUBE_MAP_TEXTURE_SIZE, kMinCubeMapSize,
+ &max_cube_map_texture_size)) {
+    LOG(ERROR) << "ContextGroup::Initialize failed because maximum texture "
+               << "size is too small.";
+ return false;
+ }
+
+ if (feature_info_->workarounds().max_texture_size) {
+ max_texture_size = std::min(
+ max_texture_size, feature_info_->workarounds().max_texture_size);
+ }
+ if (feature_info_->workarounds().max_cube_map_texture_size) {
+ max_cube_map_texture_size = std::min(
+ max_cube_map_texture_size,
+ feature_info_->workarounds().max_cube_map_texture_size);
+ }
+
+ texture_manager_.reset(new TextureManager(memory_tracker_.get(),
+ feature_info_.get(),
+ max_texture_size,
+ max_cube_map_texture_size,
+ bind_generates_resource_));
+ texture_manager_->set_framebuffer_manager(framebuffer_manager_.get());
+
+ const GLint kMinTextureImageUnits = 8;
+ const GLint kMinVertexTextureImageUnits = 0;
+ if (!QueryGLFeatureU(
+ GL_MAX_TEXTURE_IMAGE_UNITS, kMinTextureImageUnits,
+ &max_texture_image_units_) ||
+ !QueryGLFeatureU(
+ GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, kMinVertexTextureImageUnits,
+ &max_vertex_texture_image_units_)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because too few "
+ << "texture units.";
+ return false;
+ }
+
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ GetIntegerv(GL_MAX_FRAGMENT_UNIFORM_VECTORS,
+ &max_fragment_uniform_vectors_);
+ GetIntegerv(GL_MAX_VARYING_VECTORS, &max_varying_vectors_);
+ GetIntegerv(GL_MAX_VERTEX_UNIFORM_VECTORS, &max_vertex_uniform_vectors_);
+ } else {
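+    // Desktop GL reports these limits in scalar components rather than
+    // 4-component vectors, so convert by dividing by 4.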
+ GetIntegerv(
+ GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, &max_fragment_uniform_vectors_);
+ max_fragment_uniform_vectors_ /= 4;
+ GetIntegerv(GL_MAX_VARYING_FLOATS, &max_varying_vectors_);
+ max_varying_vectors_ /= 4;
+ GetIntegerv(GL_MAX_VERTEX_UNIFORM_COMPONENTS, &max_vertex_uniform_vectors_);
+ max_vertex_uniform_vectors_ /= 4;
+ }
+
+ const GLint kMinFragmentUniformVectors = 16;
+ const GLint kMinVaryingVectors = 8;
+ const GLint kMinVertexUniformVectors = 128;
+ if (!CheckGLFeatureU(
+ kMinFragmentUniformVectors, &max_fragment_uniform_vectors_) ||
+ !CheckGLFeatureU(kMinVaryingVectors, &max_varying_vectors_) ||
+ !CheckGLFeatureU(
+ kMinVertexUniformVectors, &max_vertex_uniform_vectors_)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because too few "
+ << "uniforms or varyings supported.";
+ return false;
+ }
+
+  // Some shaders in Skia need more than the minimum available vertex and
+  // fragment shader uniform vectors when using the OSMesa GL implementation.
+ if (feature_info_->workarounds().max_fragment_uniform_vectors) {
+ max_fragment_uniform_vectors_ = std::min(
+ max_fragment_uniform_vectors_,
+ static_cast<uint32>(
+ feature_info_->workarounds().max_fragment_uniform_vectors));
+ }
+ if (feature_info_->workarounds().max_varying_vectors) {
+ max_varying_vectors_ = std::min(
+ max_varying_vectors_,
+ static_cast<uint32>(feature_info_->workarounds().max_varying_vectors));
+ }
+ if (feature_info_->workarounds().max_vertex_uniform_vectors) {
+ max_vertex_uniform_vectors_ =
+ std::min(max_vertex_uniform_vectors_,
+ static_cast<uint32>(
+ feature_info_->workarounds().max_vertex_uniform_vectors));
+ }
+
+ program_manager_.reset(new ProgramManager(
+ program_cache_, max_varying_vectors_));
+
+ if (!texture_manager_->Initialize()) {
+    LOG(ERROR) << "ContextGroup::Initialize failed because texture manager "
+ << "failed to initialize.";
+ return false;
+ }
+
+ decoders_.push_back(base::AsWeakPtr<GLES2Decoder>(decoder));
+ return true;
+}
+
+namespace {
+
+bool IsNull(const base::WeakPtr<gles2::GLES2Decoder>& decoder) {
+ return !decoder.get();
+}
+
+template <typename T>
+class WeakPtrEquals {
+ public:
+ explicit WeakPtrEquals(T* t) : t_(t) {}
+
+ bool operator()(const base::WeakPtr<T>& t) {
+ return t.get() == t_;
+ }
+
+ private:
+ T* const t_;
+};
+
+} // anonymous namespace
+
+bool ContextGroup::HaveContexts() {
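+  // Drop decoders whose WeakPtrs have been invalidated before checking
+  // whether any live contexts remain.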
+ decoders_.erase(std::remove_if(decoders_.begin(), decoders_.end(), IsNull),
+ decoders_.end());
+ return !decoders_.empty();
+}
+
+void ContextGroup::Destroy(GLES2Decoder* decoder, bool have_context) {
+ decoders_.erase(std::remove_if(decoders_.begin(), decoders_.end(),
+ WeakPtrEquals<gles2::GLES2Decoder>(decoder)),
+ decoders_.end());
+ // If we still have contexts do nothing.
+ if (HaveContexts()) {
+ return;
+ }
+
+ if (buffer_manager_ != NULL) {
+ buffer_manager_->Destroy(have_context);
+ buffer_manager_.reset();
+ }
+
+ if (framebuffer_manager_ != NULL) {
+ framebuffer_manager_->Destroy(have_context);
+ if (texture_manager_)
+ texture_manager_->set_framebuffer_manager(NULL);
+ framebuffer_manager_.reset();
+ }
+
+ if (renderbuffer_manager_ != NULL) {
+ renderbuffer_manager_->Destroy(have_context);
+ renderbuffer_manager_.reset();
+ }
+
+ if (texture_manager_ != NULL) {
+ texture_manager_->Destroy(have_context);
+ texture_manager_.reset();
+ }
+
+ if (program_manager_ != NULL) {
+ program_manager_->Destroy(have_context);
+ program_manager_.reset();
+ }
+
+ if (shader_manager_ != NULL) {
+ shader_manager_->Destroy(have_context);
+ shader_manager_.reset();
+ }
+
+ memory_tracker_ = NULL;
+}
+
+uint32 ContextGroup::GetMemRepresented() const {
+ uint32 total = 0;
+ if (buffer_manager_.get())
+ total += buffer_manager_->mem_represented();
+ if (renderbuffer_manager_.get())
+ total += renderbuffer_manager_->mem_represented();
+ if (texture_manager_.get())
+ total += texture_manager_->mem_represented();
+ return total;
+}
+
+void ContextGroup::LoseContexts(GLenum reset_status) {
+ for (size_t ii = 0; ii < decoders_.size(); ++ii) {
+ if (decoders_[ii].get()) {
+ decoders_[ii]->LoseContext(reset_status);
+ }
+ }
+}
+
+ContextGroup::~ContextGroup() {
+ CHECK(!HaveContexts());
+}
+
+bool ContextGroup::CheckGLFeature(GLint min_required, GLint* v) {
+ GLint value = *v;
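+  // When enforce_gl_minimums_ is set (switches::kEnforceGLMinimums), clamp
+  // the driver-reported value down to the required minimum.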
+ if (enforce_gl_minimums_) {
+ value = std::min(min_required, value);
+ }
+ *v = value;
+ return value >= min_required;
+}
+
+bool ContextGroup::CheckGLFeatureU(GLint min_required, uint32* v) {
+ GLint value = *v;
+ if (enforce_gl_minimums_) {
+ value = std::min(min_required, value);
+ }
+ *v = value;
+ return value >= min_required;
+}
+
+bool ContextGroup::QueryGLFeature(
+ GLenum pname, GLint min_required, GLint* v) {
+ GLint value = 0;
+ glGetIntegerv(pname, &value);
+ *v = value;
+ return CheckGLFeature(min_required, v);
+}
+
+bool ContextGroup::QueryGLFeatureU(
+ GLenum pname, GLint min_required, uint32* v) {
+ uint32 value = 0;
+ GetIntegerv(pname, &value);
+ bool result = CheckGLFeatureU(min_required, &value);
+ *v = value;
+ return result;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/context_group.h b/gpu/command_buffer/service/context_group.h
new file mode 100644
index 0000000..ae4550c
--- /dev/null
+++ b/gpu/command_buffer/service/context_group.h
@@ -0,0 +1,222 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CONTEXT_GROUP_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CONTEXT_GROUP_H_
+
+#include <string>
+#include <vector>
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gles2_cmd_validation.h"
+#include "gpu/command_buffer/service/shader_translator_cache.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class TransferBufferManagerInterface;
+
+namespace gles2 {
+
+class ProgramCache;
+class BufferManager;
+class GLES2Decoder;
+class FramebufferManager;
+class MailboxManager;
+class RenderbufferManager;
+class ProgramManager;
+class ShaderManager;
+class TextureManager;
+class MemoryTracker;
+struct DisallowedFeatures;
+
+// A Context Group helps manage multiple GLES2Decoders that share
+// resources.
+class GPU_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
+ public:
+ ContextGroup(
+ const scoped_refptr<MailboxManager>& mailbox_manager,
+ const scoped_refptr<MemoryTracker>& memory_tracker,
+ const scoped_refptr<ShaderTranslatorCache>& shader_translator_cache,
+ const scoped_refptr<FeatureInfo>& feature_info,
+ bool bind_generates_resource);
+
+ // This should only be called by GLES2Decoder. This must be paired with a
+  // call to Destroy if it succeeds.
+ bool Initialize(
+ GLES2Decoder* decoder,
+ const DisallowedFeatures& disallowed_features);
+
+ // Destroys all the resources when called for the last context in the group.
+ // It should only be called by GLES2Decoder.
+ void Destroy(GLES2Decoder* decoder, bool have_context);
+
+ MailboxManager* mailbox_manager() const {
+ return mailbox_manager_.get();
+ }
+
+ MemoryTracker* memory_tracker() const {
+ return memory_tracker_.get();
+ }
+
+ ShaderTranslatorCache* shader_translator_cache() const {
+ return shader_translator_cache_.get();
+ }
+
+ bool bind_generates_resource() {
+ return bind_generates_resource_;
+ }
+
+ uint32 max_vertex_attribs() const {
+ return max_vertex_attribs_;
+ }
+
+ uint32 max_texture_units() const {
+ return max_texture_units_;
+ }
+
+ uint32 max_texture_image_units() const {
+ return max_texture_image_units_;
+ }
+
+ uint32 max_vertex_texture_image_units() const {
+ return max_vertex_texture_image_units_;
+ }
+
+ uint32 max_fragment_uniform_vectors() const {
+ return max_fragment_uniform_vectors_;
+ }
+
+ uint32 max_varying_vectors() const {
+ return max_varying_vectors_;
+ }
+
+ uint32 max_vertex_uniform_vectors() const {
+ return max_vertex_uniform_vectors_;
+ }
+
+ uint32 max_color_attachments() const {
+ return max_color_attachments_;
+ }
+
+ uint32 max_draw_buffers() const {
+ return max_draw_buffers_;
+ }
+
+ FeatureInfo* feature_info() {
+ return feature_info_.get();
+ }
+
+ BufferManager* buffer_manager() const {
+ return buffer_manager_.get();
+ }
+
+ FramebufferManager* framebuffer_manager() const {
+ return framebuffer_manager_.get();
+ }
+
+ RenderbufferManager* renderbuffer_manager() const {
+ return renderbuffer_manager_.get();
+ }
+
+ TextureManager* texture_manager() const {
+ return texture_manager_.get();
+ }
+
+ ProgramManager* program_manager() const {
+ return program_manager_.get();
+ }
+
+ bool has_program_cache() const {
+ return program_cache_ != NULL;
+ }
+
+ void set_program_cache(ProgramCache* program_cache) {
+ program_cache_ = program_cache;
+ }
+
+ ShaderManager* shader_manager() const {
+ return shader_manager_.get();
+ }
+
+ TransferBufferManagerInterface* transfer_buffer_manager() const {
+ return transfer_buffer_manager_.get();
+ }
+
+ uint32 GetMemRepresented() const;
+
+  // Loses all the contexts associated with this group.
+ void LoseContexts(GLenum reset_status);
+
+  // EXT_draw_buffers related state for the back buffer.
+ GLenum draw_buffer() const {
+ return draw_buffer_;
+ }
+ void set_draw_buffer(GLenum buf) {
+ draw_buffer_ = buf;
+ }
+
+ private:
+ friend class base::RefCounted<ContextGroup>;
+ ~ContextGroup();
+
+ bool CheckGLFeature(GLint min_required, GLint* v);
+ bool CheckGLFeatureU(GLint min_required, uint32* v);
+ bool QueryGLFeature(GLenum pname, GLint min_required, GLint* v);
+ bool QueryGLFeatureU(GLenum pname, GLint min_required, uint32* v);
+ bool HaveContexts();
+
+ scoped_refptr<MailboxManager> mailbox_manager_;
+ scoped_refptr<MemoryTracker> memory_tracker_;
+ scoped_refptr<ShaderTranslatorCache> shader_translator_cache_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+
+ bool enforce_gl_minimums_;
+ bool bind_generates_resource_;
+
+ uint32 max_vertex_attribs_;
+ uint32 max_texture_units_;
+ uint32 max_texture_image_units_;
+ uint32 max_vertex_texture_image_units_;
+ uint32 max_fragment_uniform_vectors_;
+ uint32 max_varying_vectors_;
+ uint32 max_vertex_uniform_vectors_;
+ uint32 max_color_attachments_;
+ uint32 max_draw_buffers_;
+
+ ProgramCache* program_cache_;
+
+ scoped_ptr<BufferManager> buffer_manager_;
+
+ scoped_ptr<FramebufferManager> framebuffer_manager_;
+
+ scoped_ptr<RenderbufferManager> renderbuffer_manager_;
+
+ scoped_ptr<TextureManager> texture_manager_;
+
+ scoped_ptr<ProgramManager> program_manager_;
+
+ scoped_ptr<ShaderManager> shader_manager_;
+
+ scoped_refptr<FeatureInfo> feature_info_;
+
+ std::vector<base::WeakPtr<gles2::GLES2Decoder> > decoders_;
+
+ GLenum draw_buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(ContextGroup);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_CONTEXT_GROUP_H_
+
+
diff --git a/gpu/command_buffer/service/context_group_unittest.cc b/gpu/command_buffer/service/context_group_unittest.cc
new file mode 100644
index 0000000..7aa1301
--- /dev/null
+++ b/gpu/command_buffer/service/context_group_unittest.cc
@@ -0,0 +1,136 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/context_group.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::HasSubstr;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Not;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+class ContextGroupTest : public GpuServiceTest {
+ public:
+ static const bool kBindGeneratesResource = false;
+
+ ContextGroupTest() {}
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ decoder_.reset(new MockGLES2Decoder());
+ group_ = scoped_refptr<ContextGroup>(
+ new ContextGroup(NULL, NULL, NULL, NULL, kBindGeneratesResource));
+ }
+
+ scoped_ptr<MockGLES2Decoder> decoder_;
+ scoped_refptr<ContextGroup> group_;
+};
+
+TEST_F(ContextGroupTest, Basic) {
+ // Test it starts off uninitialized.
+ EXPECT_EQ(0u, group_->max_vertex_attribs());
+ EXPECT_EQ(0u, group_->max_texture_units());
+ EXPECT_EQ(0u, group_->max_texture_image_units());
+ EXPECT_EQ(0u, group_->max_vertex_texture_image_units());
+ EXPECT_EQ(0u, group_->max_fragment_uniform_vectors());
+ EXPECT_EQ(0u, group_->max_varying_vectors());
+ EXPECT_EQ(0u, group_->max_vertex_uniform_vectors());
+ EXPECT_TRUE(group_->buffer_manager() == NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() == NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() == NULL);
+ EXPECT_TRUE(group_->texture_manager() == NULL);
+ EXPECT_TRUE(group_->program_manager() == NULL);
+ EXPECT_TRUE(group_->shader_manager() == NULL);
+}
+
+TEST_F(ContextGroupTest, InitializeNoExtensions) {
+ TestHelper::SetupContextGroupInitExpectations(
+ gl_.get(), DisallowedFeatures(), "", "", kBindGeneratesResource);
+ group_->Initialize(decoder_.get(), DisallowedFeatures());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kNumVertexAttribs),
+ group_->max_vertex_attribs());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kNumTextureUnits),
+ group_->max_texture_units());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kMaxTextureImageUnits),
+ group_->max_texture_image_units());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kMaxVertexTextureImageUnits),
+ group_->max_vertex_texture_image_units());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kMaxFragmentUniformVectors),
+ group_->max_fragment_uniform_vectors());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kMaxVaryingVectors),
+ group_->max_varying_vectors());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kMaxVertexUniformVectors),
+ group_->max_vertex_uniform_vectors());
+ EXPECT_TRUE(group_->buffer_manager() != NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() != NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() != NULL);
+ EXPECT_TRUE(group_->texture_manager() != NULL);
+ EXPECT_TRUE(group_->program_manager() != NULL);
+ EXPECT_TRUE(group_->shader_manager() != NULL);
+
+ group_->Destroy(decoder_.get(), false);
+ EXPECT_TRUE(group_->buffer_manager() == NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() == NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() == NULL);
+ EXPECT_TRUE(group_->texture_manager() == NULL);
+ EXPECT_TRUE(group_->program_manager() == NULL);
+ EXPECT_TRUE(group_->shader_manager() == NULL);
+}
+
+TEST_F(ContextGroupTest, MultipleContexts) {
+ scoped_ptr<MockGLES2Decoder> decoder2_(new MockGLES2Decoder());
+ TestHelper::SetupContextGroupInitExpectations(
+ gl_.get(), DisallowedFeatures(), "", "", kBindGeneratesResource);
+ group_->Initialize(decoder_.get(), DisallowedFeatures());
+ group_->Initialize(decoder2_.get(), DisallowedFeatures());
+
+ EXPECT_TRUE(group_->buffer_manager() != NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() != NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() != NULL);
+ EXPECT_TRUE(group_->texture_manager() != NULL);
+ EXPECT_TRUE(group_->program_manager() != NULL);
+ EXPECT_TRUE(group_->shader_manager() != NULL);
+
+ group_->Destroy(decoder_.get(), false);
+
+ EXPECT_TRUE(group_->buffer_manager() != NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() != NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() != NULL);
+ EXPECT_TRUE(group_->texture_manager() != NULL);
+ EXPECT_TRUE(group_->program_manager() != NULL);
+ EXPECT_TRUE(group_->shader_manager() != NULL);
+
+ group_->Destroy(decoder2_.get(), false);
+
+ EXPECT_TRUE(group_->buffer_manager() == NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() == NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() == NULL);
+ EXPECT_TRUE(group_->texture_manager() == NULL);
+ EXPECT_TRUE(group_->program_manager() == NULL);
+ EXPECT_TRUE(group_->shader_manager() == NULL);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/context_state.cc b/gpu/command_buffer/service/context_state.cc
new file mode 100644
index 0000000..eb7fc49
--- /dev/null
+++ b/gpu/command_buffer/service/context_state.cc
@@ -0,0 +1,302 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/context_state.h"
+
+#include <cmath>
+
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+static void EnableDisable(GLenum pname, bool enable) {
+ if (enable) {
+ glEnable(pname);
+ } else {
+ glDisable(pname);
+ }
+}
+
+GLuint Get2dServiceId(const TextureUnit& unit) {
+ return unit.bound_texture_2d.get()
+ ? unit.bound_texture_2d->service_id() : 0;
+}
+
+GLuint GetCubeServiceId(const TextureUnit& unit) {
+ return unit.bound_texture_cube_map.get()
+ ? unit.bound_texture_cube_map->service_id() : 0;
+}
+
+GLuint GetOesServiceId(const TextureUnit& unit) {
+ return unit.bound_texture_external_oes.get()
+ ? unit.bound_texture_external_oes->service_id() : 0;
+}
+
+GLuint GetArbServiceId(const TextureUnit& unit) {
+ return unit.bound_texture_rectangle_arb.get()
+ ? unit.bound_texture_rectangle_arb->service_id() : 0;
+}
+
+GLuint GetServiceId(const TextureUnit& unit, GLuint target) {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return Get2dServiceId(unit);
+ case GL_TEXTURE_CUBE_MAP:
+ return GetCubeServiceId(unit);
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return GetArbServiceId(unit);
+ case GL_TEXTURE_EXTERNAL_OES:
+ return GetOesServiceId(unit);
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
+bool TargetIsSupported(const FeatureInfo* feature_info, GLuint target) {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return true;
+ case GL_TEXTURE_CUBE_MAP:
+ return true;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return feature_info->feature_flags().arb_texture_rectangle;
+ case GL_TEXTURE_EXTERNAL_OES:
+ return feature_info->feature_flags().oes_egl_image_external;
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+} // anonymous namespace.
+
+TextureUnit::TextureUnit()
+ : bind_target(GL_TEXTURE_2D) {
+}
+
+TextureUnit::~TextureUnit() {
+}
+
+ContextState::ContextState(FeatureInfo* feature_info,
+ ErrorStateClient* error_state_client,
+ Logger* logger)
+ : active_texture_unit(0),
+ bound_renderbuffer_valid(false),
+ pack_reverse_row_order(false),
+ ignore_cached_state(false),
+ fbo_binding_for_scissor_workaround_dirty_(false),
+ feature_info_(feature_info),
+ error_state_(ErrorState::Create(error_state_client, logger)) {
+ Initialize();
+}
+
+ContextState::~ContextState() {
+}
+
+void ContextState::RestoreTextureUnitBindings(
+ GLuint unit, const ContextState* prev_state) const {
+ DCHECK_LT(unit, texture_units.size());
+ const TextureUnit& texture_unit = texture_units[unit];
+ GLuint service_id_2d = Get2dServiceId(texture_unit);
+ GLuint service_id_cube = GetCubeServiceId(texture_unit);
+ GLuint service_id_oes = GetOesServiceId(texture_unit);
+ GLuint service_id_arb = GetArbServiceId(texture_unit);
+
+ bool bind_texture_2d = true;
+ bool bind_texture_cube = true;
+ bool bind_texture_oes = feature_info_->feature_flags().oes_egl_image_external;
+ bool bind_texture_arb = feature_info_->feature_flags().arb_texture_rectangle;
+
+ if (prev_state) {
+ const TextureUnit& prev_unit = prev_state->texture_units[unit];
+ bind_texture_2d = service_id_2d != Get2dServiceId(prev_unit);
+ bind_texture_cube = service_id_cube != GetCubeServiceId(prev_unit);
+ bind_texture_oes =
+ bind_texture_oes && service_id_oes != GetOesServiceId(prev_unit);
+ bind_texture_arb =
+ bind_texture_arb && service_id_arb != GetArbServiceId(prev_unit);
+ }
+
+ // Early-out if nothing has changed from the previous state.
+ if (!bind_texture_2d && !bind_texture_cube
+ && !bind_texture_oes && !bind_texture_arb) {
+ return;
+ }
+
+ glActiveTexture(GL_TEXTURE0 + unit);
+ if (bind_texture_2d) {
+ glBindTexture(GL_TEXTURE_2D, service_id_2d);
+ }
+ if (bind_texture_cube) {
+ glBindTexture(GL_TEXTURE_CUBE_MAP, service_id_cube);
+ }
+ if (bind_texture_oes) {
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, service_id_oes);
+ }
+ if (bind_texture_arb) {
+ glBindTexture(GL_TEXTURE_RECTANGLE_ARB, service_id_arb);
+ }
+}
+
+void ContextState::RestoreBufferBindings() const {
+ if (vertex_attrib_manager.get()) {
+ Buffer* element_array_buffer =
+ vertex_attrib_manager->element_array_buffer();
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,
+ element_array_buffer ? element_array_buffer->service_id() : 0);
+ }
+ glBindBuffer(GL_ARRAY_BUFFER,
+ bound_array_buffer.get() ? bound_array_buffer->service_id() : 0);
+}
+
+void ContextState::RestoreRenderbufferBindings() {
+ // Require Renderbuffer rebind.
+ bound_renderbuffer_valid = false;
+}
+
+void ContextState::RestoreProgramBindings() const {
+ glUseProgram(current_program.get() ? current_program->service_id() : 0);
+}
+
+void ContextState::RestoreActiveTexture() const {
+ glActiveTexture(GL_TEXTURE0 + active_texture_unit);
+}
+
+void ContextState::RestoreAllTextureUnitBindings(
+ const ContextState* prev_state) const {
+ // Restore Texture state.
+ for (size_t ii = 0; ii < texture_units.size(); ++ii) {
+ RestoreTextureUnitBindings(ii, prev_state);
+ }
+ RestoreActiveTexture();
+}
+
+void ContextState::RestoreActiveTextureUnitBinding(unsigned int target) const {
+ DCHECK_LT(active_texture_unit, texture_units.size());
+ const TextureUnit& texture_unit = texture_units[active_texture_unit];
+ if (TargetIsSupported(feature_info_, target))
+ glBindTexture(target, GetServiceId(texture_unit, target));
+}
+
+void ContextState::RestoreVertexAttribValues() const {
+ for (size_t attrib = 0; attrib < vertex_attrib_manager->num_attribs();
+ ++attrib) {
+ glVertexAttrib4fv(attrib, attrib_values[attrib].v);
+ }
+}
+
+void ContextState::RestoreVertexAttribArrays(
+ const scoped_refptr<VertexAttribManager> attrib_manager) const {
+ // This is expected to be called only for VAO with service_id 0,
+ // either to restore the default VAO or a virtual VAO with service_id 0.
+ GLuint vao_service_id = attrib_manager->service_id();
+ DCHECK(vao_service_id == 0);
+
+ // Bind VAO if supported.
+ if (feature_info_->feature_flags().native_vertex_array_object)
+ glBindVertexArrayOES(vao_service_id);
+
+ // Restore vertex attrib arrays.
+ for (size_t attrib_index = 0; attrib_index < attrib_manager->num_attribs();
+ ++attrib_index) {
+ const VertexAttrib* attrib = attrib_manager->GetVertexAttrib(attrib_index);
+
+ // Restore vertex array.
+ Buffer* buffer = attrib->buffer();
+ GLuint buffer_service_id = buffer ? buffer->service_id() : 0;
+ glBindBuffer(GL_ARRAY_BUFFER, buffer_service_id);
+ const void* ptr = reinterpret_cast<const void*>(attrib->offset());
+ glVertexAttribPointer(attrib_index,
+ attrib->size(),
+ attrib->type(),
+ attrib->normalized(),
+ attrib->gl_stride(),
+ ptr);
+
+ // Restore attrib divisor if supported.
+ if (feature_info_->feature_flags().angle_instanced_arrays)
+ glVertexAttribDivisorANGLE(attrib_index, attrib->divisor());
+
+ // Never touch vertex attribute 0's state (in particular, never
+ // disable it) when running on desktop GL because it will never be
+ // re-enabled.
+ if (attrib_index != 0 ||
+ gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ if (attrib->enabled()) {
+ glEnableVertexAttribArray(attrib_index);
+ } else {
+ glDisableVertexAttribArray(attrib_index);
+ }
+ }
+ }
+}
+
+void ContextState::RestoreVertexAttribs() const {
+ // Restore Vertex Attrib Arrays
+ // TODO: This check should not be needed. RestoreState is getting called
+ // before GLES2Decoder::Initialize, which is a bug.
+ if (vertex_attrib_manager.get()) {
+ // Restore VAOs.
+ if (feature_info_->feature_flags().native_vertex_array_object) {
+ // If default VAO is still using shared id 0 instead of unique ids
+ // per-context, default VAO state must be restored.
+ GLuint default_vao_service_id =
+ default_vertex_attrib_manager->service_id();
+ if (default_vao_service_id == 0)
+ RestoreVertexAttribArrays(default_vertex_attrib_manager);
+
+ // Restore the current VAO binding, unless it's the same as the
+ // default above.
+ GLuint curr_vao_service_id = vertex_attrib_manager->service_id();
+ if (curr_vao_service_id != 0)
+ glBindVertexArrayOES(curr_vao_service_id);
+ } else {
+ // If native VAO isn't supported, emulated VAOs are used.
+ // Restore to the currently bound VAO.
+ RestoreVertexAttribArrays(vertex_attrib_manager);
+ }
+ }
+
+ // Values set with glVertexAttrib4fv aren't part of VAO state and must be
+ // restored separately.
+ RestoreVertexAttribValues();
+}
+
+void ContextState::RestoreGlobalState(const ContextState* prev_state) const {
+ InitCapabilities(prev_state);
+ InitState(prev_state);
+}
+
+void ContextState::RestoreState(const ContextState* prev_state) {
+ RestoreAllTextureUnitBindings(prev_state);
+ RestoreVertexAttribs();
+ RestoreBufferBindings();
+ RestoreRenderbufferBindings();
+ RestoreProgramBindings();
+ RestoreGlobalState(prev_state);
+}
+
+ErrorState* ContextState::GetErrorState() {
+ return error_state_.get();
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/service/context_state_impl_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
+
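Editor's note: the sketch below is illustrative only and not part of the patch. It assumes a caller that holds the ContextState of both the outgoing and the incoming context; the function and parameter names are hypothetical.

// Hypothetical caller -- not part of the Chromium sources.
#include "gpu/command_buffer/service/context_state.h"

void SwitchVirtualContexts(gpu::gles2::ContextState* incoming,
                           const gpu::gles2::ContextState* outgoing) {
  // Passing the previous state lets RestoreState() skip texture-unit
  // bindings and global state that already match; vertex attribs, buffer
  // and program bindings are always re-issued. Passing NULL restores
  // everything unconditionally.
  incoming->RestoreState(outgoing);
}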
diff --git a/gpu/command_buffer/service/context_state.h b/gpu/command_buffer/service/context_state.h
new file mode 100644
index 0000000..7488f57
--- /dev/null
+++ b/gpu/command_buffer/service/context_state.h
@@ -0,0 +1,221 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the ContextState class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_H_
+
+#include <vector>
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class Buffer;
+class ErrorState;
+class ErrorStateClient;
+class FeatureInfo;
+class Framebuffer;
+class Program;
+class Renderbuffer;
+
+// State associated with each texture unit.
+struct GPU_EXPORT TextureUnit {
+ TextureUnit();
+ ~TextureUnit();
+
+ // The last target that was bound to this texture unit.
+ GLenum bind_target;
+
+ // texture currently bound to this unit's GL_TEXTURE_2D with glBindTexture
+ scoped_refptr<TextureRef> bound_texture_2d;
+
+ // texture currently bound to this unit's GL_TEXTURE_CUBE_MAP with
+ // glBindTexture
+ scoped_refptr<TextureRef> bound_texture_cube_map;
+
+ // texture currently bound to this unit's GL_TEXTURE_EXTERNAL_OES with
+ // glBindTexture
+ scoped_refptr<TextureRef> bound_texture_external_oes;
+
+ // texture currently bound to this unit's GL_TEXTURE_RECTANGLE_ARB with
+ // glBindTexture
+ scoped_refptr<TextureRef> bound_texture_rectangle_arb;
+
+ scoped_refptr<TextureRef> GetInfoForSamplerType(
+ GLenum type) {
+ DCHECK(type == GL_SAMPLER_2D || type == GL_SAMPLER_CUBE ||
+ type == GL_SAMPLER_EXTERNAL_OES || type == GL_SAMPLER_2D_RECT_ARB);
+ switch (type) {
+ case GL_SAMPLER_2D:
+ return bound_texture_2d;
+ case GL_SAMPLER_CUBE:
+ return bound_texture_cube_map;
+ case GL_SAMPLER_EXTERNAL_OES:
+ return bound_texture_external_oes;
+ case GL_SAMPLER_2D_RECT_ARB:
+ return bound_texture_rectangle_arb;
+ }
+
+ NOTREACHED();
+ return NULL;
+ }
+
+ void Unbind(TextureRef* texture) {
+ if (bound_texture_2d.get() == texture) {
+ bound_texture_2d = NULL;
+ }
+ if (bound_texture_cube_map.get() == texture) {
+ bound_texture_cube_map = NULL;
+ }
+ if (bound_texture_external_oes.get() == texture) {
+ bound_texture_external_oes = NULL;
+ }
+ }
+};
+
+struct Vec4 {
+ Vec4() {
+ v[0] = 0.0f;
+ v[1] = 0.0f;
+ v[2] = 0.0f;
+ v[3] = 1.0f;
+ }
+ float v[4];
+};
+
+struct GPU_EXPORT ContextState {
+ ContextState(FeatureInfo* feature_info,
+ ErrorStateClient* error_state_client,
+ Logger* logger);
+ ~ContextState();
+
+ void Initialize();
+
+ void SetIgnoreCachedStateForTest(bool ignore) {
+ ignore_cached_state = ignore;
+ }
+
+ void RestoreState(const ContextState* prev_state);
+ void InitCapabilities(const ContextState* prev_state) const;
+ void InitState(const ContextState* prev_state) const;
+
+ void RestoreActiveTexture() const;
+ void RestoreAllTextureUnitBindings(const ContextState* prev_state) const;
+ void RestoreActiveTextureUnitBinding(unsigned int target) const;
+ void RestoreVertexAttribValues() const;
+ void RestoreVertexAttribArrays(
+ const scoped_refptr<VertexAttribManager> attrib_manager) const;
+ void RestoreVertexAttribs() const;
+ void RestoreBufferBindings() const;
+ void RestoreGlobalState(const ContextState* prev_state) const;
+ void RestoreProgramBindings() const;
+ void RestoreRenderbufferBindings();
+ void RestoreTextureUnitBindings(
+ GLuint unit, const ContextState* prev_state) const;
+
+ // Helper for getting cached state.
+ bool GetStateAsGLint(
+ GLenum pname, GLint* params, GLsizei* num_written) const;
+ bool GetStateAsGLfloat(
+ GLenum pname, GLfloat* params, GLsizei* num_written) const;
+ bool GetEnabled(GLenum cap) const;
+
+ inline void SetDeviceColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ if (cached_color_mask_red == red && cached_color_mask_green == green &&
+ cached_color_mask_blue == blue && cached_color_mask_alpha == alpha &&
+ !ignore_cached_state)
+ return;
+ cached_color_mask_red = red;
+ cached_color_mask_green = green;
+ cached_color_mask_blue = blue;
+ cached_color_mask_alpha = alpha;
+ glColorMask(red, green, blue, alpha);
+ }
+
+ inline void SetDeviceDepthMask(GLboolean mask) {
+ if (cached_depth_mask == mask && !ignore_cached_state)
+ return;
+ cached_depth_mask = mask;
+ glDepthMask(mask);
+ }
+
+ inline void SetDeviceStencilMaskSeparate(GLenum op, GLuint mask) {
+ if (op == GL_FRONT) {
+ if (cached_stencil_front_writemask == mask && !ignore_cached_state)
+ return;
+ cached_stencil_front_writemask = mask;
+ } else if (op == GL_BACK) {
+ if (cached_stencil_back_writemask == mask && !ignore_cached_state)
+ return;
+ cached_stencil_back_writemask = mask;
+ } else {
+ NOTREACHED();
+ return;
+ }
+ glStencilMaskSeparate(op, mask);
+ }
+
+ ErrorState* GetErrorState();
+
+ #include "gpu/command_buffer/service/context_state_autogen.h"
+
+ EnableFlags enable_flags;
+
+ // Current active texture by 0 - n index.
+ // In other words, if we call glActiveTexture(GL_TEXTURE2) this value would
+ // be 2.
+ GLuint active_texture_unit;
+
+ // The currently bound array buffer. If this is 0 it is illegal to call
+ // glVertexAttribPointer.
+ scoped_refptr<Buffer> bound_array_buffer;
+
+ // Which textures are bound to texture units through glActiveTexture.
+ std::vector<TextureUnit> texture_units;
+
+ // The values for each attrib.
+ std::vector<Vec4> attrib_values;
+
+ // Class that manages vertex attribs.
+ scoped_refptr<VertexAttribManager> vertex_attrib_manager;
+ scoped_refptr<VertexAttribManager> default_vertex_attrib_manager;
+
+ // The program in use by glUseProgram
+ scoped_refptr<Program> current_program;
+
+ // The currently bound renderbuffer
+ scoped_refptr<Renderbuffer> bound_renderbuffer;
+ bool bound_renderbuffer_valid;
+
+ // A map of target -> Query for current queries.
+ typedef std::map<GLuint, scoped_refptr<QueryManager::Query> > QueryMap;
+ QueryMap current_queries;
+
+ bool pack_reverse_row_order;
+ bool ignore_cached_state;
+
+ mutable bool fbo_binding_for_scissor_workaround_dirty_;
+ FeatureInfo* feature_info_;
+
+ private:
+ scoped_ptr<ErrorState> error_state_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_H_
+
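Editor's note: illustrative sketch, not part of the patch. Assuming a ContextState instance is available, the cached setters declared above collapse redundant driver calls like so:

// Hypothetical usage -- not part of the Chromium sources.
#include "gpu/command_buffer/service/context_state.h"

void DisableDepthWrites(gpu::gles2::ContextState* state) {
  state->SetDeviceDepthMask(GL_FALSE);  // first call reaches glDepthMask()
  state->SetDeviceDepthMask(GL_FALSE);  // value already cached: no GL call
  // SetIgnoreCachedStateForTest(true) would force the second call through,
  // which is what the ignore_cached_state flag exists for.
}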
diff --git a/gpu/command_buffer/service/context_state_autogen.h b/gpu/command_buffer/service/context_state_autogen.h
new file mode 100644
index 0000000..fcae244
--- /dev/null
+++ b/gpu/command_buffer/service/context_state_autogen.h
@@ -0,0 +1,162 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by context_state.h
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_AUTOGEN_H_
+
+struct EnableFlags {
+ EnableFlags();
+ bool blend;
+ bool cached_blend;
+ bool cull_face;
+ bool cached_cull_face;
+ bool depth_test;
+ bool cached_depth_test;
+ bool dither;
+ bool cached_dither;
+ bool polygon_offset_fill;
+ bool cached_polygon_offset_fill;
+ bool sample_alpha_to_coverage;
+ bool cached_sample_alpha_to_coverage;
+ bool sample_coverage;
+ bool cached_sample_coverage;
+ bool scissor_test;
+ bool cached_scissor_test;
+ bool stencil_test;
+ bool cached_stencil_test;
+};
+
+GLfloat blend_color_red;
+GLfloat blend_color_green;
+GLfloat blend_color_blue;
+GLfloat blend_color_alpha;
+GLenum blend_equation_rgb;
+GLenum blend_equation_alpha;
+GLenum blend_source_rgb;
+GLenum blend_dest_rgb;
+GLenum blend_source_alpha;
+GLenum blend_dest_alpha;
+GLfloat color_clear_red;
+GLfloat color_clear_green;
+GLfloat color_clear_blue;
+GLfloat color_clear_alpha;
+GLclampf depth_clear;
+GLint stencil_clear;
+GLboolean color_mask_red;
+GLboolean cached_color_mask_red;
+GLboolean color_mask_green;
+GLboolean cached_color_mask_green;
+GLboolean color_mask_blue;
+GLboolean cached_color_mask_blue;
+GLboolean color_mask_alpha;
+GLboolean cached_color_mask_alpha;
+GLenum cull_mode;
+GLenum depth_func;
+GLboolean depth_mask;
+GLboolean cached_depth_mask;
+GLclampf z_near;
+GLclampf z_far;
+GLenum front_face;
+GLenum hint_generate_mipmap;
+GLenum hint_fragment_shader_derivative;
+GLfloat line_width;
+GLfloat modelview_matrix[16];
+GLfloat projection_matrix[16];
+GLint pack_alignment;
+GLint unpack_alignment;
+GLfloat polygon_offset_factor;
+GLfloat polygon_offset_units;
+GLclampf sample_coverage_value;
+GLboolean sample_coverage_invert;
+GLint scissor_x;
+GLint scissor_y;
+GLsizei scissor_width;
+GLsizei scissor_height;
+GLenum stencil_front_func;
+GLint stencil_front_ref;
+GLuint stencil_front_mask;
+GLenum stencil_back_func;
+GLint stencil_back_ref;
+GLuint stencil_back_mask;
+GLuint stencil_front_writemask;
+GLuint cached_stencil_front_writemask;
+GLuint stencil_back_writemask;
+GLuint cached_stencil_back_writemask;
+GLenum stencil_front_fail_op;
+GLenum stencil_front_z_fail_op;
+GLenum stencil_front_z_pass_op;
+GLenum stencil_back_fail_op;
+GLenum stencil_back_z_fail_op;
+GLenum stencil_back_z_pass_op;
+GLint viewport_x;
+GLint viewport_y;
+GLsizei viewport_width;
+GLsizei viewport_height;
+
+inline void SetDeviceCapabilityState(GLenum cap, bool enable) {
+ switch (cap) {
+ case GL_BLEND:
+ if (enable_flags.cached_blend == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_blend = enable;
+ break;
+ case GL_CULL_FACE:
+ if (enable_flags.cached_cull_face == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_cull_face = enable;
+ break;
+ case GL_DEPTH_TEST:
+ if (enable_flags.cached_depth_test == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_depth_test = enable;
+ break;
+ case GL_DITHER:
+ if (enable_flags.cached_dither == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_dither = enable;
+ break;
+ case GL_POLYGON_OFFSET_FILL:
+ if (enable_flags.cached_polygon_offset_fill == enable &&
+ !ignore_cached_state)
+ return;
+ enable_flags.cached_polygon_offset_fill = enable;
+ break;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ if (enable_flags.cached_sample_alpha_to_coverage == enable &&
+ !ignore_cached_state)
+ return;
+ enable_flags.cached_sample_alpha_to_coverage = enable;
+ break;
+ case GL_SAMPLE_COVERAGE:
+ if (enable_flags.cached_sample_coverage == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_sample_coverage = enable;
+ break;
+ case GL_SCISSOR_TEST:
+ if (enable_flags.cached_scissor_test == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_scissor_test = enable;
+ break;
+ case GL_STENCIL_TEST:
+ if (enable_flags.cached_stencil_test == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_stencil_test = enable;
+ break;
+ default:
+ NOTREACHED();
+ return;
+ }
+ if (enable)
+ glEnable(cap);
+ else
+ glDisable(cap);
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_AUTOGEN_H_
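Editor's note: illustrative sketch, not part of the patch. The cached_* members mirror what the real driver has been told, which can diverge from the client-visible flags when a workaround forces state; the call site below is hypothetical.

// Hypothetical usage -- not part of the Chromium sources.
void ForceScissorForWorkaround(gpu::gles2::ContextState* state) {
  // Updates enable_flags.cached_scissor_test and calls glEnable() only if
  // the cached value actually changes (or ignore_cached_state is set).
  state->SetDeviceCapabilityState(GL_SCISSOR_TEST, true);
  // The client-visible flag (enable_flags.scissor_test) is left untouched,
  // so GetEnabled(GL_SCISSOR_TEST) still reports what the app requested.
}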
diff --git a/gpu/command_buffer/service/context_state_impl_autogen.h b/gpu/command_buffer/service/context_state_impl_autogen.h
new file mode 100644
index 0000000..1b1e5fe
--- /dev/null
+++ b/gpu/command_buffer/service/context_state_impl_autogen.h
@@ -0,0 +1,1068 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by context_state.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_IMPL_AUTOGEN_H_
+
+ContextState::EnableFlags::EnableFlags()
+ : blend(false),
+ cached_blend(false),
+ cull_face(false),
+ cached_cull_face(false),
+ depth_test(false),
+ cached_depth_test(false),
+ dither(true),
+ cached_dither(true),
+ polygon_offset_fill(false),
+ cached_polygon_offset_fill(false),
+ sample_alpha_to_coverage(false),
+ cached_sample_alpha_to_coverage(false),
+ sample_coverage(false),
+ cached_sample_coverage(false),
+ scissor_test(false),
+ cached_scissor_test(false),
+ stencil_test(false),
+ cached_stencil_test(false) {
+}
+
+void ContextState::Initialize() {
+ blend_color_red = 0.0f;
+ blend_color_green = 0.0f;
+ blend_color_blue = 0.0f;
+ blend_color_alpha = 0.0f;
+ blend_equation_rgb = GL_FUNC_ADD;
+ blend_equation_alpha = GL_FUNC_ADD;
+ blend_source_rgb = GL_ONE;
+ blend_dest_rgb = GL_ZERO;
+ blend_source_alpha = GL_ONE;
+ blend_dest_alpha = GL_ZERO;
+ color_clear_red = 0.0f;
+ color_clear_green = 0.0f;
+ color_clear_blue = 0.0f;
+ color_clear_alpha = 0.0f;
+ depth_clear = 1.0f;
+ stencil_clear = 0;
+ color_mask_red = true;
+ cached_color_mask_red = true;
+ color_mask_green = true;
+ cached_color_mask_green = true;
+ color_mask_blue = true;
+ cached_color_mask_blue = true;
+ color_mask_alpha = true;
+ cached_color_mask_alpha = true;
+ cull_mode = GL_BACK;
+ depth_func = GL_LESS;
+ depth_mask = true;
+ cached_depth_mask = true;
+ z_near = 0.0f;
+ z_far = 1.0f;
+ front_face = GL_CCW;
+ hint_generate_mipmap = GL_DONT_CARE;
+ hint_fragment_shader_derivative = GL_DONT_CARE;
+ line_width = 1.0f;
+ modelview_matrix[0] = 1.0f;
+ modelview_matrix[1] = 0.0f;
+ modelview_matrix[2] = 0.0f;
+ modelview_matrix[3] = 0.0f;
+ modelview_matrix[4] = 0.0f;
+ modelview_matrix[5] = 1.0f;
+ modelview_matrix[6] = 0.0f;
+ modelview_matrix[7] = 0.0f;
+ modelview_matrix[8] = 0.0f;
+ modelview_matrix[9] = 0.0f;
+ modelview_matrix[10] = 1.0f;
+ modelview_matrix[11] = 0.0f;
+ modelview_matrix[12] = 0.0f;
+ modelview_matrix[13] = 0.0f;
+ modelview_matrix[14] = 0.0f;
+ modelview_matrix[15] = 1.0f;
+ projection_matrix[0] = 1.0f;
+ projection_matrix[1] = 0.0f;
+ projection_matrix[2] = 0.0f;
+ projection_matrix[3] = 0.0f;
+ projection_matrix[4] = 0.0f;
+ projection_matrix[5] = 1.0f;
+ projection_matrix[6] = 0.0f;
+ projection_matrix[7] = 0.0f;
+ projection_matrix[8] = 0.0f;
+ projection_matrix[9] = 0.0f;
+ projection_matrix[10] = 1.0f;
+ projection_matrix[11] = 0.0f;
+ projection_matrix[12] = 0.0f;
+ projection_matrix[13] = 0.0f;
+ projection_matrix[14] = 0.0f;
+ projection_matrix[15] = 1.0f;
+ pack_alignment = 4;
+ unpack_alignment = 4;
+ polygon_offset_factor = 0.0f;
+ polygon_offset_units = 0.0f;
+ sample_coverage_value = 1.0f;
+ sample_coverage_invert = false;
+ scissor_x = 0;
+ scissor_y = 0;
+ scissor_width = 1;
+ scissor_height = 1;
+ stencil_front_func = GL_ALWAYS;
+ stencil_front_ref = 0;
+ stencil_front_mask = 0xFFFFFFFFU;
+ stencil_back_func = GL_ALWAYS;
+ stencil_back_ref = 0;
+ stencil_back_mask = 0xFFFFFFFFU;
+ stencil_front_writemask = 0xFFFFFFFFU;
+ cached_stencil_front_writemask = 0xFFFFFFFFU;
+ stencil_back_writemask = 0xFFFFFFFFU;
+ cached_stencil_back_writemask = 0xFFFFFFFFU;
+ stencil_front_fail_op = GL_KEEP;
+ stencil_front_z_fail_op = GL_KEEP;
+ stencil_front_z_pass_op = GL_KEEP;
+ stencil_back_fail_op = GL_KEEP;
+ stencil_back_z_fail_op = GL_KEEP;
+ stencil_back_z_pass_op = GL_KEEP;
+ viewport_x = 0;
+ viewport_y = 0;
+ viewport_width = 1;
+ viewport_height = 1;
+}
+
+void ContextState::InitCapabilities(const ContextState* prev_state) const {
+ if (prev_state) {
+ if (prev_state->enable_flags.cached_blend != enable_flags.cached_blend)
+ EnableDisable(GL_BLEND, enable_flags.cached_blend);
+ if (prev_state->enable_flags.cached_cull_face !=
+ enable_flags.cached_cull_face)
+ EnableDisable(GL_CULL_FACE, enable_flags.cached_cull_face);
+ if (prev_state->enable_flags.cached_depth_test !=
+ enable_flags.cached_depth_test)
+ EnableDisable(GL_DEPTH_TEST, enable_flags.cached_depth_test);
+ if (prev_state->enable_flags.cached_dither != enable_flags.cached_dither)
+ EnableDisable(GL_DITHER, enable_flags.cached_dither);
+ if (prev_state->enable_flags.cached_polygon_offset_fill !=
+ enable_flags.cached_polygon_offset_fill)
+ EnableDisable(GL_POLYGON_OFFSET_FILL,
+ enable_flags.cached_polygon_offset_fill);
+ if (prev_state->enable_flags.cached_sample_alpha_to_coverage !=
+ enable_flags.cached_sample_alpha_to_coverage)
+ EnableDisable(GL_SAMPLE_ALPHA_TO_COVERAGE,
+ enable_flags.cached_sample_alpha_to_coverage);
+ if (prev_state->enable_flags.cached_sample_coverage !=
+ enable_flags.cached_sample_coverage)
+ EnableDisable(GL_SAMPLE_COVERAGE, enable_flags.cached_sample_coverage);
+ if (prev_state->enable_flags.cached_scissor_test !=
+ enable_flags.cached_scissor_test)
+ EnableDisable(GL_SCISSOR_TEST, enable_flags.cached_scissor_test);
+ if (prev_state->enable_flags.cached_stencil_test !=
+ enable_flags.cached_stencil_test)
+ EnableDisable(GL_STENCIL_TEST, enable_flags.cached_stencil_test);
+ } else {
+ EnableDisable(GL_BLEND, enable_flags.cached_blend);
+ EnableDisable(GL_CULL_FACE, enable_flags.cached_cull_face);
+ EnableDisable(GL_DEPTH_TEST, enable_flags.cached_depth_test);
+ EnableDisable(GL_DITHER, enable_flags.cached_dither);
+ EnableDisable(GL_POLYGON_OFFSET_FILL,
+ enable_flags.cached_polygon_offset_fill);
+ EnableDisable(GL_SAMPLE_ALPHA_TO_COVERAGE,
+ enable_flags.cached_sample_alpha_to_coverage);
+ EnableDisable(GL_SAMPLE_COVERAGE, enable_flags.cached_sample_coverage);
+ EnableDisable(GL_SCISSOR_TEST, enable_flags.cached_scissor_test);
+ EnableDisable(GL_STENCIL_TEST, enable_flags.cached_stencil_test);
+ }
+}
+
+void ContextState::InitState(const ContextState* prev_state) const {
+ if (prev_state) {
+ if ((blend_color_red != prev_state->blend_color_red) ||
+ (blend_color_green != prev_state->blend_color_green) ||
+ (blend_color_blue != prev_state->blend_color_blue) ||
+ (blend_color_alpha != prev_state->blend_color_alpha))
+ glBlendColor(blend_color_red,
+ blend_color_green,
+ blend_color_blue,
+ blend_color_alpha);
+ if ((blend_equation_rgb != prev_state->blend_equation_rgb) ||
+ (blend_equation_alpha != prev_state->blend_equation_alpha))
+ glBlendEquationSeparate(blend_equation_rgb, blend_equation_alpha);
+ if ((blend_source_rgb != prev_state->blend_source_rgb) ||
+ (blend_dest_rgb != prev_state->blend_dest_rgb) ||
+ (blend_source_alpha != prev_state->blend_source_alpha) ||
+ (blend_dest_alpha != prev_state->blend_dest_alpha))
+ glBlendFuncSeparate(blend_source_rgb,
+ blend_dest_rgb,
+ blend_source_alpha,
+ blend_dest_alpha);
+ if ((color_clear_red != prev_state->color_clear_red) ||
+ (color_clear_green != prev_state->color_clear_green) ||
+ (color_clear_blue != prev_state->color_clear_blue) ||
+ (color_clear_alpha != prev_state->color_clear_alpha))
+ glClearColor(color_clear_red,
+ color_clear_green,
+ color_clear_blue,
+ color_clear_alpha);
+ if ((depth_clear != prev_state->depth_clear))
+ glClearDepth(depth_clear);
+ if ((stencil_clear != prev_state->stencil_clear))
+ glClearStencil(stencil_clear);
+ if ((cached_color_mask_red != prev_state->cached_color_mask_red) ||
+ (cached_color_mask_green != prev_state->cached_color_mask_green) ||
+ (cached_color_mask_blue != prev_state->cached_color_mask_blue) ||
+ (cached_color_mask_alpha != prev_state->cached_color_mask_alpha))
+ glColorMask(cached_color_mask_red,
+ cached_color_mask_green,
+ cached_color_mask_blue,
+ cached_color_mask_alpha);
+ if ((cull_mode != prev_state->cull_mode))
+ glCullFace(cull_mode);
+ if ((depth_func != prev_state->depth_func))
+ glDepthFunc(depth_func);
+ if ((cached_depth_mask != prev_state->cached_depth_mask))
+ glDepthMask(cached_depth_mask);
+ if ((z_near != prev_state->z_near) || (z_far != prev_state->z_far))
+ glDepthRange(z_near, z_far);
+ if ((front_face != prev_state->front_face))
+ glFrontFace(front_face);
+ if (prev_state->hint_generate_mipmap != hint_generate_mipmap) {
+ glHint(GL_GENERATE_MIPMAP_HINT, hint_generate_mipmap);
+ }
+ if (feature_info_->feature_flags().oes_standard_derivatives) {
+ if (prev_state->hint_fragment_shader_derivative !=
+ hint_fragment_shader_derivative) {
+ glHint(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES,
+ hint_fragment_shader_derivative);
+ }
+ }
+ if ((line_width != prev_state->line_width))
+ glLineWidth(line_width);
+ if (feature_info_->feature_flags().chromium_path_rendering) {
+ if (memcmp(prev_state->modelview_matrix,
+ modelview_matrix,
+ sizeof(GLfloat) * 16)) {
+ glMatrixLoadfEXT(GL_PATH_MODELVIEW_CHROMIUM, modelview_matrix);
+ }
+ }
+ if (feature_info_->feature_flags().chromium_path_rendering) {
+ if (memcmp(prev_state->projection_matrix,
+ projection_matrix,
+ sizeof(GLfloat) * 16)) {
+ glMatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM, projection_matrix);
+ }
+ }
+ if (prev_state->pack_alignment != pack_alignment) {
+ glPixelStorei(GL_PACK_ALIGNMENT, pack_alignment);
+ }
+ if (prev_state->unpack_alignment != unpack_alignment) {
+ glPixelStorei(GL_UNPACK_ALIGNMENT, unpack_alignment);
+ }
+ if ((polygon_offset_factor != prev_state->polygon_offset_factor) ||
+ (polygon_offset_units != prev_state->polygon_offset_units))
+ glPolygonOffset(polygon_offset_factor, polygon_offset_units);
+ if ((sample_coverage_value != prev_state->sample_coverage_value) ||
+ (sample_coverage_invert != prev_state->sample_coverage_invert))
+ glSampleCoverage(sample_coverage_value, sample_coverage_invert);
+ if ((scissor_x != prev_state->scissor_x) ||
+ (scissor_y != prev_state->scissor_y) ||
+ (scissor_width != prev_state->scissor_width) ||
+ (scissor_height != prev_state->scissor_height))
+ glScissor(scissor_x, scissor_y, scissor_width, scissor_height);
+ if ((stencil_front_func != prev_state->stencil_front_func) ||
+ (stencil_front_ref != prev_state->stencil_front_ref) ||
+ (stencil_front_mask != prev_state->stencil_front_mask))
+ glStencilFuncSeparate(
+ GL_FRONT, stencil_front_func, stencil_front_ref, stencil_front_mask);
+ if ((stencil_back_func != prev_state->stencil_back_func) ||
+ (stencil_back_ref != prev_state->stencil_back_ref) ||
+ (stencil_back_mask != prev_state->stencil_back_mask))
+ glStencilFuncSeparate(
+ GL_BACK, stencil_back_func, stencil_back_ref, stencil_back_mask);
+ if ((cached_stencil_front_writemask !=
+ prev_state->cached_stencil_front_writemask))
+ glStencilMaskSeparate(GL_FRONT, cached_stencil_front_writemask);
+ if ((cached_stencil_back_writemask !=
+ prev_state->cached_stencil_back_writemask))
+ glStencilMaskSeparate(GL_BACK, cached_stencil_back_writemask);
+ if ((stencil_front_fail_op != prev_state->stencil_front_fail_op) ||
+ (stencil_front_z_fail_op != prev_state->stencil_front_z_fail_op) ||
+ (stencil_front_z_pass_op != prev_state->stencil_front_z_pass_op))
+ glStencilOpSeparate(GL_FRONT,
+ stencil_front_fail_op,
+ stencil_front_z_fail_op,
+ stencil_front_z_pass_op);
+ if ((stencil_back_fail_op != prev_state->stencil_back_fail_op) ||
+ (stencil_back_z_fail_op != prev_state->stencil_back_z_fail_op) ||
+ (stencil_back_z_pass_op != prev_state->stencil_back_z_pass_op))
+ glStencilOpSeparate(GL_BACK,
+ stencil_back_fail_op,
+ stencil_back_z_fail_op,
+ stencil_back_z_pass_op);
+ if ((viewport_x != prev_state->viewport_x) ||
+ (viewport_y != prev_state->viewport_y) ||
+ (viewport_width != prev_state->viewport_width) ||
+ (viewport_height != prev_state->viewport_height))
+ glViewport(viewport_x, viewport_y, viewport_width, viewport_height);
+ } else {
+ glBlendColor(blend_color_red,
+ blend_color_green,
+ blend_color_blue,
+ blend_color_alpha);
+ glBlendEquationSeparate(blend_equation_rgb, blend_equation_alpha);
+ glBlendFuncSeparate(
+ blend_source_rgb, blend_dest_rgb, blend_source_alpha, blend_dest_alpha);
+ glClearColor(color_clear_red,
+ color_clear_green,
+ color_clear_blue,
+ color_clear_alpha);
+ glClearDepth(depth_clear);
+ glClearStencil(stencil_clear);
+ glColorMask(cached_color_mask_red,
+ cached_color_mask_green,
+ cached_color_mask_blue,
+ cached_color_mask_alpha);
+ glCullFace(cull_mode);
+ glDepthFunc(depth_func);
+ glDepthMask(cached_depth_mask);
+ glDepthRange(z_near, z_far);
+ glFrontFace(front_face);
+ glHint(GL_GENERATE_MIPMAP_HINT, hint_generate_mipmap);
+ if (feature_info_->feature_flags().oes_standard_derivatives) {
+ glHint(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES,
+ hint_fragment_shader_derivative);
+ }
+ glLineWidth(line_width);
+ if (feature_info_->feature_flags().chromium_path_rendering) {
+ glMatrixLoadfEXT(GL_PATH_MODELVIEW_CHROMIUM, modelview_matrix);
+ }
+ if (feature_info_->feature_flags().chromium_path_rendering) {
+ glMatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM, projection_matrix);
+ }
+ glPixelStorei(GL_PACK_ALIGNMENT, pack_alignment);
+ glPixelStorei(GL_UNPACK_ALIGNMENT, unpack_alignment);
+ glPolygonOffset(polygon_offset_factor, polygon_offset_units);
+ glSampleCoverage(sample_coverage_value, sample_coverage_invert);
+ glScissor(scissor_x, scissor_y, scissor_width, scissor_height);
+ glStencilFuncSeparate(
+ GL_FRONT, stencil_front_func, stencil_front_ref, stencil_front_mask);
+ glStencilFuncSeparate(
+ GL_BACK, stencil_back_func, stencil_back_ref, stencil_back_mask);
+ glStencilMaskSeparate(GL_FRONT, cached_stencil_front_writemask);
+ glStencilMaskSeparate(GL_BACK, cached_stencil_back_writemask);
+ glStencilOpSeparate(GL_FRONT,
+ stencil_front_fail_op,
+ stencil_front_z_fail_op,
+ stencil_front_z_pass_op);
+ glStencilOpSeparate(GL_BACK,
+ stencil_back_fail_op,
+ stencil_back_z_fail_op,
+ stencil_back_z_pass_op);
+ glViewport(viewport_x, viewport_y, viewport_width, viewport_height);
+ }
+}
+bool ContextState::GetEnabled(GLenum cap) const {
+ switch (cap) {
+ case GL_BLEND:
+ return enable_flags.blend;
+ case GL_CULL_FACE:
+ return enable_flags.cull_face;
+ case GL_DEPTH_TEST:
+ return enable_flags.depth_test;
+ case GL_DITHER:
+ return enable_flags.dither;
+ case GL_POLYGON_OFFSET_FILL:
+ return enable_flags.polygon_offset_fill;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ return enable_flags.sample_alpha_to_coverage;
+ case GL_SAMPLE_COVERAGE:
+ return enable_flags.sample_coverage;
+ case GL_SCISSOR_TEST:
+ return enable_flags.scissor_test;
+ case GL_STENCIL_TEST:
+ return enable_flags.stencil_test;
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+bool ContextState::GetStateAsGLint(GLenum pname,
+ GLint* params,
+ GLsizei* num_written) const {
+ switch (pname) {
+ case GL_BLEND_COLOR:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_color_red);
+ params[1] = static_cast<GLint>(blend_color_green);
+ params[2] = static_cast<GLint>(blend_color_blue);
+ params[3] = static_cast<GLint>(blend_color_alpha);
+ }
+ return true;
+ case GL_BLEND_EQUATION_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_equation_rgb);
+ }
+ return true;
+ case GL_BLEND_EQUATION_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_equation_alpha);
+ }
+ return true;
+ case GL_BLEND_SRC_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_source_rgb);
+ }
+ return true;
+ case GL_BLEND_DST_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_dest_rgb);
+ }
+ return true;
+ case GL_BLEND_SRC_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_source_alpha);
+ }
+ return true;
+ case GL_BLEND_DST_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_dest_alpha);
+ }
+ return true;
+ case GL_COLOR_CLEAR_VALUE:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLint>(color_clear_red);
+ params[1] = static_cast<GLint>(color_clear_green);
+ params[2] = static_cast<GLint>(color_clear_blue);
+ params[3] = static_cast<GLint>(color_clear_alpha);
+ }
+ return true;
+ case GL_DEPTH_CLEAR_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(depth_clear);
+ }
+ return true;
+ case GL_STENCIL_CLEAR_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_clear);
+ }
+ return true;
+ case GL_COLOR_WRITEMASK:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLint>(color_mask_red);
+ params[1] = static_cast<GLint>(color_mask_green);
+ params[2] = static_cast<GLint>(color_mask_blue);
+ params[3] = static_cast<GLint>(color_mask_alpha);
+ }
+ return true;
+ case GL_CULL_FACE_MODE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(cull_mode);
+ }
+ return true;
+ case GL_DEPTH_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(depth_func);
+ }
+ return true;
+ case GL_DEPTH_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(depth_mask);
+ }
+ return true;
+ case GL_DEPTH_RANGE:
+ *num_written = 2;
+ if (params) {
+ params[0] = static_cast<GLint>(z_near);
+ params[1] = static_cast<GLint>(z_far);
+ }
+ return true;
+ case GL_FRONT_FACE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(front_face);
+ }
+ return true;
+ case GL_GENERATE_MIPMAP_HINT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(hint_generate_mipmap);
+ }
+ return true;
+ case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(hint_fragment_shader_derivative);
+ }
+ return true;
+ case GL_LINE_WIDTH:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(line_width);
+ }
+ return true;
+ case GL_PATH_MODELVIEW_MATRIX_CHROMIUM:
+ *num_written = 16;
+ if (params) {
+ for (size_t i = 0; i < 16; ++i) {
+ params[i] = static_cast<GLint>(round(modelview_matrix[i]));
+ }
+ }
+ return true;
+ case GL_PATH_PROJECTION_MATRIX_CHROMIUM:
+ *num_written = 16;
+ if (params) {
+ for (size_t i = 0; i < 16; ++i) {
+ params[i] = static_cast<GLint>(round(projection_matrix[i]));
+ }
+ }
+ return true;
+ case GL_PACK_ALIGNMENT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(pack_alignment);
+ }
+ return true;
+ case GL_UNPACK_ALIGNMENT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(unpack_alignment);
+ }
+ return true;
+ case GL_POLYGON_OFFSET_FACTOR:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(round(polygon_offset_factor));
+ }
+ return true;
+ case GL_POLYGON_OFFSET_UNITS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(round(polygon_offset_units));
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(sample_coverage_value);
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE_INVERT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(sample_coverage_invert);
+ }
+ return true;
+ case GL_SCISSOR_BOX:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLint>(scissor_x);
+ params[1] = static_cast<GLint>(scissor_y);
+ params[2] = static_cast<GLint>(scissor_width);
+ params[3] = static_cast<GLint>(scissor_height);
+ }
+ return true;
+ case GL_STENCIL_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_func);
+ }
+ return true;
+ case GL_STENCIL_REF:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_ref);
+ }
+ return true;
+ case GL_STENCIL_VALUE_MASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_mask);
+ }
+ return true;
+ case GL_STENCIL_BACK_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_func);
+ }
+ return true;
+ case GL_STENCIL_BACK_REF:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_ref);
+ }
+ return true;
+ case GL_STENCIL_BACK_VALUE_MASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_mask);
+ }
+ return true;
+ case GL_STENCIL_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_writemask);
+ }
+ return true;
+ case GL_STENCIL_BACK_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_writemask);
+ }
+ return true;
+ case GL_STENCIL_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_fail_op);
+ }
+ return true;
+ case GL_STENCIL_PASS_DEPTH_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_z_fail_op);
+ }
+ return true;
+ case GL_STENCIL_PASS_DEPTH_PASS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_z_pass_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_fail_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_PASS_DEPTH_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_z_fail_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_PASS_DEPTH_PASS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_z_pass_op);
+ }
+ return true;
+ case GL_VIEWPORT:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLint>(viewport_x);
+ params[1] = static_cast<GLint>(viewport_y);
+ params[2] = static_cast<GLint>(viewport_width);
+ params[3] = static_cast<GLint>(viewport_height);
+ }
+ return true;
+ case GL_BLEND:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.blend);
+ }
+ return true;
+ case GL_CULL_FACE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.cull_face);
+ }
+ return true;
+ case GL_DEPTH_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.depth_test);
+ }
+ return true;
+ case GL_DITHER:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.dither);
+ }
+ return true;
+ case GL_POLYGON_OFFSET_FILL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.polygon_offset_fill);
+ }
+ return true;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.sample_alpha_to_coverage);
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.sample_coverage);
+ }
+ return true;
+ case GL_SCISSOR_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.scissor_test);
+ }
+ return true;
+ case GL_STENCIL_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.stencil_test);
+ }
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool ContextState::GetStateAsGLfloat(GLenum pname,
+ GLfloat* params,
+ GLsizei* num_written) const {
+ switch (pname) {
+ case GL_BLEND_COLOR:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_color_red);
+ params[1] = static_cast<GLfloat>(blend_color_green);
+ params[2] = static_cast<GLfloat>(blend_color_blue);
+ params[3] = static_cast<GLfloat>(blend_color_alpha);
+ }
+ return true;
+ case GL_BLEND_EQUATION_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_equation_rgb);
+ }
+ return true;
+ case GL_BLEND_EQUATION_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_equation_alpha);
+ }
+ return true;
+ case GL_BLEND_SRC_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_source_rgb);
+ }
+ return true;
+ case GL_BLEND_DST_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_dest_rgb);
+ }
+ return true;
+ case GL_BLEND_SRC_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_source_alpha);
+ }
+ return true;
+ case GL_BLEND_DST_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_dest_alpha);
+ }
+ return true;
+ case GL_COLOR_CLEAR_VALUE:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLfloat>(color_clear_red);
+ params[1] = static_cast<GLfloat>(color_clear_green);
+ params[2] = static_cast<GLfloat>(color_clear_blue);
+ params[3] = static_cast<GLfloat>(color_clear_alpha);
+ }
+ return true;
+ case GL_DEPTH_CLEAR_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(depth_clear);
+ }
+ return true;
+ case GL_STENCIL_CLEAR_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_clear);
+ }
+ return true;
+ case GL_COLOR_WRITEMASK:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLfloat>(color_mask_red);
+ params[1] = static_cast<GLfloat>(color_mask_green);
+ params[2] = static_cast<GLfloat>(color_mask_blue);
+ params[3] = static_cast<GLfloat>(color_mask_alpha);
+ }
+ return true;
+ case GL_CULL_FACE_MODE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(cull_mode);
+ }
+ return true;
+ case GL_DEPTH_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(depth_func);
+ }
+ return true;
+ case GL_DEPTH_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(depth_mask);
+ }
+ return true;
+ case GL_DEPTH_RANGE:
+ *num_written = 2;
+ if (params) {
+ params[0] = static_cast<GLfloat>(z_near);
+ params[1] = static_cast<GLfloat>(z_far);
+ }
+ return true;
+ case GL_FRONT_FACE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(front_face);
+ }
+ return true;
+ case GL_GENERATE_MIPMAP_HINT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(hint_generate_mipmap);
+ }
+ return true;
+ case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(hint_fragment_shader_derivative);
+ }
+ return true;
+ case GL_LINE_WIDTH:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(line_width);
+ }
+ return true;
+ case GL_PATH_MODELVIEW_MATRIX_CHROMIUM:
+ *num_written = 16;
+ if (params) {
+ memcpy(params, modelview_matrix, sizeof(GLfloat) * 16);
+ }
+ return true;
+ case GL_PATH_PROJECTION_MATRIX_CHROMIUM:
+ *num_written = 16;
+ if (params) {
+ memcpy(params, projection_matrix, sizeof(GLfloat) * 16);
+ }
+ return true;
+ case GL_PACK_ALIGNMENT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(pack_alignment);
+ }
+ return true;
+ case GL_UNPACK_ALIGNMENT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(unpack_alignment);
+ }
+ return true;
+ case GL_POLYGON_OFFSET_FACTOR:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(polygon_offset_factor);
+ }
+ return true;
+ case GL_POLYGON_OFFSET_UNITS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(polygon_offset_units);
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(sample_coverage_value);
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE_INVERT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(sample_coverage_invert);
+ }
+ return true;
+ case GL_SCISSOR_BOX:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLfloat>(scissor_x);
+ params[1] = static_cast<GLfloat>(scissor_y);
+ params[2] = static_cast<GLfloat>(scissor_width);
+ params[3] = static_cast<GLfloat>(scissor_height);
+ }
+ return true;
+ case GL_STENCIL_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_func);
+ }
+ return true;
+ case GL_STENCIL_REF:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_ref);
+ }
+ return true;
+ case GL_STENCIL_VALUE_MASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_mask);
+ }
+ return true;
+ case GL_STENCIL_BACK_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_func);
+ }
+ return true;
+ case GL_STENCIL_BACK_REF:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_ref);
+ }
+ return true;
+ case GL_STENCIL_BACK_VALUE_MASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_mask);
+ }
+ return true;
+ case GL_STENCIL_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_writemask);
+ }
+ return true;
+ case GL_STENCIL_BACK_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_writemask);
+ }
+ return true;
+ case GL_STENCIL_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_fail_op);
+ }
+ return true;
+ case GL_STENCIL_PASS_DEPTH_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_z_fail_op);
+ }
+ return true;
+ case GL_STENCIL_PASS_DEPTH_PASS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_z_pass_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_fail_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_PASS_DEPTH_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_z_fail_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_PASS_DEPTH_PASS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_z_pass_op);
+ }
+ return true;
+ case GL_VIEWPORT:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLfloat>(viewport_x);
+ params[1] = static_cast<GLfloat>(viewport_y);
+ params[2] = static_cast<GLfloat>(viewport_width);
+ params[3] = static_cast<GLfloat>(viewport_height);
+ }
+ return true;
+ case GL_BLEND:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.blend);
+ }
+ return true;
+ case GL_CULL_FACE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.cull_face);
+ }
+ return true;
+ case GL_DEPTH_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.depth_test);
+ }
+ return true;
+ case GL_DITHER:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.dither);
+ }
+ return true;
+ case GL_POLYGON_OFFSET_FILL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.polygon_offset_fill);
+ }
+ return true;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.sample_alpha_to_coverage);
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.sample_coverage);
+ }
+ return true;
+ case GL_SCISSOR_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.scissor_test);
+ }
+ return true;
+ case GL_STENCIL_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.stencil_test);
+ }
+ return true;
+ default:
+ return false;
+ }
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_IMPL_AUTOGEN_H_
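Editor's note: illustrative sketch, not part of the patch. The cached-state getters support a two-pass pattern: call once with params == NULL to learn the element count, then again with a buffer of that size. Names below are hypothetical.

// Hypothetical usage -- not part of the Chromium sources.
#include <vector>
#include "gpu/command_buffer/service/context_state.h"

std::vector<GLint> QueryCachedViewport(const gpu::gles2::ContextState& state) {
  GLsizei num_written = 0;
  state.GetStateAsGLint(GL_VIEWPORT, NULL, &num_written);  // size query only
  std::vector<GLint> params(num_written);
  state.GetStateAsGLint(GL_VIEWPORT, &params[0], &num_written);
  return params;  // {x, y, width, height} from the cache, no glGetIntegerv
}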
diff --git a/gpu/command_buffer/service/disk_cache_proto.proto b/gpu/command_buffer/service/disk_cache_proto.proto
new file mode 100644
index 0000000..5a55943
--- /dev/null
+++ b/gpu/command_buffer/service/disk_cache_proto.proto
@@ -0,0 +1,26 @@
+option optimize_for = LITE_RUNTIME;
+
+message ShaderInfoProto {
+ optional int32 type = 1;
+ optional int32 size = 2;
+ optional string name = 3;
+ optional string key = 4;
+ optional int32 precision = 5;
+ optional int32 static_use = 6;
+}
+
+message ShaderProto {
+ optional bytes sha = 1;
+ repeated ShaderInfoProto attribs = 2;
+ repeated ShaderInfoProto uniforms = 3;
+ repeated ShaderInfoProto varyings = 4;
+}
+
+message GpuProgramProto {
+ optional bytes sha = 1;
+ optional int32 format = 2;
+ optional bytes program = 3;
+
+ optional ShaderProto vertex_shader = 4;
+ optional ShaderProto fragment_shader = 5;
+}
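Editor's note: illustrative sketch, not part of the patch. With the lite runtime, a program-cache entry might be serialized through the generated setters roughly as follows; the function name, arguments, and the generated header path are assumptions.

// Hypothetical usage of the generated lite-runtime API.
#include <string>
#include "gpu/command_buffer/service/disk_cache_proto.pb.h"  // path assumed

void WriteCacheEntry(const std::string& sha,
                     int program_format,
                     const std::string& program_binary,
                     std::string* out) {
  GpuProgramProto proto;
  proto.set_sha(sha);
  proto.set_format(program_format);
  proto.set_program(program_binary);
  proto.SerializeToString(out);  // available under LITE_RUNTIME as well
}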
diff --git a/gpu/command_buffer/service/error_state.cc b/gpu/command_buffer/service/error_state.cc
new file mode 100644
index 0000000..ce65aa1
--- /dev/null
+++ b/gpu/command_buffer/service/error_state.cc
@@ -0,0 +1,205 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/error_state.h"
+
+#include <string>
+
+#include "base/strings/stringprintf.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/logger.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+
+class ErrorStateImpl : public ErrorState {
+ public:
+ explicit ErrorStateImpl(ErrorStateClient* client, Logger* logger);
+ virtual ~ErrorStateImpl();
+
+ virtual uint32 GetGLError() OVERRIDE;
+
+ virtual void SetGLError(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ const char* msg) OVERRIDE;
+ virtual void SetGLErrorInvalidEnum(
+ const char* filename,
+ int line,
+ const char* function_name,
+ unsigned int value,
+ const char* label) OVERRIDE;
+ virtual void SetGLErrorInvalidParami(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname,
+ int param) OVERRIDE;
+ virtual void SetGLErrorInvalidParamf(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname,
+ float param) OVERRIDE;
+
+ virtual unsigned int PeekGLError(
+ const char* filename, int line, const char* function_name) OVERRIDE;
+
+ virtual void CopyRealGLErrorsToWrapper(
+ const char* filename, int line, const char* function_name) OVERRIDE;
+
+ virtual void ClearRealGLErrors(
+ const char* filename, int line, const char* function_name) OVERRIDE;
+
+ private:
+ // The last error message set.
+ std::string last_error_;
+ // Current GL error bits.
+ uint32 error_bits_;
+
+ ErrorStateClient* client_;
+ Logger* logger_;
+
+ DISALLOW_COPY_AND_ASSIGN(ErrorStateImpl);
+};
+
+ErrorState::ErrorState() {}
+
+ErrorState::~ErrorState() {}
+
+ErrorState* ErrorState::Create(ErrorStateClient* client, Logger* logger) {
+ return new ErrorStateImpl(client, logger);
+}
+
+ErrorStateImpl::ErrorStateImpl(ErrorStateClient* client, Logger* logger)
+ : error_bits_(0), client_(client), logger_(logger) {}
+
+ErrorStateImpl::~ErrorStateImpl() {}
+
+uint32 ErrorStateImpl::GetGLError() {
+ // Check the GL error first, then our wrapped error.
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR && error_bits_ != 0) {
+ for (uint32 mask = 1; mask != 0; mask = mask << 1) {
+ if ((error_bits_ & mask) != 0) {
+ error = GLES2Util::GLErrorBitToGLError(mask);
+ break;
+ }
+ }
+ }
+
+ if (error != GL_NO_ERROR) {
+ // There was an error, clear the corresponding wrapped error.
+ error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
+ }
+ return error;
+}
+
+unsigned int ErrorStateImpl::PeekGLError(
+ const char* filename, int line, const char* function_name) {
+ GLenum error = glGetError();
+ if (error != GL_NO_ERROR) {
+ SetGLError(filename, line, error, function_name, "");
+ }
+ return error;
+}
+
+void ErrorStateImpl::SetGLError(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ const char* msg) {
+ if (msg) {
+ last_error_ = msg;
+ logger_->LogMessage(
+ filename, line,
+ std::string("GL ERROR :") +
+ GLES2Util::GetStringEnum(error) + " : " +
+ function_name + ": " + msg);
+ }
+ error_bits_ |= GLES2Util::GLErrorToErrorBit(error);
+ if (error == GL_OUT_OF_MEMORY)
+ client_->OnOutOfMemoryError();
+}
+
+void ErrorStateImpl::SetGLErrorInvalidEnum(
+ const char* filename,
+ int line,
+ const char* function_name,
+ unsigned int value,
+ const char* label) {
+ SetGLError(filename, line, GL_INVALID_ENUM, function_name,
+ (std::string(label) + " was " +
+ GLES2Util::GetStringEnum(value)).c_str());
+}
+
+void ErrorStateImpl::SetGLErrorInvalidParami(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname, int param) {
+ if (error == GL_INVALID_ENUM) {
+ SetGLError(
+ filename, line, GL_INVALID_ENUM, function_name,
+ (std::string("trying to set ") +
+ GLES2Util::GetStringEnum(pname) + " to " +
+ GLES2Util::GetStringEnum(param)).c_str());
+ } else {
+ SetGLError(
+ filename, line, error, function_name,
+ (std::string("trying to set ") +
+ GLES2Util::GetStringEnum(pname) + " to " +
+ base::StringPrintf("%d", param)).c_str());
+ }
+}
+
+void ErrorStateImpl::SetGLErrorInvalidParamf(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname, float param) {
+ SetGLError(
+ filename, line, error, function_name,
+ (std::string("trying to set ") +
+ GLES2Util::GetStringEnum(pname) + " to " +
+ base::StringPrintf("%G", param)).c_str());
+}
+
+void ErrorStateImpl::CopyRealGLErrorsToWrapper(
+ const char* filename, int line, const char* function_name) {
+ GLenum error;
+ while ((error = glGetError()) != GL_NO_ERROR) {
+ SetGLError(filename, line, error, function_name,
+ "<- error from previous GL command");
+ }
+}
+
+void ErrorStateImpl::ClearRealGLErrors(
+ const char* filename, int line, const char* function_name) {
+  // Clears and logs all current GL errors.
+ GLenum error;
+ while ((error = glGetError()) != GL_NO_ERROR) {
+ if (error != GL_OUT_OF_MEMORY) {
+ // GL_OUT_OF_MEMORY can legally happen on lost device.
+ logger_->LogMessage(
+ filename, line,
+ std::string("GL ERROR :") +
+ GLES2Util::GetStringEnum(error) + " : " +
+ function_name + ": was unhandled");
+ NOTREACHED() << "GL error " << error << " was unhandled.";
+ }
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/error_state.h b/gpu/command_buffer/service/error_state.h
new file mode 100644
index 0000000..95f118c
--- /dev/null
+++ b/gpu/command_buffer/service/error_state.h
@@ -0,0 +1,128 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the ErrorState class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_H_
+
+#include <stdint.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class Logger;
+
+// Use these macros to synthesize GL errors instead of calling the error_state
+// functions directly, as they will propagate __FILE__ and __LINE__.
+
+// Use to synthesize a GL error on the error_state.
+#define ERRORSTATE_SET_GL_ERROR(error_state, error, function_name, msg) \
+ error_state->SetGLError(__FILE__, __LINE__, error, function_name, msg)
+
+// Use to synthesize an INVALID_ENUM GL error on the error_state. Will attempt
+// to expand the enum to a string.
+#define ERRORSTATE_SET_GL_ERROR_INVALID_ENUM( \
+ error_state, function_name, value, label) \
+ error_state->SetGLErrorInvalidEnum( \
+ __FILE__, __LINE__, function_name, value, label)
+
+// Use to synthesize a GL error on the error_state for an invalid enum based
+// integer parameter. Will attempt to expand the parameter to a string.
+#define ERRORSTATE_SET_GL_ERROR_INVALID_PARAMI( \
+ error_state, error, function_name, pname, param) \
+ error_state->SetGLErrorInvalidParami( \
+ __FILE__, __LINE__, error, function_name, pname, param)
+
+// Use to synthesize a GL error on the error_state for an invalid enum based
+// float parameter. Will attempt to expand the parameter to a string.
+#define ERRORSTATE_SET_GL_ERROR_INVALID_PARAMF( \
+ error_state, error, function_name, pname, param) \
+ error_state->SetGLErrorInvalidParamf( \
+ __FILE__, __LINE__, error, function_name, pname, param)
+
+// Use to move all pending errors to the wrapper so that on your next GL call
+// you can see whether that call generates an error.
+#define ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state, function_name) \
+ error_state->CopyRealGLErrorsToWrapper(__FILE__, __LINE__, function_name)
+// Use to look at the real GL error and still pass it on to the user.
+#define ERRORSTATE_PEEK_GL_ERROR(error_state, function_name) \
+ error_state->PeekGLError(__FILE__, __LINE__, function_name)
+// Use to clear all current GL errors. FAILS if there are any.
+#define ERRORSTATE_CLEAR_REAL_GL_ERRORS(error_state, function_name) \
+ error_state->ClearRealGLErrors(__FILE__, __LINE__, function_name)
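+
+// A minimal usage sketch (hypothetical caller; |error_state_| and
+// |validators_| are assumed to exist in the calling decoder), showing how
+// the macros forward __FILE__ and __LINE__ automatically:
+//
+//   void Decoder::DoTexParameteri(GLenum target, GLenum pname, GLint param) {
+//     if (!validators_->texture_parameter.IsValid(pname)) {
+//       ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+//           error_state_, "glTexParameteri", pname, "pname");
+//       return;
+//     }
+//     glTexParameteri(target, pname, param);
+//   }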
+
+class GPU_EXPORT ErrorStateClient {
+ public:
+ // GL_OUT_OF_MEMORY can cause side effects such as losing the context.
+ virtual void OnOutOfMemoryError() = 0;
+};
+
+class GPU_EXPORT ErrorState {
+ public:
+ virtual ~ErrorState();
+
+ static ErrorState* Create(ErrorStateClient* client, Logger* logger);
+
+ virtual uint32_t GetGLError() = 0;
+
+ virtual void SetGLError(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ const char* msg) = 0;
+ virtual void SetGLErrorInvalidEnum(
+ const char* filename,
+ int line,
+ const char* function_name,
+ unsigned int value,
+ const char* label) = 0;
+ virtual void SetGLErrorInvalidParami(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname,
+ int param) = 0;
+ virtual void SetGLErrorInvalidParamf(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname,
+ float param) = 0;
+
+ // Gets the GLError and stores it in our wrapper. Effectively
+ // this lets us peek at the error without losing it.
+ virtual unsigned int PeekGLError(
+ const char* filename, int line, const char* function_name) = 0;
+
+ // Copies the real GL errors to the wrapper. This is so we can
+ // make sure there are no native GL errors before calling some GL function
+ // so that on return we know any error generated was for that specific
+ // command.
+ virtual void CopyRealGLErrorsToWrapper(
+ const char* filename, int line, const char* function_name) = 0;
+
+ // Clear all real GL errors. This is to prevent the client from seeing any
+ // errors caused by GL calls that it was not responsible for issuing.
+ virtual void ClearRealGLErrors(
+ const char* filename, int line, const char* function_name) = 0;
+
+ protected:
+ ErrorState();
+
+ DISALLOW_COPY_AND_ASSIGN(ErrorState);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_H_
+
diff --git a/gpu/command_buffer/service/error_state_mock.cc b/gpu/command_buffer/service/error_state_mock.cc
new file mode 100644
index 0000000..f3925d7
--- /dev/null
+++ b/gpu/command_buffer/service/error_state_mock.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/error_state_mock.h"
+
+namespace gpu {
+namespace gles2 {
+
+MockErrorState::MockErrorState()
+ : ErrorState() {}
+
+MockErrorState::~MockErrorState() {}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/error_state_mock.h b/gpu/command_buffer/service/error_state_mock.h
new file mode 100644
index 0000000..eb056f3
--- /dev/null
+++ b/gpu/command_buffer/service/error_state_mock.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the mock ErrorState class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_MOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_MOCK_H_
+
+#include "gpu/command_buffer/service/error_state.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+namespace gles2 {
+
+class MockErrorState : public ErrorState {
+ public:
+ MockErrorState();
+ virtual ~MockErrorState();
+
+ MOCK_METHOD0(GetGLError, uint32_t());
+ MOCK_METHOD5(SetGLError, void(
+ const char* filename, int line,
+ unsigned error, const char* function_name, const char* msg));
+ MOCK_METHOD5(SetGLErrorInvalidEnum, void(
+ const char* filename, int line,
+ const char* function_name, unsigned value, const char* label));
+ MOCK_METHOD6(SetGLErrorInvalidParami, void(
+ const char* filename,
+ int line,
+ unsigned error,
+ const char* function_name,
+ unsigned pname,
+ int param));
+ MOCK_METHOD6(SetGLErrorInvalidParamf, void(
+ const char* filename,
+ int line,
+ unsigned error,
+ const char* function_name,
+ unsigned pname,
+ float param));
+  MOCK_METHOD3(PeekGLError, unsigned(
+      const char* filename, int line, const char* function_name));
+  MOCK_METHOD3(CopyRealGLErrorsToWrapper, void(
+      const char* filename, int line, const char* function_name));
+  MOCK_METHOD3(ClearRealGLErrors, void(
+      const char* filename, int line, const char* function_name));
+
+ DISALLOW_COPY_AND_ASSIGN(MockErrorState);
+};
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_MOCK_H_
+
diff --git a/gpu/command_buffer/service/feature_info.cc b/gpu/command_buffer/service/feature_info.cc
new file mode 100644
index 0000000..e853d9b
--- /dev/null
+++ b/gpu/command_buffer/service/feature_info.cc
@@ -0,0 +1,881 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/feature_info.h"
+
+#include <set>
+
+#include "base/command_line.h"
+#include "base/macros.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+struct FormatInfo {
+ GLenum format;
+ const GLenum* types;
+ size_t count;
+};
+
+class StringSet {
+ public:
+ StringSet() {}
+
+ StringSet(const char* s) {
+ Init(s);
+ }
+
+ StringSet(const std::string& str) {
+ Init(str);
+ }
+
+ void Init(const char* s) {
+ std::string str(s ? s : "");
+ Init(str);
+ }
+
+ void Init(const std::string& str) {
+ std::vector<std::string> tokens;
+ Tokenize(str, " ", &tokens);
+ string_set_.insert(tokens.begin(), tokens.end());
+ }
+
+ bool Contains(const char* s) {
+ return string_set_.find(s) != string_set_.end();
+ }
+
+ bool Contains(const std::string& s) {
+ return string_set_.find(s) != string_set_.end();
+ }
+
+ private:
+ std::set<std::string> string_set_;
+};
+
+// Process a string of workaround type IDs (separated by ',') and set up
+// the corresponding Workaround flags.
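+// For example, a switches::kGpuDriverBugWorkarounds value such as "5,12"
+// turns on the two workarounds whose GPU_DRIVER_BUG_WORKAROUNDS IDs are 5
+// and 12 (the numeric IDs here are purely illustrative).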
+void StringToWorkarounds(
+ const std::string& types, FeatureInfo::Workarounds* workarounds) {
+ DCHECK(workarounds);
+ std::vector<std::string> pieces;
+ base::SplitString(types, ',', &pieces);
+ for (size_t i = 0; i < pieces.size(); ++i) {
+ int number = 0;
+ bool succeed = base::StringToInt(pieces[i], &number);
+ DCHECK(succeed);
+ switch (number) {
+#define GPU_OP(type, name) \
+ case gpu::type: \
+ workarounds->name = true; \
+ break;
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+ default:
+ NOTIMPLEMENTED();
+ }
+ }
+ if (workarounds->max_texture_size_limit_4096)
+ workarounds->max_texture_size = 4096;
+ if (workarounds->max_cube_map_texture_size_limit_4096)
+ workarounds->max_cube_map_texture_size = 4096;
+ if (workarounds->max_cube_map_texture_size_limit_1024)
+ workarounds->max_cube_map_texture_size = 1024;
+ if (workarounds->max_cube_map_texture_size_limit_512)
+ workarounds->max_cube_map_texture_size = 512;
+
+ if (workarounds->max_fragment_uniform_vectors_32)
+ workarounds->max_fragment_uniform_vectors = 32;
+ if (workarounds->max_varying_vectors_16)
+ workarounds->max_varying_vectors = 16;
+ if (workarounds->max_vertex_uniform_vectors_256)
+ workarounds->max_vertex_uniform_vectors = 256;
+}
+
+} // anonymous namespace.
+
+FeatureInfo::FeatureFlags::FeatureFlags()
+ : chromium_color_buffer_float_rgba(false),
+ chromium_color_buffer_float_rgb(false),
+ chromium_framebuffer_multisample(false),
+ chromium_sync_query(false),
+ use_core_framebuffer_multisample(false),
+ multisampled_render_to_texture(false),
+ use_img_for_multisampled_render_to_texture(false),
+ oes_standard_derivatives(false),
+ oes_egl_image_external(false),
+ oes_depth24(false),
+ oes_compressed_etc1_rgb8_texture(false),
+ packed_depth24_stencil8(false),
+ npot_ok(false),
+ enable_texture_float_linear(false),
+ enable_texture_half_float_linear(false),
+ angle_translated_shader_source(false),
+ angle_pack_reverse_row_order(false),
+ arb_texture_rectangle(false),
+ angle_instanced_arrays(false),
+ occlusion_query_boolean(false),
+ use_arb_occlusion_query2_for_occlusion_query_boolean(false),
+ use_arb_occlusion_query_for_occlusion_query_boolean(false),
+ native_vertex_array_object(false),
+ ext_texture_format_bgra8888(false),
+ enable_shader_name_hashing(false),
+ enable_samplers(false),
+ ext_draw_buffers(false),
+ ext_frag_depth(false),
+ ext_shader_texture_lod(false),
+ use_async_readpixels(false),
+ map_buffer_range(false),
+ ext_discard_framebuffer(false),
+ angle_depth_texture(false),
+ is_angle(false),
+ is_swiftshader(false),
+ angle_texture_usage(false),
+ ext_texture_storage(false),
+ chromium_path_rendering(false) {
+}
+
+FeatureInfo::Workarounds::Workarounds() :
+#define GPU_OP(type, name) name(false),
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+ max_texture_size(0),
+ max_cube_map_texture_size(0),
+ max_fragment_uniform_vectors(0),
+ max_varying_vectors(0),
+ max_vertex_uniform_vectors(0) {
+}
+
+FeatureInfo::FeatureInfo() {
+ InitializeBasicState(*CommandLine::ForCurrentProcess());
+}
+
+FeatureInfo::FeatureInfo(const CommandLine& command_line) {
+ InitializeBasicState(command_line);
+}
+
+void FeatureInfo::InitializeBasicState(const CommandLine& command_line) {
+ if (command_line.HasSwitch(switches::kGpuDriverBugWorkarounds)) {
+ std::string types = command_line.GetSwitchValueASCII(
+ switches::kGpuDriverBugWorkarounds);
+ StringToWorkarounds(types, &workarounds_);
+ }
+ feature_flags_.enable_shader_name_hashing =
+ !command_line.HasSwitch(switches::kDisableShaderNameHashing);
+
+ feature_flags_.is_swiftshader =
+ (command_line.GetSwitchValueASCII(switches::kUseGL) == "swiftshader");
+
+ static const GLenum kAlphaTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const GLenum kRGBTypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_5_6_5,
+ };
+ static const GLenum kRGBATypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_UNSIGNED_SHORT_5_5_5_1,
+ };
+ static const GLenum kLuminanceTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const GLenum kLuminanceAlphaTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const FormatInfo kFormatTypes[] = {
+ { GL_ALPHA, kAlphaTypes, arraysize(kAlphaTypes), },
+ { GL_RGB, kRGBTypes, arraysize(kRGBTypes), },
+ { GL_RGBA, kRGBATypes, arraysize(kRGBATypes), },
+ { GL_LUMINANCE, kLuminanceTypes, arraysize(kLuminanceTypes), },
+ { GL_LUMINANCE_ALPHA, kLuminanceAlphaTypes,
+ arraysize(kLuminanceAlphaTypes), } ,
+ };
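+  // With these tables, each core GLES2 format gets a validator that accepts
+  // exactly the <format, type> pairs listed above; for example,
+  // texture_format_validators_[GL_RGB] validates GL_UNSIGNED_BYTE and
+  // GL_UNSIGNED_SHORT_5_6_5. InitializeFeatures() later adds
+  // extension-dependent values.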
+ for (size_t ii = 0; ii < arraysize(kFormatTypes); ++ii) {
+ const FormatInfo& info = kFormatTypes[ii];
+ ValueValidator<GLenum>& validator = texture_format_validators_[info.format];
+ for (size_t jj = 0; jj < info.count; ++jj) {
+ validator.AddValue(info.types[jj]);
+ }
+ }
+}
+
+bool FeatureInfo::Initialize() {
+ disallowed_features_ = DisallowedFeatures();
+ InitializeFeatures();
+ return true;
+}
+
+bool FeatureInfo::Initialize(const DisallowedFeatures& disallowed_features) {
+ disallowed_features_ = disallowed_features;
+ InitializeFeatures();
+ return true;
+}
+
+void FeatureInfo::InitializeFeatures() {
+ // Figure out what extensions to turn on.
+ StringSet extensions(
+ reinterpret_cast<const char*>(glGetString(GL_EXTENSIONS)));
+
+ const char* renderer_str =
+ reinterpret_cast<const char*>(glGetString(GL_RENDERER));
+ if (renderer_str) {
+ feature_flags_.is_angle = StartsWithASCII(renderer_str, "ANGLE", true);
+ }
+
+ bool is_es3 = false;
+ const char* version_str =
+ reinterpret_cast<const char*>(glGetString(GL_VERSION));
+ if (version_str) {
+ std::string lstr(base::StringToLowerASCII(std::string(version_str)));
+ is_es3 = (lstr.substr(0, 12) == "opengl es 3.");
+ }
+
+ AddExtensionString("GL_ANGLE_translated_shader_source");
+ AddExtensionString("GL_CHROMIUM_async_pixel_transfers");
+ AddExtensionString("GL_CHROMIUM_bind_uniform_location");
+ AddExtensionString("GL_CHROMIUM_command_buffer_query");
+ AddExtensionString("GL_CHROMIUM_command_buffer_latency_query");
+ AddExtensionString("GL_CHROMIUM_copy_texture");
+ AddExtensionString("GL_CHROMIUM_get_error_query");
+ AddExtensionString("GL_CHROMIUM_lose_context");
+ AddExtensionString("GL_CHROMIUM_pixel_transfer_buffer_object");
+ AddExtensionString("GL_CHROMIUM_rate_limit_offscreen_context");
+ AddExtensionString("GL_CHROMIUM_resize");
+ AddExtensionString("GL_CHROMIUM_resource_safe");
+ AddExtensionString("GL_CHROMIUM_strict_attribs");
+ AddExtensionString("GL_CHROMIUM_texture_mailbox");
+ AddExtensionString("GL_EXT_debug_marker");
+
+ // OES_vertex_array_object is emulated if not present natively,
+ // so the extension string is always exposed.
+ AddExtensionString("GL_OES_vertex_array_object");
+
+ if (!disallowed_features_.gpu_memory_manager)
+ AddExtensionString("GL_CHROMIUM_gpu_memory_manager");
+
+ if (extensions.Contains("GL_ANGLE_translated_shader_source")) {
+ feature_flags_.angle_translated_shader_source = true;
+ }
+
+ // Check if we should allow GL_EXT_texture_compression_dxt1 and
+ // GL_EXT_texture_compression_s3tc.
+ bool enable_dxt1 = false;
+ bool enable_dxt3 = false;
+ bool enable_dxt5 = false;
+ bool have_s3tc = extensions.Contains("GL_EXT_texture_compression_s3tc");
+ bool have_dxt3 =
+ have_s3tc || extensions.Contains("GL_ANGLE_texture_compression_dxt3");
+ bool have_dxt5 =
+ have_s3tc || extensions.Contains("GL_ANGLE_texture_compression_dxt5");
+
+ if (extensions.Contains("GL_EXT_texture_compression_dxt1") || have_s3tc) {
+ enable_dxt1 = true;
+ }
+ if (have_dxt3) {
+ enable_dxt3 = true;
+ }
+ if (have_dxt5) {
+ enable_dxt5 = true;
+ }
+
+ if (enable_dxt1) {
+ AddExtensionString("GL_EXT_texture_compression_dxt1");
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT);
+ }
+
+ if (enable_dxt3) {
+ // The difference between GL_EXT_texture_compression_s3tc and
+ // GL_CHROMIUM_texture_compression_dxt3 is that the former
+ // requires on the fly compression. The latter does not.
+ AddExtensionString("GL_CHROMIUM_texture_compression_dxt3");
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT);
+ }
+
+ if (enable_dxt5) {
+ // The difference between GL_EXT_texture_compression_s3tc and
+ // GL_CHROMIUM_texture_compression_dxt5 is that the former
+ // requires on the fly compression. The latter does not.
+ AddExtensionString("GL_CHROMIUM_texture_compression_dxt5");
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT);
+ }
+
+ // Check if we should enable GL_EXT_texture_filter_anisotropic.
+ if (extensions.Contains("GL_EXT_texture_filter_anisotropic")) {
+ AddExtensionString("GL_EXT_texture_filter_anisotropic");
+ validators_.texture_parameter.AddValue(
+ GL_TEXTURE_MAX_ANISOTROPY_EXT);
+ validators_.g_l_state.AddValue(
+ GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT);
+ }
+
+ // Check if we should support GL_OES_packed_depth_stencil and/or
+ // GL_GOOGLE_depth_texture / GL_CHROMIUM_depth_texture.
+ //
+ // NOTE: GL_OES_depth_texture requires support for depth cubemaps.
+ // GL_ARB_depth_texture requires other features that
+ // GL_OES_packed_depth_stencil does not provide.
+ //
+ // Therefore we made up GL_GOOGLE_depth_texture / GL_CHROMIUM_depth_texture.
+ //
+ // GL_GOOGLE_depth_texture is legacy. As we exposed it into NaCl we can't
+ // get rid of it.
+ //
+ bool enable_depth_texture = false;
+ if (!workarounds_.disable_depth_texture &&
+ (extensions.Contains("GL_ARB_depth_texture") ||
+ extensions.Contains("GL_OES_depth_texture") ||
+ extensions.Contains("GL_ANGLE_depth_texture") || is_es3)) {
+ enable_depth_texture = true;
+ feature_flags_.angle_depth_texture =
+ extensions.Contains("GL_ANGLE_depth_texture");
+ }
+
+ if (enable_depth_texture) {
+ AddExtensionString("GL_CHROMIUM_depth_texture");
+ AddExtensionString("GL_GOOGLE_depth_texture");
+ texture_format_validators_[GL_DEPTH_COMPONENT].AddValue(GL_UNSIGNED_SHORT);
+ texture_format_validators_[GL_DEPTH_COMPONENT].AddValue(GL_UNSIGNED_INT);
+ validators_.texture_internal_format.AddValue(GL_DEPTH_COMPONENT);
+ validators_.texture_format.AddValue(GL_DEPTH_COMPONENT);
+ validators_.pixel_type.AddValue(GL_UNSIGNED_SHORT);
+ validators_.pixel_type.AddValue(GL_UNSIGNED_INT);
+ }
+
+ if (extensions.Contains("GL_EXT_packed_depth_stencil") ||
+ extensions.Contains("GL_OES_packed_depth_stencil") || is_es3) {
+ AddExtensionString("GL_OES_packed_depth_stencil");
+ feature_flags_.packed_depth24_stencil8 = true;
+ if (enable_depth_texture) {
+ texture_format_validators_[GL_DEPTH_STENCIL]
+ .AddValue(GL_UNSIGNED_INT_24_8);
+ validators_.texture_internal_format.AddValue(GL_DEPTH_STENCIL);
+ validators_.texture_format.AddValue(GL_DEPTH_STENCIL);
+ validators_.pixel_type.AddValue(GL_UNSIGNED_INT_24_8);
+ }
+ validators_.render_buffer_format.AddValue(GL_DEPTH24_STENCIL8);
+ }
+
+ if (is_es3 || extensions.Contains("GL_OES_vertex_array_object") ||
+ extensions.Contains("GL_ARB_vertex_array_object") ||
+ extensions.Contains("GL_APPLE_vertex_array_object")) {
+ feature_flags_.native_vertex_array_object = true;
+ }
+
+ // If we're using client_side_arrays we have to emulate
+ // vertex array objects since vertex array objects do not work
+ // with client side arrays.
+ if (workarounds_.use_client_side_arrays_for_stream_buffers) {
+ feature_flags_.native_vertex_array_object = false;
+ }
+
+ if (is_es3 || extensions.Contains("GL_OES_element_index_uint") ||
+ gfx::HasDesktopGLFeatures()) {
+ AddExtensionString("GL_OES_element_index_uint");
+ validators_.index_type.AddValue(GL_UNSIGNED_INT);
+ }
+
+ bool enable_texture_format_bgra8888 = false;
+ bool enable_read_format_bgra = false;
+ bool enable_render_buffer_bgra = false;
+ bool enable_immutable_texture_format_bgra_on_es3 =
+ extensions.Contains("GL_APPLE_texture_format_BGRA8888");
+
+ // Check if we should allow GL_EXT_texture_format_BGRA8888
+ if (extensions.Contains("GL_EXT_texture_format_BGRA8888") ||
+ enable_immutable_texture_format_bgra_on_es3 ||
+ extensions.Contains("GL_EXT_bgra")) {
+ enable_texture_format_bgra8888 = true;
+ }
+
+ if (extensions.Contains("GL_EXT_bgra")) {
+ enable_render_buffer_bgra = true;
+ }
+
+ if (extensions.Contains("GL_EXT_read_format_bgra") ||
+ extensions.Contains("GL_EXT_bgra")) {
+ enable_read_format_bgra = true;
+ }
+
+ if (enable_texture_format_bgra8888) {
+ feature_flags_.ext_texture_format_bgra8888 = true;
+ AddExtensionString("GL_EXT_texture_format_BGRA8888");
+ texture_format_validators_[GL_BGRA_EXT].AddValue(GL_UNSIGNED_BYTE);
+ validators_.texture_internal_format.AddValue(GL_BGRA_EXT);
+ validators_.texture_format.AddValue(GL_BGRA_EXT);
+ }
+
+ if (enable_read_format_bgra) {
+ AddExtensionString("GL_EXT_read_format_bgra");
+ validators_.read_pixel_format.AddValue(GL_BGRA_EXT);
+ }
+
+ if (enable_render_buffer_bgra) {
+ AddExtensionString("GL_CHROMIUM_renderbuffer_format_BGRA8888");
+ validators_.render_buffer_format.AddValue(GL_BGRA8_EXT);
+ }
+
+ if (extensions.Contains("GL_OES_rgb8_rgba8") || gfx::HasDesktopGLFeatures()) {
+ AddExtensionString("GL_OES_rgb8_rgba8");
+ validators_.render_buffer_format.AddValue(GL_RGB8_OES);
+ validators_.render_buffer_format.AddValue(GL_RGBA8_OES);
+ }
+
+ // Check if we should allow GL_OES_texture_npot
+ if (is_es3 || extensions.Contains("GL_ARB_texture_non_power_of_two") ||
+ extensions.Contains("GL_OES_texture_npot")) {
+ AddExtensionString("GL_OES_texture_npot");
+ feature_flags_.npot_ok = true;
+ }
+
+ // Check if we should allow GL_OES_texture_float, GL_OES_texture_half_float,
+ // GL_OES_texture_float_linear, GL_OES_texture_half_float_linear
+ bool enable_texture_float = false;
+ bool enable_texture_float_linear = false;
+ bool enable_texture_half_float = false;
+ bool enable_texture_half_float_linear = false;
+
+ bool may_enable_chromium_color_buffer_float = false;
+
+ if (extensions.Contains("GL_ARB_texture_float")) {
+ enable_texture_float = true;
+ enable_texture_float_linear = true;
+ enable_texture_half_float = true;
+ enable_texture_half_float_linear = true;
+ may_enable_chromium_color_buffer_float = true;
+ } else {
+ if (is_es3 || extensions.Contains("GL_OES_texture_float")) {
+ enable_texture_float = true;
+ if (extensions.Contains("GL_OES_texture_float_linear")) {
+ enable_texture_float_linear = true;
+ }
+ if ((is_es3 && extensions.Contains("GL_EXT_color_buffer_float")) ||
+ feature_flags_.is_angle) {
+ may_enable_chromium_color_buffer_float = true;
+ }
+ }
+ // TODO(dshwang): GLES3 supports half float by default but GL_HALF_FLOAT_OES
+ // isn't equal to GL_HALF_FLOAT.
+ if (extensions.Contains("GL_OES_texture_half_float")) {
+ enable_texture_half_float = true;
+ if (extensions.Contains("GL_OES_texture_half_float_linear")) {
+ enable_texture_half_float_linear = true;
+ }
+ }
+ }
+
+ if (enable_texture_float) {
+ texture_format_validators_[GL_ALPHA].AddValue(GL_FLOAT);
+ texture_format_validators_[GL_RGB].AddValue(GL_FLOAT);
+ texture_format_validators_[GL_RGBA].AddValue(GL_FLOAT);
+ texture_format_validators_[GL_LUMINANCE].AddValue(GL_FLOAT);
+ texture_format_validators_[GL_LUMINANCE_ALPHA].AddValue(GL_FLOAT);
+ validators_.pixel_type.AddValue(GL_FLOAT);
+ validators_.read_pixel_type.AddValue(GL_FLOAT);
+ AddExtensionString("GL_OES_texture_float");
+ if (enable_texture_float_linear) {
+ AddExtensionString("GL_OES_texture_float_linear");
+ }
+ }
+
+ if (enable_texture_half_float) {
+ texture_format_validators_[GL_ALPHA].AddValue(GL_HALF_FLOAT_OES);
+ texture_format_validators_[GL_RGB].AddValue(GL_HALF_FLOAT_OES);
+ texture_format_validators_[GL_RGBA].AddValue(GL_HALF_FLOAT_OES);
+ texture_format_validators_[GL_LUMINANCE].AddValue(GL_HALF_FLOAT_OES);
+ texture_format_validators_[GL_LUMINANCE_ALPHA].AddValue(GL_HALF_FLOAT_OES);
+ validators_.pixel_type.AddValue(GL_HALF_FLOAT_OES);
+ validators_.read_pixel_type.AddValue(GL_HALF_FLOAT_OES);
+ AddExtensionString("GL_OES_texture_half_float");
+ if (enable_texture_half_float_linear) {
+ AddExtensionString("GL_OES_texture_half_float_linear");
+ }
+ }
+
+ if (may_enable_chromium_color_buffer_float) {
+ COMPILE_ASSERT(GL_RGBA32F_ARB == GL_RGBA32F &&
+ GL_RGBA32F_EXT == GL_RGBA32F &&
+ GL_RGB32F_ARB == GL_RGB32F &&
+ GL_RGB32F_EXT == GL_RGB32F,
+ sized_float_internal_format_variations_must_match);
+ // We don't check extension support beyond ARB_texture_float on desktop GL,
+ // and format support varies between GL configurations. For example, spec
+ // prior to OpenGL 3.0 mandates framebuffer support only for one
+ // implementation-chosen format, and ES3.0 EXT_color_buffer_float does not
+ // support rendering to RGB32F. Check for framebuffer completeness with
+ // formats that the extensions expose, and only enable an extension when a
+ // framebuffer created with its texture format is reported as complete.
+ GLint fb_binding = 0;
+ GLint tex_binding = 0;
+ glGetIntegerv(GL_FRAMEBUFFER_BINDING, &fb_binding);
+ glGetIntegerv(GL_TEXTURE_BINDING_2D, &tex_binding);
+
+ GLuint tex_id = 0;
+ GLuint fb_id = 0;
+ GLsizei width = 16;
+
+ glGenTextures(1, &tex_id);
+ glGenFramebuffersEXT(1, &fb_id);
+ glBindTexture(GL_TEXTURE_2D, tex_id);
+ // Nearest filter needed for framebuffer completeness on some drivers.
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, width, 0, GL_RGBA,
+ GL_FLOAT, NULL);
+ glBindFramebufferEXT(GL_FRAMEBUFFER, fb_id);
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, tex_id, 0);
+ GLenum statusRGBA = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, width, width, 0, GL_RGB,
+ GL_FLOAT, NULL);
+ GLenum statusRGB = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
+ glDeleteFramebuffersEXT(1, &fb_id);
+ glDeleteTextures(1, &tex_id);
+
+ glBindFramebufferEXT(GL_FRAMEBUFFER, static_cast<GLuint>(fb_binding));
+ glBindTexture(GL_TEXTURE_2D, static_cast<GLuint>(tex_binding));
+
+ DCHECK(glGetError() == GL_NO_ERROR);
+
+ if (statusRGBA == GL_FRAMEBUFFER_COMPLETE) {
+ validators_.texture_internal_format.AddValue(GL_RGBA32F);
+ feature_flags_.chromium_color_buffer_float_rgba = true;
+ AddExtensionString("GL_CHROMIUM_color_buffer_float_rgba");
+ }
+ if (statusRGB == GL_FRAMEBUFFER_COMPLETE) {
+ validators_.texture_internal_format.AddValue(GL_RGB32F);
+ feature_flags_.chromium_color_buffer_float_rgb = true;
+ AddExtensionString("GL_CHROMIUM_color_buffer_float_rgb");
+ }
+ }
+
+ // Check for multisample support
+ if (!workarounds_.disable_multisampling) {
+ bool ext_has_multisample =
+ extensions.Contains("GL_EXT_framebuffer_multisample") || is_es3;
+ if (feature_flags_.is_angle) {
+ ext_has_multisample |=
+ extensions.Contains("GL_ANGLE_framebuffer_multisample");
+ }
+ feature_flags_.use_core_framebuffer_multisample = is_es3;
+ if (ext_has_multisample) {
+ feature_flags_.chromium_framebuffer_multisample = true;
+ validators_.frame_buffer_target.AddValue(GL_READ_FRAMEBUFFER_EXT);
+ validators_.frame_buffer_target.AddValue(GL_DRAW_FRAMEBUFFER_EXT);
+ validators_.g_l_state.AddValue(GL_READ_FRAMEBUFFER_BINDING_EXT);
+ validators_.g_l_state.AddValue(GL_MAX_SAMPLES_EXT);
+ validators_.render_buffer_parameter.AddValue(GL_RENDERBUFFER_SAMPLES_EXT);
+ AddExtensionString("GL_CHROMIUM_framebuffer_multisample");
+ }
+ if (extensions.Contains("GL_EXT_multisampled_render_to_texture")) {
+ feature_flags_.multisampled_render_to_texture = true;
+ } else if (extensions.Contains("GL_IMG_multisampled_render_to_texture")) {
+ feature_flags_.multisampled_render_to_texture = true;
+ feature_flags_.use_img_for_multisampled_render_to_texture = true;
+ }
+ if (feature_flags_.multisampled_render_to_texture) {
+ validators_.render_buffer_parameter.AddValue(
+ GL_RENDERBUFFER_SAMPLES_EXT);
+ validators_.g_l_state.AddValue(GL_MAX_SAMPLES_EXT);
+ validators_.frame_buffer_parameter.AddValue(
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT);
+ AddExtensionString("GL_EXT_multisampled_render_to_texture");
+ }
+ }
+
+ if (extensions.Contains("GL_OES_depth24") || gfx::HasDesktopGLFeatures() ||
+ is_es3) {
+ AddExtensionString("GL_OES_depth24");
+ feature_flags_.oes_depth24 = true;
+ validators_.render_buffer_format.AddValue(GL_DEPTH_COMPONENT24);
+ }
+
+ if (!workarounds_.disable_oes_standard_derivatives &&
+ (is_es3 || extensions.Contains("GL_OES_standard_derivatives") ||
+ gfx::HasDesktopGLFeatures())) {
+ AddExtensionString("GL_OES_standard_derivatives");
+ feature_flags_.oes_standard_derivatives = true;
+ validators_.hint_target.AddValue(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES);
+ validators_.g_l_state.AddValue(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES);
+ }
+
+ if (extensions.Contains("GL_OES_EGL_image_external")) {
+ AddExtensionString("GL_OES_EGL_image_external");
+ feature_flags_.oes_egl_image_external = true;
+ validators_.texture_bind_target.AddValue(GL_TEXTURE_EXTERNAL_OES);
+ validators_.get_tex_param_target.AddValue(GL_TEXTURE_EXTERNAL_OES);
+ validators_.texture_parameter.AddValue(GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES);
+ validators_.g_l_state.AddValue(GL_TEXTURE_BINDING_EXTERNAL_OES);
+ }
+
+ if (extensions.Contains("GL_OES_compressed_ETC1_RGB8_texture")) {
+ AddExtensionString("GL_OES_compressed_ETC1_RGB8_texture");
+ feature_flags_.oes_compressed_etc1_rgb8_texture = true;
+ validators_.compressed_texture_format.AddValue(GL_ETC1_RGB8_OES);
+ }
+
+ if (extensions.Contains("GL_AMD_compressed_ATC_texture")) {
+ AddExtensionString("GL_AMD_compressed_ATC_texture");
+ validators_.compressed_texture_format.AddValue(
+ GL_ATC_RGB_AMD);
+ validators_.compressed_texture_format.AddValue(
+ GL_ATC_RGBA_EXPLICIT_ALPHA_AMD);
+ validators_.compressed_texture_format.AddValue(
+ GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD);
+ }
+
+ if (extensions.Contains("GL_IMG_texture_compression_pvrtc")) {
+ AddExtensionString("GL_IMG_texture_compression_pvrtc");
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG);
+ }
+
+ // Ideally we would only expose this extension on Mac OS X, to
+ // support GL_CHROMIUM_iosurface and the compositor. We don't want
+ // applications to start using it; they should use ordinary non-
+ // power-of-two textures. However, for unit testing purposes we
+ // expose it on all supported platforms.
+ if (extensions.Contains("GL_ARB_texture_rectangle")) {
+ AddExtensionString("GL_ARB_texture_rectangle");
+ feature_flags_.arb_texture_rectangle = true;
+ validators_.texture_bind_target.AddValue(GL_TEXTURE_RECTANGLE_ARB);
+ // For the moment we don't add this enum to the texture_target
+ // validator. This implies that the only way to get image data into a
+ // rectangular texture is via glTexImageIOSurface2DCHROMIUM, which is
+ // just fine since again we don't want applications depending on this
+ // extension.
+ validators_.get_tex_param_target.AddValue(GL_TEXTURE_RECTANGLE_ARB);
+ validators_.g_l_state.AddValue(GL_TEXTURE_BINDING_RECTANGLE_ARB);
+ }
+
+#if defined(OS_MACOSX)
+ AddExtensionString("GL_CHROMIUM_iosurface");
+#endif
+
+ // TODO(gman): Add support for these extensions.
+ // GL_OES_depth32
+
+ feature_flags_.enable_texture_float_linear |= enable_texture_float_linear;
+ feature_flags_.enable_texture_half_float_linear |=
+ enable_texture_half_float_linear;
+
+ if (extensions.Contains("GL_ANGLE_pack_reverse_row_order")) {
+ AddExtensionString("GL_ANGLE_pack_reverse_row_order");
+ feature_flags_.angle_pack_reverse_row_order = true;
+ validators_.pixel_store.AddValue(GL_PACK_REVERSE_ROW_ORDER_ANGLE);
+ validators_.g_l_state.AddValue(GL_PACK_REVERSE_ROW_ORDER_ANGLE);
+ }
+
+ if (extensions.Contains("GL_ANGLE_texture_usage")) {
+ feature_flags_.angle_texture_usage = true;
+ AddExtensionString("GL_ANGLE_texture_usage");
+ validators_.texture_parameter.AddValue(GL_TEXTURE_USAGE_ANGLE);
+ }
+
+  // Note: Only the APPLE_texture_format_BGRA8888 extension allows BGRA8_EXT
+  // in ES3's glTexStorage2D, and we prefer supporting BGRA over texture
+  // storage. So we don't expose GL_EXT_texture_storage when ES3 is combined
+  // with GL_EXT_texture_format_BGRA8888, because we would fail the GL_BGRA8
+  // requirement. However, we do expose GL_EXT_texture_storage on plain ES3,
+  // because then we don't claim to handle GL_BGRA8.
+ bool support_texture_storage_on_es3 =
+ (is_es3 && enable_immutable_texture_format_bgra_on_es3) ||
+ (is_es3 && !enable_texture_format_bgra8888);
+ if (extensions.Contains("GL_EXT_texture_storage") ||
+ extensions.Contains("GL_ARB_texture_storage") ||
+ support_texture_storage_on_es3) {
+ feature_flags_.ext_texture_storage = true;
+ AddExtensionString("GL_EXT_texture_storage");
+ validators_.texture_parameter.AddValue(GL_TEXTURE_IMMUTABLE_FORMAT_EXT);
+ if (enable_texture_format_bgra8888)
+ validators_.texture_internal_format_storage.AddValue(GL_BGRA8_EXT);
+ if (enable_texture_float) {
+ validators_.texture_internal_format_storage.AddValue(GL_RGBA32F_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RGB32F_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_ALPHA32F_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_LUMINANCE32F_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_LUMINANCE_ALPHA32F_EXT);
+ }
+ if (enable_texture_half_float) {
+ validators_.texture_internal_format_storage.AddValue(GL_RGBA16F_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RGB16F_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_ALPHA16F_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_LUMINANCE16F_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_LUMINANCE_ALPHA16F_EXT);
+ }
+ }
+
+ bool have_ext_occlusion_query_boolean =
+ extensions.Contains("GL_EXT_occlusion_query_boolean");
+ bool have_arb_occlusion_query2 =
+ extensions.Contains("GL_ARB_occlusion_query2");
+ bool have_arb_occlusion_query =
+ extensions.Contains("GL_ARB_occlusion_query");
+
+ if (!workarounds_.disable_ext_occlusion_query &&
+ (have_ext_occlusion_query_boolean ||
+ have_arb_occlusion_query2 ||
+ have_arb_occlusion_query)) {
+ AddExtensionString("GL_EXT_occlusion_query_boolean");
+ feature_flags_.occlusion_query_boolean = true;
+ feature_flags_.use_arb_occlusion_query2_for_occlusion_query_boolean =
+ !have_ext_occlusion_query_boolean && have_arb_occlusion_query2;
+ feature_flags_.use_arb_occlusion_query_for_occlusion_query_boolean =
+ !have_ext_occlusion_query_boolean && have_arb_occlusion_query &&
+ !have_arb_occlusion_query2;
+ }
+
+ if (!workarounds_.disable_angle_instanced_arrays &&
+ (extensions.Contains("GL_ANGLE_instanced_arrays") ||
+ (extensions.Contains("GL_ARB_instanced_arrays") &&
+ extensions.Contains("GL_ARB_draw_instanced")) ||
+ is_es3)) {
+ AddExtensionString("GL_ANGLE_instanced_arrays");
+ feature_flags_.angle_instanced_arrays = true;
+ validators_.vertex_attribute.AddValue(GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE);
+ }
+
+ if (!workarounds_.disable_ext_draw_buffers &&
+ (extensions.Contains("GL_ARB_draw_buffers") ||
+ extensions.Contains("GL_EXT_draw_buffers"))) {
+ AddExtensionString("GL_EXT_draw_buffers");
+ feature_flags_.ext_draw_buffers = true;
+
+ GLint max_color_attachments = 0;
+ glGetIntegerv(GL_MAX_COLOR_ATTACHMENTS_EXT, &max_color_attachments);
+ for (GLenum i = GL_COLOR_ATTACHMENT1_EXT;
+ i < static_cast<GLenum>(GL_COLOR_ATTACHMENT0 + max_color_attachments);
+ ++i) {
+ validators_.attachment.AddValue(i);
+ }
+ COMPILE_ASSERT(GL_COLOR_ATTACHMENT0_EXT == GL_COLOR_ATTACHMENT0,
+ color_attachment0_variation_must_match);
+
+ validators_.g_l_state.AddValue(GL_MAX_COLOR_ATTACHMENTS_EXT);
+ validators_.g_l_state.AddValue(GL_MAX_DRAW_BUFFERS_ARB);
+ GLint max_draw_buffers = 0;
+ glGetIntegerv(GL_MAX_DRAW_BUFFERS_ARB, &max_draw_buffers);
+ for (GLenum i = GL_DRAW_BUFFER0_ARB;
+ i < static_cast<GLenum>(GL_DRAW_BUFFER0_ARB + max_draw_buffers);
+ ++i) {
+ validators_.g_l_state.AddValue(i);
+ }
+ }
+
+ if (is_es3 || extensions.Contains("GL_EXT_blend_minmax") ||
+ gfx::HasDesktopGLFeatures()) {
+ AddExtensionString("GL_EXT_blend_minmax");
+ validators_.equation.AddValue(GL_MIN_EXT);
+ validators_.equation.AddValue(GL_MAX_EXT);
+ COMPILE_ASSERT(GL_MIN_EXT == GL_MIN && GL_MAX_EXT == GL_MAX,
+ min_max_variations_must_match);
+ }
+
+ // TODO(dshwang): GLES3 supports gl_FragDepth, not gl_FragDepthEXT.
+ if (extensions.Contains("GL_EXT_frag_depth") || gfx::HasDesktopGLFeatures()) {
+ AddExtensionString("GL_EXT_frag_depth");
+ feature_flags_.ext_frag_depth = true;
+ }
+
+ if (extensions.Contains("GL_EXT_shader_texture_lod") ||
+ gfx::HasDesktopGLFeatures()) {
+ AddExtensionString("GL_EXT_shader_texture_lod");
+ feature_flags_.ext_shader_texture_lod = true;
+ }
+
+#if !defined(OS_MACOSX)
+ if (workarounds_.disable_egl_khr_fence_sync) {
+ gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync = false;
+ }
+#endif
+ if (workarounds_.disable_arb_sync)
+ gfx::g_driver_gl.ext.b_GL_ARB_sync = false;
+ bool ui_gl_fence_works = gfx::GLFence::IsSupported();
+ UMA_HISTOGRAM_BOOLEAN("GPU.FenceSupport", ui_gl_fence_works);
+
+ feature_flags_.map_buffer_range =
+ is_es3 || extensions.Contains("GL_ARB_map_buffer_range");
+
+ // Really it's part of core OpenGL 2.1 and up, but let's assume the
+ // extension is still advertised.
+ bool has_pixel_buffers =
+ is_es3 || extensions.Contains("GL_ARB_pixel_buffer_object");
+
+ // We will use either glMapBuffer() or glMapBufferRange() for async readbacks.
+ if (has_pixel_buffers && ui_gl_fence_works &&
+ !workarounds_.disable_async_readpixels) {
+ feature_flags_.use_async_readpixels = true;
+ }
+
+ if (is_es3 || extensions.Contains("GL_ARB_sampler_objects")) {
+ feature_flags_.enable_samplers = true;
+ // TODO(dsinclair): Add AddExtensionString("GL_CHROMIUM_sampler_objects")
+ // when available.
+ }
+
+ if ((is_es3 || extensions.Contains("GL_EXT_discard_framebuffer")) &&
+ !workarounds_.disable_ext_discard_framebuffer) {
+ // DiscardFramebufferEXT is automatically bound to InvalidateFramebuffer.
+ AddExtensionString("GL_EXT_discard_framebuffer");
+ feature_flags_.ext_discard_framebuffer = true;
+ }
+
+ if (ui_gl_fence_works) {
+ AddExtensionString("GL_CHROMIUM_sync_query");
+ feature_flags_.chromium_sync_query = true;
+ }
+
+ if (extensions.Contains("GL_NV_path_rendering")) {
+ if (extensions.Contains("GL_EXT_direct_state_access") || is_es3) {
+ AddExtensionString("GL_CHROMIUM_path_rendering");
+ feature_flags_.chromium_path_rendering = true;
+ validators_.g_l_state.AddValue(GL_PATH_MODELVIEW_MATRIX_CHROMIUM);
+ validators_.g_l_state.AddValue(GL_PATH_PROJECTION_MATRIX_CHROMIUM);
+ }
+ }
+}
+
+void FeatureInfo::AddExtensionString(const char* s) {
+ std::string str(s);
+ size_t pos = extensions_.find(str);
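+  // Skip matches that are only a prefix of a longer extension name; e.g. a
+  // lookup for "GL_EXT_foo" must not be satisfied by an already-added
+  // "GL_EXT_foo_bar" (both names here are illustrative, not real extensions).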
+ while (pos != std::string::npos &&
+ pos + str.length() < extensions_.length() &&
+ extensions_.substr(pos + str.length(), 1) != " ") {
+ // This extension name is a substring of another.
+ pos = extensions_.find(str, pos + str.length());
+ }
+ if (pos == std::string::npos) {
+ extensions_ += (extensions_.empty() ? "" : " ") + str;
+ }
+}
+
+FeatureInfo::~FeatureInfo() {
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/feature_info.h b/gpu/command_buffer/service/feature_info.h
new file mode 100644
index 0000000..740b833
--- /dev/null
+++ b/gpu/command_buffer/service/feature_info.h
@@ -0,0 +1,152 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_FEATURE_INFO_H_
+#define GPU_COMMAND_BUFFER_SERVICE_FEATURE_INFO_H_
+
+#include <set>
+#include <string>
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/sys_info.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_validation.h"
+#include "gpu/config/gpu_driver_bug_workaround_type.h"
+#include "gpu/gpu_export.h"
+
+namespace base {
+class CommandLine;
+}
+
+namespace gpu {
+namespace gles2 {
+
+// FeatureInfo records the features that are available for a ContextGroup.
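+//
+// A minimal usage sketch (assumes a GL context is already current, which
+// Initialize() requires):
+//
+//   scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+//   feature_info->Initialize();
+//   if (feature_info->feature_flags().oes_depth24) {
+//     // GL_DEPTH_COMPONENT24 renderbuffers are available.
+//   }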
+class GPU_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
+ public:
+ struct FeatureFlags {
+ FeatureFlags();
+
+ bool chromium_color_buffer_float_rgba;
+ bool chromium_color_buffer_float_rgb;
+ bool chromium_framebuffer_multisample;
+ bool chromium_sync_query;
+ // Use glBlitFramebuffer() and glRenderbufferStorageMultisample() with
+ // GL_EXT_framebuffer_multisample-style semantics, since they are exposed
+ // as core GL functions on this implementation.
+ bool use_core_framebuffer_multisample;
+ bool multisampled_render_to_texture;
+ // Use the IMG GLenum values and functions rather than EXT.
+ bool use_img_for_multisampled_render_to_texture;
+ bool oes_standard_derivatives;
+ bool oes_egl_image_external;
+ bool oes_depth24;
+ bool oes_compressed_etc1_rgb8_texture;
+ bool packed_depth24_stencil8;
+ bool npot_ok;
+ bool enable_texture_float_linear;
+ bool enable_texture_half_float_linear;
+ bool angle_translated_shader_source;
+ bool angle_pack_reverse_row_order;
+ bool arb_texture_rectangle;
+ bool angle_instanced_arrays;
+ bool occlusion_query_boolean;
+ bool use_arb_occlusion_query2_for_occlusion_query_boolean;
+ bool use_arb_occlusion_query_for_occlusion_query_boolean;
+ bool native_vertex_array_object;
+ bool ext_texture_format_bgra8888;
+ bool enable_shader_name_hashing;
+ bool enable_samplers;
+ bool ext_draw_buffers;
+ bool ext_frag_depth;
+ bool ext_shader_texture_lod;
+ bool use_async_readpixels;
+ bool map_buffer_range;
+ bool ext_discard_framebuffer;
+ bool angle_depth_texture;
+ bool is_angle;
+ bool is_swiftshader;
+ bool angle_texture_usage;
+ bool ext_texture_storage;
+ bool chromium_path_rendering;
+ };
+
+ struct Workarounds {
+ Workarounds();
+
+#define GPU_OP(type, name) bool name;
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+
+ // Note: 0 here means use driver limit.
+ GLint max_texture_size;
+ GLint max_cube_map_texture_size;
+ GLint max_fragment_uniform_vectors;
+ GLint max_varying_vectors;
+ GLint max_vertex_uniform_vectors;
+ };
+
+ // Constructor with workarounds taken from the current process's CommandLine
+ FeatureInfo();
+
+ // Constructor with workarounds taken from |command_line|
+ FeatureInfo(const base::CommandLine& command_line);
+
+ // Initializes the feature information. Needs a current GL context.
+ bool Initialize();
+ bool Initialize(const DisallowedFeatures& disallowed_features);
+
+ const Validators* validators() const {
+ return &validators_;
+ }
+
+ const ValueValidator<GLenum>& GetTextureFormatValidator(GLenum format) {
+ return texture_format_validators_[format];
+ }
+
+ const std::string& extensions() const {
+ return extensions_;
+ }
+
+ const FeatureFlags& feature_flags() const {
+ return feature_flags_;
+ }
+
+ const Workarounds& workarounds() const {
+ return workarounds_;
+ }
+
+ private:
+ friend class base::RefCounted<FeatureInfo>;
+ friend class BufferManagerClientSideArraysTest;
+
+ typedef base::hash_map<GLenum, ValueValidator<GLenum> > ValidatorMap;
+ ValidatorMap texture_format_validators_;
+
+ ~FeatureInfo();
+
+ void AddExtensionString(const char* s);
+ void InitializeBasicState(const base::CommandLine& command_line);
+ void InitializeFeatures();
+
+ Validators validators_;
+
+ DisallowedFeatures disallowed_features_;
+
+ // The extensions string returned by glGetString(GL_EXTENSIONS);
+ std::string extensions_;
+
+ // Flags for some features
+ FeatureFlags feature_flags_;
+
+ // Flags for Workarounds.
+ Workarounds workarounds_;
+
+ DISALLOW_COPY_AND_ASSIGN(FeatureInfo);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_FEATURE_INFO_H_
diff --git a/gpu/command_buffer/service/feature_info_unittest.cc b/gpu/command_buffer/service/feature_info_unittest.cc
new file mode 100644
index 0000000..937dd1e
--- /dev/null
+++ b/gpu/command_buffer/service/feature_info_unittest.cc
@@ -0,0 +1,1296 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/feature_info.h"
+
+#include "base/command_line.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/config/gpu_driver_bug_workaround_type.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::HasSubstr;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Not;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+const char kGLRendererStringANGLE[] = "ANGLE (some renderer)";
+} // anonymous namespace
+
+class FeatureInfoTest : public GpuServiceTest {
+ public:
+ FeatureInfoTest() {
+ }
+
+ void SetupInitExpectations(const char* extensions) {
+ SetupInitExpectationsWithGLVersion(extensions, "", "");
+ }
+
+ void SetupInitExpectationsWithGLVersion(
+ const char* extensions, const char* renderer, const char* version) {
+ GpuServiceTest::SetUpWithGLVersion(version, extensions);
+ TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
+ gl_.get(), extensions, renderer, version);
+ info_ = new FeatureInfo();
+ info_->Initialize();
+ }
+
+ void SetupWithCommandLine(const CommandLine& command_line) {
+ GpuServiceTest::SetUp();
+ info_ = new FeatureInfo(command_line);
+ }
+
+ void SetupInitExpectationsWithCommandLine(
+ const char* extensions, const CommandLine& command_line) {
+ GpuServiceTest::SetUpWithGLVersion("2.0", extensions);
+ TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
+ gl_.get(), extensions, "", "");
+ info_ = new FeatureInfo(command_line);
+ info_->Initialize();
+ }
+
+ void SetupWithoutInit() {
+ GpuServiceTest::SetUp();
+ info_ = new FeatureInfo();
+ }
+
+ protected:
+ virtual void SetUp() OVERRIDE {
+ // Do nothing here, since we are using the explicit Setup*() functions.
+ }
+
+ virtual void TearDown() OVERRIDE {
+ info_ = NULL;
+ GpuServiceTest::TearDown();
+ }
+
+ scoped_refptr<FeatureInfo> info_;
+};
+
+namespace {
+
+struct FormatInfo {
+ GLenum format;
+ const GLenum* types;
+ size_t count;
+};
+
+} // anonymous namespace.
+
+TEST_F(FeatureInfoTest, Basic) {
+ SetupWithoutInit();
+ // Test it starts off uninitialized.
+ EXPECT_FALSE(info_->feature_flags().chromium_framebuffer_multisample);
+ EXPECT_FALSE(info_->feature_flags().use_core_framebuffer_multisample);
+ EXPECT_FALSE(info_->feature_flags().multisampled_render_to_texture);
+ EXPECT_FALSE(info_->feature_flags(
+ ).use_img_for_multisampled_render_to_texture);
+ EXPECT_FALSE(info_->feature_flags().oes_standard_derivatives);
+ EXPECT_FALSE(info_->feature_flags().npot_ok);
+ EXPECT_FALSE(info_->feature_flags().enable_texture_float_linear);
+ EXPECT_FALSE(info_->feature_flags().enable_texture_half_float_linear);
+ EXPECT_FALSE(info_->feature_flags().oes_egl_image_external);
+ EXPECT_FALSE(info_->feature_flags().oes_depth24);
+ EXPECT_FALSE(info_->feature_flags().packed_depth24_stencil8);
+ EXPECT_FALSE(info_->feature_flags().angle_translated_shader_source);
+ EXPECT_FALSE(info_->feature_flags().angle_pack_reverse_row_order);
+ EXPECT_FALSE(info_->feature_flags().arb_texture_rectangle);
+ EXPECT_FALSE(info_->feature_flags().angle_instanced_arrays);
+ EXPECT_FALSE(info_->feature_flags().occlusion_query_boolean);
+ EXPECT_FALSE(info_->feature_flags(
+ ).use_arb_occlusion_query2_for_occlusion_query_boolean);
+ EXPECT_FALSE(info_->feature_flags(
+ ).use_arb_occlusion_query_for_occlusion_query_boolean);
+ EXPECT_FALSE(info_->feature_flags().native_vertex_array_object);
+ EXPECT_FALSE(info_->feature_flags().map_buffer_range);
+ EXPECT_FALSE(info_->feature_flags().use_async_readpixels);
+ EXPECT_FALSE(info_->feature_flags().ext_discard_framebuffer);
+ EXPECT_FALSE(info_->feature_flags().angle_depth_texture);
+ EXPECT_FALSE(info_->feature_flags().is_angle);
+
+#define GPU_OP(type, name) EXPECT_FALSE(info_->workarounds().name);
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+ EXPECT_EQ(0, info_->workarounds().max_texture_size);
+ EXPECT_EQ(0, info_->workarounds().max_cube_map_texture_size);
+
+ // Test good types.
+ {
+ static const GLenum kAlphaTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const GLenum kRGBTypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_5_6_5,
+ };
+ static const GLenum kRGBATypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_UNSIGNED_SHORT_5_5_5_1,
+ };
+ static const GLenum kLuminanceTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const GLenum kLuminanceAlphaTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const FormatInfo kFormatTypes[] = {
+ { GL_ALPHA, kAlphaTypes, arraysize(kAlphaTypes), },
+ { GL_RGB, kRGBTypes, arraysize(kRGBTypes), },
+ { GL_RGBA, kRGBATypes, arraysize(kRGBATypes), },
+ { GL_LUMINANCE, kLuminanceTypes, arraysize(kLuminanceTypes), },
+ { GL_LUMINANCE_ALPHA, kLuminanceAlphaTypes,
+ arraysize(kLuminanceAlphaTypes), } ,
+ };
+ for (size_t ii = 0; ii < arraysize(kFormatTypes); ++ii) {
+ const FormatInfo& info = kFormatTypes[ii];
+ const ValueValidator<GLenum>& validator =
+ info_->GetTextureFormatValidator(info.format);
+ for (size_t jj = 0; jj < info.count; ++jj) {
+ EXPECT_TRUE(validator.IsValid(info.types[jj]));
+ }
+ }
+ }
+
+ // Test some bad types
+ {
+ static const GLenum kAlphaTypes[] = {
+ GL_UNSIGNED_SHORT_5_5_5_1,
+ GL_FLOAT,
+ };
+ static const GLenum kRGBTypes[] = {
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_FLOAT,
+ };
+ static const GLenum kRGBATypes[] = {
+ GL_UNSIGNED_SHORT_5_6_5,
+ GL_FLOAT,
+ };
+ static const GLenum kLuminanceTypes[] = {
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_FLOAT,
+ };
+ static const GLenum kLuminanceAlphaTypes[] = {
+ GL_UNSIGNED_SHORT_5_5_5_1,
+ GL_FLOAT,
+ };
+ static const GLenum kBGRATypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_5_6_5,
+ GL_FLOAT,
+ };
+ static const GLenum kDepthTypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT,
+ GL_UNSIGNED_INT,
+ GL_FLOAT,
+ };
+ static const FormatInfo kFormatTypes[] = {
+ { GL_ALPHA, kAlphaTypes, arraysize(kAlphaTypes), },
+ { GL_RGB, kRGBTypes, arraysize(kRGBTypes), },
+ { GL_RGBA, kRGBATypes, arraysize(kRGBATypes), },
+ { GL_LUMINANCE, kLuminanceTypes, arraysize(kLuminanceTypes), },
+ { GL_LUMINANCE_ALPHA, kLuminanceAlphaTypes,
+ arraysize(kLuminanceAlphaTypes), } ,
+ { GL_BGRA_EXT, kBGRATypes, arraysize(kBGRATypes), },
+ { GL_DEPTH_COMPONENT, kDepthTypes, arraysize(kDepthTypes), },
+ };
+ for (size_t ii = 0; ii < arraysize(kFormatTypes); ++ii) {
+ const FormatInfo& info = kFormatTypes[ii];
+ const ValueValidator<GLenum>& validator =
+ info_->GetTextureFormatValidator(info.format);
+ for (size_t jj = 0; jj < info.count; ++jj) {
+ EXPECT_FALSE(validator.IsValid(info.types[jj]));
+ }
+ }
+ }
+}
+
+TEST_F(FeatureInfoTest, InitializeNoExtensions) {
+ SetupInitExpectations("");
+ // Check default extensions are there
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_resource_safe"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_strict_attribs"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_ANGLE_translated_shader_source"));
+
+ // Check a couple of random extensions that should not be there.
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_OES_texture_npot")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_EXT_texture_compression_dxt1")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_CHROMIUM_texture_compression_dxt3")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_CHROMIUM_texture_compression_dxt5")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_ANGLE_texture_usage")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_EXT_texture_storage")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_compressed_ETC1_RGB8_texture")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_AMD_compressed_ATC_texture")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_IMG_texture_compression_pvrtc")));
+ EXPECT_FALSE(info_->feature_flags().npot_ok);
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ETC1_RGB8_OES));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGB_AMD));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGBA_EXPLICIT_ALPHA_AMD));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG));
+ EXPECT_FALSE(info_->validators()->read_pixel_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_FALSE(info_->validators()->texture_parameter.IsValid(
+ GL_TEXTURE_MAX_ANISOTROPY_EXT));
+ EXPECT_FALSE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT));
+ EXPECT_FALSE(info_->validators()->frame_buffer_target.IsValid(
+ GL_READ_FRAMEBUFFER_EXT));
+ EXPECT_FALSE(info_->validators()->frame_buffer_target.IsValid(
+ GL_DRAW_FRAMEBUFFER_EXT));
+ EXPECT_FALSE(info_->validators()->g_l_state.IsValid(
+ GL_READ_FRAMEBUFFER_BINDING_EXT));
+ EXPECT_FALSE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+ EXPECT_FALSE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH24_STENCIL8));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_STENCIL));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_RGBA32F));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_RGB32F));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(
+ GL_DEPTH_STENCIL));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(
+ GL_UNSIGNED_INT_24_8));
+ EXPECT_FALSE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH_COMPONENT24));
+ EXPECT_FALSE(info_->validators()->texture_parameter.IsValid(
+ GL_TEXTURE_USAGE_ANGLE));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH_COMPONENT16));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH_COMPONENT32_OES));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH24_STENCIL8_OES));
+ EXPECT_FALSE(info_->validators()->equation.IsValid(GL_MIN_EXT));
+ EXPECT_FALSE(info_->validators()->equation.IsValid(GL_MAX_EXT));
+ EXPECT_FALSE(info_->feature_flags().chromium_sync_query);
+}
+
+TEST_F(FeatureInfoTest, InitializeWithANGLE) {
+ SetupInitExpectationsWithGLVersion("", kGLRendererStringANGLE, "");
+ EXPECT_TRUE(info_->feature_flags().is_angle);
+}
+
+TEST_F(FeatureInfoTest, InitializeNPOTExtensionGLES) {
+ SetupInitExpectations("GL_OES_texture_npot");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_npot"));
+ EXPECT_TRUE(info_->feature_flags().npot_ok);
+}
+
+TEST_F(FeatureInfoTest, InitializeNPOTExtensionGL) {
+ SetupInitExpectations("GL_ARB_texture_non_power_of_two");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_npot"));
+ EXPECT_TRUE(info_->feature_flags().npot_ok);
+}
+
+TEST_F(FeatureInfoTest, InitializeDXTExtensionGLES2) {
+ SetupInitExpectations("GL_EXT_texture_compression_dxt1");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_compression_dxt1"));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeDXTExtensionGL) {
+ SetupInitExpectations("GL_EXT_texture_compression_s3tc");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_compression_dxt1"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_texture_compression_dxt3"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_texture_compression_dxt5"));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_format_BGRA8888GLES2) {
+ SetupInitExpectations("GL_EXT_texture_format_BGRA8888");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_format_BGRA8888"));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_BGRA_EXT).IsValid(
+ GL_UNSIGNED_BYTE));
+ EXPECT_FALSE(info_->validators()->render_buffer_format.IsValid(
+ GL_BGRA8_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_format_BGRA8888GL) {
+ SetupInitExpectations("GL_EXT_bgra");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_format_BGRA8888"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_read_format_bgra"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_renderbuffer_format_BGRA8888"));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->validators()->read_pixel_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_BGRA_EXT).IsValid(
+ GL_UNSIGNED_BYTE));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_BGRA8_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_format_BGRA8888Apple) {
+ SetupInitExpectations("GL_APPLE_texture_format_BGRA8888");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_format_BGRA8888"));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_BGRA_EXT).IsValid(
+ GL_UNSIGNED_BYTE));
+ EXPECT_FALSE(info_->validators()->render_buffer_format.IsValid(
+ GL_BGRA8_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_read_format_bgra) {
+ SetupInitExpectations("GL_EXT_read_format_bgra");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_read_format_bgra"));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->validators()->read_pixel_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_FALSE(info_->validators()->render_buffer_format.IsValid(
+ GL_BGRA8_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_storage) {
+ SetupInitExpectations("GL_EXT_texture_storage");
+ EXPECT_TRUE(info_->feature_flags().ext_texture_storage);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_TRUE(info_->validators()->texture_parameter.IsValid(
+ GL_TEXTURE_IMMUTABLE_FORMAT_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_BGRA8_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGBA32F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGB32F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_ALPHA32F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE32F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE_ALPHA32F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGBA16F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGB16F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_ALPHA16F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE16F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE_ALPHA16F_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_texture_storage) {
+ SetupInitExpectations("GL_ARB_texture_storage");
+ EXPECT_TRUE(info_->feature_flags().ext_texture_storage);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_TRUE(info_->validators()->texture_parameter.IsValid(
+ GL_TEXTURE_IMMUTABLE_FORMAT_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_storage_BGRA) {
+ SetupInitExpectations("GL_EXT_texture_storage GL_EXT_bgra");
+ EXPECT_TRUE(info_->feature_flags().ext_texture_storage);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_BGRA8_EXT));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_texture_storage_BGRA) {
+ SetupInitExpectations("GL_ARB_texture_storage GL_EXT_bgra");
+ EXPECT_TRUE(info_->feature_flags().ext_texture_storage);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_BGRA8_EXT));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_storage_BGRA8888) {
+ SetupInitExpectations(
+ "GL_EXT_texture_storage GL_EXT_texture_format_BGRA8888");
+ EXPECT_TRUE(info_->feature_flags().ext_texture_storage);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_BGRA8_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_storage_float) {
+ SetupInitExpectations("GL_EXT_texture_storage GL_OES_texture_float");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_float"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGBA32F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGB32F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_ALPHA32F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE32F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE_ALPHA32F_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_storage_half_float) {
+ SetupInitExpectations("GL_EXT_texture_storage GL_OES_texture_half_float");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_half_float"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGBA16F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGB16F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_ALPHA16F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE16F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE_ALPHA16F_EXT));
+}
+
+// Check how to handle ES, texture_storage and BGRA combination; 8 tests.
+
+// 1- ES2 + GL_EXT_texture_storage -> GL_EXT_texture_storage (and no
+// GL_EXT_texture_format_BGRA8888 - we don't claim to handle GL_BGRA8 in
+// glTexStorage2DEXT)
+TEST_F(FeatureInfoTest, InitializeGLES2_texture_storage) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_EXT_texture_storage", "", "OpenGL ES 2.0");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_EXT_texture_format_BGRA8888")));
+}
+
+// 2- ES2 + GL_EXT_texture_storage + (GL_EXT_texture_format_BGRA8888 or
+// GL_APPLE_texture_format_bgra8888)
+TEST_F(FeatureInfoTest, InitializeGLES2_texture_storage_BGRA) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_EXT_texture_storage GL_EXT_texture_format_BGRA8888",
+ "",
+ "OpenGL ES 2.0");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+// 3- ES2 + GL_EXT_texture_format_BGRA8888 or GL_APPLE_texture_format_bgra8888
+TEST_F(FeatureInfoTest, InitializeGLES2_texture_format_BGRA) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_EXT_texture_format_BGRA8888", "", "OpenGL ES 2.0");
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_EXT_texture_storage")));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+// 4- ES2 (neither GL_EXT_texture_storage nor GL_EXT_texture_format_BGRA8888) ->
+// nothing
+TEST_F(FeatureInfoTest, InitializeGLES2_neither_texture_storage_nor_BGRA) {
+ SetupInitExpectationsWithGLVersion("", "", "OpenGL ES 2.0");
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_EXT_texture_storage")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_EXT_texture_format_BGRA8888")));
+}
+
+// 5- ES3 + GL_EXT_texture_format_BGRA8888 -> GL_EXT_texture_format_BGRA8888
+// (we can't expose GL_EXT_texture_storage because we fail the GL_BGRA8
+// requirement)
+TEST_F(FeatureInfoTest, InitializeGLES3_texture_storage_EXT_BGRA) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_EXT_texture_format_BGRA8888", "", "OpenGL ES 3.0");
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_EXT_texture_storage")));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+// 6- ES3 + GL_APPLE_texture_format_bgra8888 -> GL_EXT_texture_storage +
+// GL_EXT_texture_format_BGRA8888 (driver promises to handle GL_BGRA8 by
+// exposing GL_APPLE_texture_format_bgra8888)
+TEST_F(FeatureInfoTest, InitializeGLES3_texture_storage_APPLE_BGRA) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_APPLE_texture_format_BGRA8888", "", "OpenGL ES 3.0");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+// 7- ES3 + GL_EXT_texture_storage + GL_EXT_texture_format_BGRA8888 ->
+// GL_EXT_texture_storage + GL_EXT_texture_format_BGRA8888 (driver promises to
+// handle GL_BGRA8 by exposing GL_EXT_texture_storage)
+TEST_F(FeatureInfoTest, InitializeGLES3_EXT_texture_storage_EXT_BGRA) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_EXT_texture_storage GL_EXT_texture_format_BGRA8888",
+ "",
+ "OpenGL ES 3.0");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+// 8- ES3 + none of the above -> GL_EXT_texture_storage (and no
+// GL_EXT_texture_format_BGRA8888 - we don't claim to handle GL_BGRA8)
+TEST_F(FeatureInfoTest, InitializeGLES3_texture_storage) {
+ SetupInitExpectationsWithGLVersion("", "", "OpenGL ES 3.0");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_EXT_texture_format_BGRA8888")));
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_texture_float) {
+ SetupInitExpectations("GL_ARB_texture_float");
+ EXPECT_TRUE(info_->feature_flags().chromium_color_buffer_float_rgba);
+ EXPECT_TRUE(info_->feature_flags().chromium_color_buffer_float_rgb);
+ std::string extensions = info_->extensions() + " ";
+ EXPECT_THAT(extensions, HasSubstr("GL_CHROMIUM_color_buffer_float_rgb "));
+ EXPECT_THAT(extensions, HasSubstr("GL_CHROMIUM_color_buffer_float_rgba"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_RGBA32F));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_RGB32F));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_texture_floatGLES2) {
+ SetupInitExpectations("GL_OES_texture_float");
+ EXPECT_FALSE(info_->feature_flags().enable_texture_float_linear);
+ EXPECT_FALSE(info_->feature_flags().enable_texture_half_float_linear);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_float"));
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_OES_texture_half_float")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_float_linear")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_half_float_linear")));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_FLOAT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_texture_float_linearGLES2) {
+ SetupInitExpectations("GL_OES_texture_float GL_OES_texture_float_linear");
+ EXPECT_TRUE(info_->feature_flags().enable_texture_float_linear);
+ EXPECT_FALSE(info_->feature_flags().enable_texture_half_float_linear);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_float"));
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_OES_texture_half_float")));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_float_linear"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_half_float_linear")));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_FLOAT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_texture_half_floatGLES2) {
+ SetupInitExpectations("GL_OES_texture_half_float");
+ EXPECT_FALSE(info_->feature_flags().enable_texture_float_linear);
+ EXPECT_FALSE(info_->feature_flags().enable_texture_half_float_linear);
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_OES_texture_float")));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_half_float"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_float_linear")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_half_float_linear")));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_FLOAT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_texture_half_float_linearGLES2) {
+ SetupInitExpectations(
+ "GL_OES_texture_half_float GL_OES_texture_half_float_linear");
+ EXPECT_FALSE(info_->feature_flags().enable_texture_float_linear);
+ EXPECT_TRUE(info_->feature_flags().enable_texture_half_float_linear);
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_OES_texture_float")));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_half_float"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_float_linear")));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_texture_half_float_linear"));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_FLOAT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_framebuffer_multisample) {
+ SetupInitExpectations("GL_EXT_framebuffer_multisample");
+ EXPECT_TRUE(info_->feature_flags().chromium_framebuffer_multisample);
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_framebuffer_multisample"));
+ EXPECT_TRUE(info_->validators()->frame_buffer_target.IsValid(
+ GL_READ_FRAMEBUFFER_EXT));
+ EXPECT_TRUE(info_->validators()->frame_buffer_target.IsValid(
+ GL_DRAW_FRAMEBUFFER_EXT));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_READ_FRAMEBUFFER_BINDING_EXT));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_RENDERBUFFER_SAMPLES_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeANGLE_framebuffer_multisample) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_ANGLE_framebuffer_multisample", kGLRendererStringANGLE, "");
+ EXPECT_TRUE(info_->feature_flags().chromium_framebuffer_multisample);
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_framebuffer_multisample"));
+ EXPECT_TRUE(info_->validators()->frame_buffer_target.IsValid(
+ GL_READ_FRAMEBUFFER_EXT));
+ EXPECT_TRUE(info_->validators()->frame_buffer_target.IsValid(
+ GL_DRAW_FRAMEBUFFER_EXT));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_READ_FRAMEBUFFER_BINDING_EXT));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_RENDERBUFFER_SAMPLES_EXT));
+}
+
+// We don't allow ANGLE_framebuffer_multisample on non-ANGLE implementations,
+// because we wouldn't be choosing the right driver entry point and because the
+// extension was falsely advertised on some Android devices (crbug.com/165736).
+TEST_F(FeatureInfoTest, InitializeANGLE_framebuffer_multisampleWithoutANGLE) {
+ SetupInitExpectations("GL_ANGLE_framebuffer_multisample");
+ EXPECT_FALSE(info_->feature_flags().chromium_framebuffer_multisample);
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_CHROMIUM_framebuffer_multisample")));
+ EXPECT_FALSE(info_->validators()->frame_buffer_target.IsValid(
+ GL_READ_FRAMEBUFFER_EXT));
+ EXPECT_FALSE(info_->validators()->frame_buffer_target.IsValid(
+ GL_DRAW_FRAMEBUFFER_EXT));
+ EXPECT_FALSE(info_->validators()->g_l_state.IsValid(
+ GL_READ_FRAMEBUFFER_BINDING_EXT));
+ EXPECT_FALSE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_FALSE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_RENDERBUFFER_SAMPLES_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_multisampled_render_to_texture) {
+ SetupInitExpectations("GL_EXT_multisampled_render_to_texture");
+  EXPECT_TRUE(info_->feature_flags().multisampled_render_to_texture);
+  EXPECT_FALSE(
+      info_->feature_flags().use_img_for_multisampled_render_to_texture);
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_multisampled_render_to_texture"));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_RENDERBUFFER_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->frame_buffer_parameter.IsValid(
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeIMG_multisampled_render_to_texture) {
+ SetupInitExpectations("GL_IMG_multisampled_render_to_texture");
+  EXPECT_TRUE(info_->feature_flags().multisampled_render_to_texture);
+  EXPECT_TRUE(
+      info_->feature_flags().use_img_for_multisampled_render_to_texture);
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_multisampled_render_to_texture"));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_RENDERBUFFER_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->frame_buffer_parameter.IsValid(
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_filter_anisotropic) {
+ SetupInitExpectations("GL_EXT_texture_filter_anisotropic");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_filter_anisotropic"));
+ EXPECT_TRUE(info_->validators()->texture_parameter.IsValid(
+ GL_TEXTURE_MAX_ANISOTROPY_EXT));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_ARB_depth_texture) {
+ SetupInitExpectations("GL_ARB_depth_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_GOOGLE_depth_texture"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_depth_texture"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_INT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_DEPTH_STENCIL).IsValid(
+ GL_UNSIGNED_INT_24_8));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_ARB_depth_texture) {
+ SetupInitExpectations("GL_OES_depth_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_GOOGLE_depth_texture"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_depth_texture"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_INT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_DEPTH_STENCIL).IsValid(
+ GL_UNSIGNED_INT_24_8));
+}
+
+TEST_F(FeatureInfoTest, InitializeANGLE_depth_texture) {
+ SetupInitExpectations("GL_ANGLE_depth_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_GOOGLE_depth_texture"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_depth_texture"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_ANGLE_depth_texture")));
+ EXPECT_TRUE(info_->feature_flags().angle_depth_texture);
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH_COMPONENT16));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH_COMPONENT32_OES));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH24_STENCIL8_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_INT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_DEPTH_STENCIL).IsValid(
+ GL_UNSIGNED_INT_24_8));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_packed_depth_stencil) {
+ SetupInitExpectations("GL_EXT_packed_depth_stencil");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_packed_depth_stencil"));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH24_STENCIL8));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_packed_depth_stencil) {
+ SetupInitExpectations("GL_OES_packed_depth_stencil");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_packed_depth_stencil"));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH24_STENCIL8));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+}
+
+TEST_F(FeatureInfoTest,
+ InitializeOES_packed_depth_stencil_and_GL_ARB_depth_texture) {
+ SetupInitExpectations("GL_OES_packed_depth_stencil GL_ARB_depth_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_packed_depth_stencil"));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH24_STENCIL8));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(
+ GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(
+ GL_UNSIGNED_INT_24_8));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_INT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_STENCIL).IsValid(
+ GL_UNSIGNED_INT_24_8));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_depth24) {
+ SetupInitExpectations("GL_OES_depth24");
+ EXPECT_TRUE(info_->feature_flags().oes_depth24);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_depth24"));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH_COMPONENT24));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_standard_derivatives) {
+ SetupInitExpectations("GL_OES_standard_derivatives");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_standard_derivatives"));
+ EXPECT_TRUE(info_->feature_flags().oes_standard_derivatives);
+ EXPECT_TRUE(info_->validators()->hint_target.IsValid(
+ GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_rgb8_rgba8) {
+ SetupInitExpectations("GL_OES_rgb8_rgba8");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_rgb8_rgba8"));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_RGB8_OES));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_RGBA8_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_EGL_image_external) {
+ SetupInitExpectations("GL_OES_EGL_image_external");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_EGL_image_external"));
+ EXPECT_TRUE(info_->feature_flags().oes_egl_image_external);
+ EXPECT_TRUE(info_->validators()->texture_bind_target.IsValid(
+ GL_TEXTURE_EXTERNAL_OES));
+ EXPECT_TRUE(info_->validators()->get_tex_param_target.IsValid(
+ GL_TEXTURE_EXTERNAL_OES));
+ EXPECT_TRUE(info_->validators()->texture_parameter.IsValid(
+ GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_TEXTURE_BINDING_EXTERNAL_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_compressed_ETC1_RGB8_texture) {
+ SetupInitExpectations("GL_OES_compressed_ETC1_RGB8_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_compressed_ETC1_RGB8_texture"));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ETC1_RGB8_OES));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_ETC1_RGB8_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeAMD_compressed_ATC_texture) {
+ SetupInitExpectations("GL_AMD_compressed_ATC_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_AMD_compressed_ATC_texture"));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGB_AMD));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGBA_EXPLICIT_ALPHA_AMD));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD));
+}
+
+TEST_F(FeatureInfoTest, InitializeIMG_texture_compression_pvrtc) {
+ SetupInitExpectations("GL_IMG_texture_compression_pvrtc");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_IMG_texture_compression_pvrtc"));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_occlusion_query_boolean) {
+ SetupInitExpectations("GL_EXT_occlusion_query_boolean");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_occlusion_query_boolean"));
+ EXPECT_TRUE(info_->feature_flags().occlusion_query_boolean);
+  EXPECT_FALSE(info_->feature_flags()
+      .use_arb_occlusion_query2_for_occlusion_query_boolean);
+  EXPECT_FALSE(info_->feature_flags()
+      .use_arb_occlusion_query_for_occlusion_query_boolean);
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_occlusion_query) {
+ SetupInitExpectations("GL_ARB_occlusion_query");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_occlusion_query_boolean"));
+ EXPECT_TRUE(info_->feature_flags().occlusion_query_boolean);
+  EXPECT_FALSE(info_->feature_flags()
+      .use_arb_occlusion_query2_for_occlusion_query_boolean);
+  EXPECT_TRUE(info_->feature_flags()
+      .use_arb_occlusion_query_for_occlusion_query_boolean);
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_occlusion_query2) {
+  SetupInitExpectations("GL_ARB_occlusion_query2");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_occlusion_query_boolean"));
+ EXPECT_TRUE(info_->feature_flags().occlusion_query_boolean);
+  EXPECT_TRUE(info_->feature_flags()
+      .use_arb_occlusion_query2_for_occlusion_query_boolean);
+  EXPECT_FALSE(info_->feature_flags()
+      .use_arb_occlusion_query_for_occlusion_query_boolean);
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_vertex_array_object) {
+ SetupInitExpectations("GL_OES_vertex_array_object");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_vertex_array_object"));
+ EXPECT_TRUE(info_->feature_flags().native_vertex_array_object);
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_vertex_array_object) {
+ SetupInitExpectations("GL_ARB_vertex_array_object");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_vertex_array_object"));
+ EXPECT_TRUE(info_->feature_flags().native_vertex_array_object);
+}
+
+TEST_F(FeatureInfoTest, InitializeAPPLE_vertex_array_object) {
+ SetupInitExpectations("GL_APPLE_vertex_array_object");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_vertex_array_object"));
+ EXPECT_TRUE(info_->feature_flags().native_vertex_array_object);
+}
+
+TEST_F(FeatureInfoTest, InitializeNo_vertex_array_object) {
+ SetupInitExpectations("");
+  // Even if none of the native extensions is available, the implementation
+  // may still emulate GL_OES_vertex_array_object functionality. In that
+  // case, native_vertex_array_object must be false.
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_vertex_array_object"));
+ EXPECT_FALSE(info_->feature_flags().native_vertex_array_object);
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_element_index_uint) {
+ SetupInitExpectations("GL_OES_element_index_uint");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_element_index_uint"));
+ EXPECT_TRUE(info_->validators()->index_type.IsValid(GL_UNSIGNED_INT));
+}
+
+TEST_F(FeatureInfoTest, InitializeVAOsWithClientSideArrays) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::USE_CLIENT_SIDE_ARRAYS_FOR_STREAM_BUFFERS));
+ SetupInitExpectationsWithCommandLine("GL_OES_vertex_array_object",
+ command_line);
+ EXPECT_TRUE(info_->workarounds().use_client_side_arrays_for_stream_buffers);
+ EXPECT_FALSE(info_->feature_flags().native_vertex_array_object);
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_blend_minmax) {
+ SetupInitExpectations("GL_EXT_blend_minmax");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_blend_minmax"));
+ EXPECT_TRUE(info_->validators()->equation.IsValid(GL_MIN_EXT));
+ EXPECT_TRUE(info_->validators()->equation.IsValid(GL_MAX_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_frag_depth) {
+ SetupInitExpectations("GL_EXT_frag_depth");
+ EXPECT_TRUE(info_->feature_flags().ext_frag_depth);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_frag_depth"));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_shader_texture_lod) {
+ SetupInitExpectations("GL_EXT_shader_texture_lod");
+ EXPECT_TRUE(info_->feature_flags().ext_shader_texture_lod);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_shader_texture_lod"));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_discard_framebuffer) {
+ SetupInitExpectations("GL_EXT_discard_framebuffer");
+ EXPECT_TRUE(info_->feature_flags().ext_discard_framebuffer);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_discard_framebuffer"));
+}
+
+TEST_F(FeatureInfoTest, InitializeSamplersWithARBSamplerObjects) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_ARB_sampler_objects", "", "OpenGL 3.0");
+ EXPECT_TRUE(info_->feature_flags().enable_samplers);
+}
+
+TEST_F(FeatureInfoTest, InitializeWithES3) {
+ SetupInitExpectationsWithGLVersion("", "", "OpenGL ES 3.0");
+ EXPECT_TRUE(info_->feature_flags().chromium_framebuffer_multisample);
+ EXPECT_TRUE(info_->feature_flags().use_core_framebuffer_multisample);
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_framebuffer_multisample"));
+ EXPECT_TRUE(info_->feature_flags().use_async_readpixels);
+ EXPECT_TRUE(info_->feature_flags().oes_standard_derivatives);
+ EXPECT_TRUE(info_->feature_flags().oes_depth24);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_GOOGLE_depth_texture"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_depth_texture"));
+ EXPECT_TRUE(
+ info_->validators()->texture_internal_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_TRUE(
+ info_->validators()->texture_internal_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT_24_8));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT)
+ .IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT)
+ .IsValid(GL_UNSIGNED_INT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_STENCIL)
+ .IsValid(GL_UNSIGNED_INT_24_8));
+ EXPECT_TRUE(info_->feature_flags().packed_depth24_stencil8);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_depth24"));
+ EXPECT_TRUE(
+ info_->validators()->render_buffer_format.IsValid(GL_DEPTH_COMPONENT24));
+ EXPECT_TRUE(
+ info_->validators()->render_buffer_format.IsValid(GL_DEPTH24_STENCIL8));
+ EXPECT_TRUE(
+ info_->validators()->texture_internal_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->feature_flags().npot_ok);
+ EXPECT_TRUE(info_->feature_flags().native_vertex_array_object);
+ EXPECT_TRUE(info_->feature_flags().enable_samplers);
+ EXPECT_TRUE(info_->feature_flags().map_buffer_range);
+ EXPECT_TRUE(info_->feature_flags().ext_discard_framebuffer);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_discard_framebuffer"));
+ EXPECT_TRUE(info_->feature_flags().chromium_sync_query);
+ EXPECT_TRUE(gfx::GLFence::IsSupported());
+}
+
+TEST_F(FeatureInfoTest, InitializeWithoutSamplers) {
+ SetupInitExpectationsWithGLVersion("", "", "OpenGL GL 3.0");
+ EXPECT_FALSE(info_->feature_flags().enable_samplers);
+}
+
+TEST_F(FeatureInfoTest, ParseDriverBugWorkaroundsSingle) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::EXIT_ON_CONTEXT_LOST));
+ // Workarounds should get parsed without the need for a context.
+ SetupWithCommandLine(command_line);
+ EXPECT_TRUE(info_->workarounds().exit_on_context_lost);
+}
+
+TEST_F(FeatureInfoTest, ParseDriverBugWorkaroundsMultiple) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::EXIT_ON_CONTEXT_LOST) + "," +
+ base::IntToString(gpu::MAX_CUBE_MAP_TEXTURE_SIZE_LIMIT_1024) + "," +
+ base::IntToString(gpu::MAX_TEXTURE_SIZE_LIMIT_4096));
+ // Workarounds should get parsed without the need for a context.
+ SetupWithCommandLine(command_line);
+ EXPECT_TRUE(info_->workarounds().exit_on_context_lost);
+ EXPECT_EQ(1024, info_->workarounds().max_cube_map_texture_size);
+ EXPECT_EQ(4096, info_->workarounds().max_texture_size);
+}
+
+TEST_F(FeatureInfoTest, InitializeWithARBSync) {
+ SetupInitExpectations("GL_ARB_sync");
+ EXPECT_TRUE(info_->feature_flags().chromium_sync_query);
+ EXPECT_TRUE(gfx::GLFence::IsSupported());
+}
+
+TEST_F(FeatureInfoTest, InitializeWithNVFence) {
+ SetupInitExpectations("GL_NV_fence");
+ EXPECT_TRUE(info_->feature_flags().chromium_sync_query);
+ EXPECT_TRUE(gfx::GLFence::IsSupported());
+}
+
+TEST_F(FeatureInfoTest, ARBSyncDisabled) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::DISABLE_ARB_SYNC));
+ SetupInitExpectationsWithCommandLine("GL_ARB_sync", command_line);
+ EXPECT_FALSE(info_->feature_flags().chromium_sync_query);
+ EXPECT_FALSE(gfx::GLFence::IsSupported());
+}
+
+TEST_F(FeatureInfoTest, InitializeCHROMIUM_path_rendering) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_NV_path_rendering GL_EXT_direct_state_access", "", "4.3");
+ EXPECT_TRUE(info_->feature_flags().chromium_path_rendering);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_path_rendering"));
+}
+
+TEST_F(FeatureInfoTest, InitializeCHROMIUM_path_rendering2) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_NV_path_rendering", "", "OpenGL ES 3.1");
+ EXPECT_TRUE(info_->feature_flags().chromium_path_rendering);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_path_rendering"));
+}
+
+TEST_F(FeatureInfoTest, InitializeNoCHROMIUM_path_rendering) {
+ SetupInitExpectationsWithGLVersion("", "", "4.3");
+ EXPECT_FALSE(info_->feature_flags().chromium_path_rendering);
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_CHROMIUM_path_rendering")));
+}
+
+TEST_F(FeatureInfoTest, InitializeNoCHROMIUM_path_rendering2) {
+ SetupInitExpectationsWithGLVersion("GL_NV_path_rendering", "", "4.3");
+ EXPECT_FALSE(info_->feature_flags().chromium_path_rendering);
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_CHROMIUM_path_rendering")));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/framebuffer_manager.cc b/gpu/command_buffer/service/framebuffer_manager.cc
new file mode 100644
index 0000000..b8026c0
--- /dev/null
+++ b/gpu/command_buffer/service/framebuffer_manager.cc
@@ -0,0 +1,763 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+
+DecoderFramebufferState::DecoderFramebufferState()
+ : clear_state_dirty(false),
+ bound_read_framebuffer(NULL),
+ bound_draw_framebuffer(NULL) {
+}
+
+DecoderFramebufferState::~DecoderFramebufferState() {
+}
+
+Framebuffer::FramebufferComboCompleteMap*
+ Framebuffer::framebuffer_combo_complete_map_;
+
+// Framebuffer completeness is not cacheable on OS X because of dynamic
+// graphics switching.
+// http://crbug.com/180876
+#if defined(OS_MACOSX)
+bool Framebuffer::allow_framebuffer_combo_complete_map_ = false;
+#else
+bool Framebuffer::allow_framebuffer_combo_complete_map_ = true;
+#endif
+
+void Framebuffer::ClearFramebufferCompleteComboMap() {
+ if (framebuffer_combo_complete_map_) {
+ framebuffer_combo_complete_map_->clear();
+ }
+}
+
+class RenderbufferAttachment
+ : public Framebuffer::Attachment {
+ public:
+ explicit RenderbufferAttachment(
+ Renderbuffer* renderbuffer)
+ : renderbuffer_(renderbuffer) {
+ }
+
+ virtual GLsizei width() const OVERRIDE {
+ return renderbuffer_->width();
+ }
+
+ virtual GLsizei height() const OVERRIDE {
+ return renderbuffer_->height();
+ }
+
+ virtual GLenum internal_format() const OVERRIDE {
+ return renderbuffer_->internal_format();
+ }
+
+ virtual GLenum texture_type() const OVERRIDE {
+ return 0;
+ }
+
+ virtual GLsizei samples() const OVERRIDE {
+ return renderbuffer_->samples();
+ }
+
+ virtual GLuint object_name() const OVERRIDE {
+ return renderbuffer_->client_id();
+ }
+
+ virtual bool cleared() const OVERRIDE {
+ return renderbuffer_->cleared();
+ }
+
+ virtual void SetCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* /* texture_manager */,
+ bool cleared) OVERRIDE {
+ renderbuffer_manager->SetCleared(renderbuffer_.get(), cleared);
+ }
+
+ virtual bool IsTexture(
+ TextureRef* /* texture */) const OVERRIDE {
+ return false;
+ }
+
+ virtual bool IsRenderbuffer(
+ Renderbuffer* renderbuffer) const OVERRIDE {
+ return renderbuffer_.get() == renderbuffer;
+ }
+
+ virtual bool CanRenderTo() const OVERRIDE {
+ return true;
+ }
+
+ virtual void DetachFromFramebuffer(Framebuffer* framebuffer) const OVERRIDE {
+ // Nothing to do for renderbuffers.
+ }
+
+ virtual bool ValidForAttachmentType(
+ GLenum attachment_type, uint32 max_color_attachments) OVERRIDE {
+ uint32 need = GLES2Util::GetChannelsNeededForAttachmentType(
+ attachment_type, max_color_attachments);
+ uint32 have = GLES2Util::GetChannelsForFormat(internal_format());
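+    // Valid only if the renderbuffer's format supplies at least one of the
+    // channels (color, depth or stencil) that this attachment point requires.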
+ return (need & have) != 0;
+ }
+
+ Renderbuffer* renderbuffer() const {
+ return renderbuffer_.get();
+ }
+
+ virtual size_t GetSignatureSize(
+ TextureManager* texture_manager) const OVERRIDE {
+ return renderbuffer_->GetSignatureSize();
+ }
+
+ virtual void AddToSignature(
+ TextureManager* texture_manager, std::string* signature) const OVERRIDE {
+ DCHECK(signature);
+ renderbuffer_->AddToSignature(signature);
+ }
+
+ virtual void OnWillRenderTo() const OVERRIDE {}
+ virtual void OnDidRenderTo() const OVERRIDE {}
+
+ protected:
+ virtual ~RenderbufferAttachment() { }
+
+ private:
+ scoped_refptr<Renderbuffer> renderbuffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(RenderbufferAttachment);
+};
+
+class TextureAttachment
+ : public Framebuffer::Attachment {
+ public:
+ TextureAttachment(
+ TextureRef* texture_ref, GLenum target, GLint level, GLsizei samples)
+ : texture_ref_(texture_ref),
+ target_(target),
+ level_(level),
+ samples_(samples) {
+ }
+
+ virtual GLsizei width() const OVERRIDE {
+ GLsizei temp_width = 0;
+ GLsizei temp_height = 0;
+ texture_ref_->texture()->GetLevelSize(
+ target_, level_, &temp_width, &temp_height);
+ return temp_width;
+ }
+
+ virtual GLsizei height() const OVERRIDE {
+ GLsizei temp_width = 0;
+ GLsizei temp_height = 0;
+ texture_ref_->texture()->GetLevelSize(
+ target_, level_, &temp_width, &temp_height);
+ return temp_height;
+ }
+
+ virtual GLenum internal_format() const OVERRIDE {
+ GLenum temp_type = 0;
+ GLenum temp_internal_format = 0;
+ texture_ref_->texture()->GetLevelType(
+ target_, level_, &temp_type, &temp_internal_format);
+ return temp_internal_format;
+ }
+
+ virtual GLenum texture_type() const OVERRIDE {
+ GLenum temp_type = 0;
+ GLenum temp_internal_format = 0;
+ texture_ref_->texture()->GetLevelType(
+ target_, level_, &temp_type, &temp_internal_format);
+ return temp_type;
+ }
+
+ virtual GLsizei samples() const OVERRIDE {
+ return samples_;
+ }
+
+ virtual GLuint object_name() const OVERRIDE {
+ return texture_ref_->client_id();
+ }
+
+ virtual bool cleared() const OVERRIDE {
+ return texture_ref_->texture()->IsLevelCleared(target_, level_);
+ }
+
+ virtual void SetCleared(
+ RenderbufferManager* /* renderbuffer_manager */,
+ TextureManager* texture_manager,
+ bool cleared) OVERRIDE {
+ texture_manager->SetLevelCleared(
+ texture_ref_.get(), target_, level_, cleared);
+ }
+
+ virtual bool IsTexture(TextureRef* texture) const OVERRIDE {
+ return texture == texture_ref_.get();
+ }
+
+ virtual bool IsRenderbuffer(
+ Renderbuffer* /* renderbuffer */)
+ const OVERRIDE {
+ return false;
+ }
+
+ TextureRef* texture() const {
+ return texture_ref_.get();
+ }
+
+ virtual bool CanRenderTo() const OVERRIDE {
+ return texture_ref_->texture()->CanRenderTo();
+ }
+
+ virtual void DetachFromFramebuffer(Framebuffer* framebuffer)
+ const OVERRIDE {
+ texture_ref_->texture()->DetachFromFramebuffer();
+ framebuffer->OnTextureRefDetached(texture_ref_.get());
+ }
+
+ virtual bool ValidForAttachmentType(
+ GLenum attachment_type, uint32 max_color_attachments) OVERRIDE {
+ GLenum type = 0;
+ GLenum internal_format = 0;
+ if (!texture_ref_->texture()->GetLevelType(
+ target_, level_, &type, &internal_format)) {
+ return false;
+ }
+ uint32 need = GLES2Util::GetChannelsNeededForAttachmentType(
+ attachment_type, max_color_attachments);
+ uint32 have = GLES2Util::GetChannelsForFormat(internal_format);
+
+ // Workaround for NVIDIA drivers that incorrectly expose these formats as
+ // renderable:
+ if (internal_format == GL_LUMINANCE || internal_format == GL_ALPHA ||
+ internal_format == GL_LUMINANCE_ALPHA) {
+ return false;
+ }
+ return (need & have) != 0;
+ }
+
+ virtual size_t GetSignatureSize(
+ TextureManager* texture_manager) const OVERRIDE {
+ return texture_manager->GetSignatureSize();
+ }
+
+ virtual void AddToSignature(
+ TextureManager* texture_manager, std::string* signature) const OVERRIDE {
+ DCHECK(signature);
+ texture_manager->AddToSignature(
+ texture_ref_.get(), target_, level_, signature);
+ }
+
+ virtual void OnWillRenderTo() const OVERRIDE {
+ texture_ref_->texture()->OnWillModifyPixels();
+ }
+
+ virtual void OnDidRenderTo() const OVERRIDE {
+ texture_ref_->texture()->OnDidModifyPixels();
+ }
+
+ protected:
+ virtual ~TextureAttachment() {}
+
+ private:
+ scoped_refptr<TextureRef> texture_ref_;
+ GLenum target_;
+ GLint level_;
+ GLsizei samples_;
+
+ DISALLOW_COPY_AND_ASSIGN(TextureAttachment);
+};
+
+FramebufferManager::TextureDetachObserver::TextureDetachObserver() {}
+
+FramebufferManager::TextureDetachObserver::~TextureDetachObserver() {}
+
+FramebufferManager::FramebufferManager(
+ uint32 max_draw_buffers, uint32 max_color_attachments)
+ : framebuffer_state_change_count_(1),
+ framebuffer_count_(0),
+ have_context_(true),
+ max_draw_buffers_(max_draw_buffers),
+ max_color_attachments_(max_color_attachments) {
+ DCHECK_GT(max_draw_buffers_, 0u);
+ DCHECK_GT(max_color_attachments_, 0u);
+}
+
+FramebufferManager::~FramebufferManager() {
+ DCHECK(framebuffers_.empty());
+  // If this check fires, something is still holding a reference to a
+  // Framebuffer that belongs to this manager.
+ CHECK_EQ(framebuffer_count_, 0u);
+}
+
+void Framebuffer::MarkAsDeleted() {
+ deleted_ = true;
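+  // Detach every remaining attachment so attached textures are notified
+  // before their references are dropped; renderbuffer attachments need no
+  // extra work on detach.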
+ while (!attachments_.empty()) {
+ Attachment* attachment = attachments_.begin()->second.get();
+ attachment->DetachFromFramebuffer(this);
+ attachments_.erase(attachments_.begin());
+ }
+}
+
+void FramebufferManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ framebuffers_.clear();
+}
+
+void FramebufferManager::StartTracking(
+ Framebuffer* /* framebuffer */) {
+ ++framebuffer_count_;
+}
+
+void FramebufferManager::StopTracking(
+ Framebuffer* /* framebuffer */) {
+ --framebuffer_count_;
+}
+
+void FramebufferManager::CreateFramebuffer(
+ GLuint client_id, GLuint service_id) {
+ std::pair<FramebufferMap::iterator, bool> result =
+ framebuffers_.insert(
+ std::make_pair(
+ client_id,
+ scoped_refptr<Framebuffer>(
+ new Framebuffer(this, service_id))));
+ DCHECK(result.second);
+}
+
+Framebuffer::Framebuffer(
+ FramebufferManager* manager, GLuint service_id)
+ : manager_(manager),
+ deleted_(false),
+ service_id_(service_id),
+ has_been_bound_(false),
+ framebuffer_complete_state_count_id_(0) {
+ manager->StartTracking(this);
+ DCHECK_GT(manager->max_draw_buffers_, 0u);
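+  // Default draw-buffer state: only GL_COLOR_ATTACHMENT0 receives draw
+  // output until the client changes the draw buffers.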
+ draw_buffers_.reset(new GLenum[manager->max_draw_buffers_]);
+ draw_buffers_[0] = GL_COLOR_ATTACHMENT0;
+ for (uint32 i = 1; i < manager->max_draw_buffers_; ++i)
+ draw_buffers_[i] = GL_NONE;
+}
+
+Framebuffer::~Framebuffer() {
+ if (manager_) {
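+    // Only delete the GL object while a context is still available; if the
+    // context was lost, the service-side framebuffer is already gone.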
+ if (manager_->have_context_) {
+ GLuint id = service_id();
+ glDeleteFramebuffersEXT(1, &id);
+ }
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+bool Framebuffer::HasUnclearedAttachment(
+ GLenum attachment) const {
+ AttachmentMap::const_iterator it =
+ attachments_.find(attachment);
+ if (it != attachments_.end()) {
+ const Attachment* attachment = it->second.get();
+ return !attachment->cleared();
+ }
+ return false;
+}
+
+bool Framebuffer::HasUnclearedColorAttachments() const {
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ if (it->first >= GL_COLOR_ATTACHMENT0 &&
+ it->first < GL_COLOR_ATTACHMENT0 + manager_->max_draw_buffers_) {
+ const Attachment* attachment = it->second.get();
+ if (!attachment->cleared())
+ return true;
+ }
+ }
+ return false;
+}
+
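+// Shared helper for PrepareDrawBuffersForClear() and
+// RestoreDrawBuffersAfterClear() below. With |recover| false it enables a
+// draw buffer for every color attachment that is present; with |recover| true
+// it restores the client-requested draw_buffers_. glDrawBuffersARB is only
+// issued when the computed list differs from the tracked state.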
+void Framebuffer::ChangeDrawBuffersHelper(bool recover) const {
+ scoped_ptr<GLenum[]> buffers(new GLenum[manager_->max_draw_buffers_]);
+ for (uint32 i = 0; i < manager_->max_draw_buffers_; ++i)
+ buffers[i] = GL_NONE;
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ if (it->first >= GL_COLOR_ATTACHMENT0 &&
+ it->first < GL_COLOR_ATTACHMENT0 + manager_->max_draw_buffers_) {
+ buffers[it->first - GL_COLOR_ATTACHMENT0] = it->first;
+ }
+ }
+ bool different = false;
+ for (uint32 i = 0; i < manager_->max_draw_buffers_; ++i) {
+ if (buffers[i] != draw_buffers_[i]) {
+ different = true;
+ break;
+ }
+ }
+ if (different) {
+ if (recover)
+ glDrawBuffersARB(manager_->max_draw_buffers_, draw_buffers_.get());
+ else
+ glDrawBuffersARB(manager_->max_draw_buffers_, buffers.get());
+ }
+}
+
+void Framebuffer::PrepareDrawBuffersForClear() const {
+ bool recover = false;
+ ChangeDrawBuffersHelper(recover);
+}
+
+void Framebuffer::RestoreDrawBuffersAfterClear() const {
+ bool recover = true;
+ ChangeDrawBuffersHelper(recover);
+}
+
+void Framebuffer::MarkAttachmentAsCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager,
+ GLenum attachment,
+ bool cleared) {
+ AttachmentMap::iterator it = attachments_.find(attachment);
+ if (it != attachments_.end()) {
+ Attachment* a = it->second.get();
+ if (a->cleared() != cleared) {
+ a->SetCleared(renderbuffer_manager,
+ texture_manager,
+ cleared);
+ }
+ }
+}
+
+void Framebuffer::MarkAttachmentsAsCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager,
+ bool cleared) {
+ for (AttachmentMap::iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ if (attachment->cleared() != cleared) {
+ attachment->SetCleared(renderbuffer_manager, texture_manager, cleared);
+ }
+ }
+}
+
+bool Framebuffer::HasDepthAttachment() const {
+ return attachments_.find(GL_DEPTH_STENCIL_ATTACHMENT) != attachments_.end() ||
+ attachments_.find(GL_DEPTH_ATTACHMENT) != attachments_.end();
+}
+
+bool Framebuffer::HasStencilAttachment() const {
+ return attachments_.find(GL_DEPTH_STENCIL_ATTACHMENT) != attachments_.end() ||
+ attachments_.find(GL_STENCIL_ATTACHMENT) != attachments_.end();
+}
+
+GLenum Framebuffer::GetColorAttachmentFormat() const {
+ AttachmentMap::const_iterator it = attachments_.find(GL_COLOR_ATTACHMENT0);
+ if (it == attachments_.end()) {
+ return 0;
+ }
+ const Attachment* attachment = it->second.get();
+ return attachment->internal_format();
+}
+
+GLenum Framebuffer::GetColorAttachmentTextureType() const {
+ AttachmentMap::const_iterator it = attachments_.find(GL_COLOR_ATTACHMENT0);
+ if (it == attachments_.end()) {
+ return 0;
+ }
+ const Attachment* attachment = it->second.get();
+ return attachment->texture_type();
+}
+
+GLenum Framebuffer::IsPossiblyComplete() const {
+ if (attachments_.empty()) {
+ return GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT;
+ }
+
+ GLsizei width = -1;
+ GLsizei height = -1;
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ GLenum attachment_type = it->first;
+ Attachment* attachment = it->second.get();
+ if (!attachment->ValidForAttachmentType(attachment_type,
+ manager_->max_color_attachments_)) {
+ return GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT;
+ }
+ if (width < 0) {
+ width = attachment->width();
+ height = attachment->height();
+ if (width == 0 || height == 0) {
+ return GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT;
+ }
+ } else {
+ if (attachment->width() != width || attachment->height() != height) {
+ return GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT;
+ }
+ }
+
+ if (!attachment->CanRenderTo()) {
+ return GL_FRAMEBUFFER_UNSUPPORTED;
+ }
+ }
+
+ // This does not mean the framebuffer is actually complete. It just means our
+ // checks passed.
+ return GL_FRAMEBUFFER_COMPLETE;
+}
+
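+// Builds a signature from the target and the current attachments and caches
+// combinations that were reported GL_FRAMEBUFFER_COMPLETE, so a repeated
+// status query for an unchanged framebuffer can skip glCheckFramebufferStatus.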
+GLenum Framebuffer::GetStatus(
+ TextureManager* texture_manager, GLenum target) const {
+ // Check if we have this combo already.
+ std::string signature;
+ if (allow_framebuffer_combo_complete_map_) {
+ size_t signature_size = sizeof(target);
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ signature_size += sizeof(it->first) +
+ attachment->GetSignatureSize(texture_manager);
+ }
+
+ signature.reserve(signature_size);
+ signature.append(reinterpret_cast<const char*>(&target), sizeof(target));
+
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ signature.append(reinterpret_cast<const char*>(&it->first),
+ sizeof(it->first));
+ attachment->AddToSignature(texture_manager, &signature);
+ }
+ DCHECK(signature.size() == signature_size);
+
+ if (!framebuffer_combo_complete_map_) {
+ framebuffer_combo_complete_map_ = new FramebufferComboCompleteMap();
+ }
+
+ FramebufferComboCompleteMap::const_iterator it =
+ framebuffer_combo_complete_map_->find(signature);
+ if (it != framebuffer_combo_complete_map_->end()) {
+ return GL_FRAMEBUFFER_COMPLETE;
+ }
+ }
+
+ GLenum result = glCheckFramebufferStatusEXT(target);
+
+ // Insert the new result into the combo map.
+ if (allow_framebuffer_combo_complete_map_ &&
+ result == GL_FRAMEBUFFER_COMPLETE) {
+ framebuffer_combo_complete_map_->insert(std::make_pair(signature, true));
+ }
+
+ return result;
+}
+
+bool Framebuffer::IsCleared() const {
+ // Are all the attachments cleared?
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ if (!attachment->cleared()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+GLenum Framebuffer::GetDrawBuffer(GLenum draw_buffer) const {
+ GLsizei index = static_cast<GLsizei>(
+ draw_buffer - GL_DRAW_BUFFER0_ARB);
+ CHECK(index >= 0 &&
+ index < static_cast<GLsizei>(manager_->max_draw_buffers_));
+ return draw_buffers_[index];
+}
+
+void Framebuffer::SetDrawBuffers(GLsizei n, const GLenum* bufs) {
+ DCHECK(n <= static_cast<GLsizei>(manager_->max_draw_buffers_));
+ for (GLsizei i = 0; i < n; ++i)
+ draw_buffers_[i] = bufs[i];
+}
+
+bool Framebuffer::HasAlphaMRT() const {
+ for (uint32 i = 0; i < manager_->max_draw_buffers_; ++i) {
+ if (draw_buffers_[i] != GL_NONE) {
+ const Attachment* attachment = GetAttachment(draw_buffers_[i]);
+ if (!attachment)
+ continue;
+ if ((GLES2Util::GetChannelsForFormat(
+ attachment->internal_format()) & 0x0008) != 0)
+ return true;
+ }
+ }
+ return false;
+}
+
+void Framebuffer::UnbindRenderbuffer(
+ GLenum target, Renderbuffer* renderbuffer) {
+ bool done;
+ do {
+ done = true;
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ if (attachment->IsRenderbuffer(renderbuffer)) {
+ // TODO(gman): manually detach renderbuffer.
+ // glFramebufferRenderbufferEXT(target, it->first, GL_RENDERBUFFER, 0);
+ AttachRenderbuffer(it->first, NULL);
+ done = false;
+ break;
+ }
+ }
+ } while (!done);
+}
+
+void Framebuffer::UnbindTexture(
+ GLenum target, TextureRef* texture_ref) {
+ bool done;
+ do {
+ done = true;
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ if (attachment->IsTexture(texture_ref)) {
+ // TODO(gman): manually detach texture.
+ // glFramebufferTexture2DEXT(target, it->first, GL_TEXTURE_2D, 0, 0);
+ AttachTexture(it->first, NULL, GL_TEXTURE_2D, 0, 0);
+ done = false;
+ break;
+ }
+ }
+ } while (!done);
+}
+
+Framebuffer* FramebufferManager::GetFramebuffer(
+ GLuint client_id) {
+ FramebufferMap::iterator it = framebuffers_.find(client_id);
+ return it != framebuffers_.end() ? it->second.get() : NULL;
+}
+
+void FramebufferManager::RemoveFramebuffer(GLuint client_id) {
+ FramebufferMap::iterator it = framebuffers_.find(client_id);
+ if (it != framebuffers_.end()) {
+ it->second->MarkAsDeleted();
+ framebuffers_.erase(it);
+ }
+}
+
+void Framebuffer::AttachRenderbuffer(
+ GLenum attachment, Renderbuffer* renderbuffer) {
+ const Attachment* a = GetAttachment(attachment);
+ if (a)
+ a->DetachFromFramebuffer(this);
+ if (renderbuffer) {
+ attachments_[attachment] = scoped_refptr<Attachment>(
+ new RenderbufferAttachment(renderbuffer));
+ } else {
+ attachments_.erase(attachment);
+ }
+ framebuffer_complete_state_count_id_ = 0;
+}
+
+void Framebuffer::AttachTexture(
+ GLenum attachment, TextureRef* texture_ref, GLenum target,
+ GLint level, GLsizei samples) {
+ const Attachment* a = GetAttachment(attachment);
+ if (a)
+ a->DetachFromFramebuffer(this);
+ if (texture_ref) {
+ attachments_[attachment] = scoped_refptr<Attachment>(
+ new TextureAttachment(texture_ref, target, level, samples));
+ texture_ref->texture()->AttachToFramebuffer();
+ } else {
+ attachments_.erase(attachment);
+ }
+ framebuffer_complete_state_count_id_ = 0;
+}
+
+const Framebuffer::Attachment*
+ Framebuffer::GetAttachment(
+ GLenum attachment) const {
+ AttachmentMap::const_iterator it = attachments_.find(attachment);
+ if (it != attachments_.end()) {
+ return it->second.get();
+ }
+ return NULL;
+}
+
+void Framebuffer::OnTextureRefDetached(TextureRef* texture) {
+ manager_->OnTextureRefDetached(texture);
+}
+
+void Framebuffer::OnWillRenderTo() const {
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ it->second->OnWillRenderTo();
+ }
+}
+
+void Framebuffer::OnDidRenderTo() const {
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ it->second->OnDidRenderTo();
+ }
+}
+
+bool FramebufferManager::GetClientId(
+ GLuint service_id, GLuint* client_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (FramebufferMap::const_iterator it = framebuffers_.begin();
+ it != framebuffers_.end(); ++it) {
+ if (it->second->service_id() == service_id) {
+ *client_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+void FramebufferManager::MarkAttachmentsAsCleared(
+ Framebuffer* framebuffer,
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager) {
+ DCHECK(framebuffer);
+ framebuffer->MarkAttachmentsAsCleared(renderbuffer_manager,
+ texture_manager,
+ true);
+ MarkAsComplete(framebuffer);
+}
+
+void FramebufferManager::MarkAsComplete(
+ Framebuffer* framebuffer) {
+ DCHECK(framebuffer);
+ framebuffer->MarkAsComplete(framebuffer_state_change_count_);
+}
+
+bool FramebufferManager::IsComplete(
+ Framebuffer* framebuffer) {
+ DCHECK(framebuffer);
+ return framebuffer->framebuffer_complete_state_count_id() ==
+ framebuffer_state_change_count_;
+}
+
+void FramebufferManager::OnTextureRefDetached(TextureRef* texture) {
+ for (TextureDetachObserverVector::iterator it =
+ texture_detach_observers_.begin();
+ it != texture_detach_observers_.end();
+ ++it) {
+ TextureDetachObserver* observer = *it;
+ observer->OnTextureRefDetachedFromFramebuffer(texture);
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
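A minimal usage sketch of the API implemented above (not decoder code; the ids, the limits, and the surrounding GL-context setup are illustrative, and base/logging.h is assumed for DCHECK_EQ):

  // Sketch only: assumes a current GL context plus framebuffer_manager.h.
  gpu::gles2::FramebufferManager manager(8u /* max_draw_buffers */,
                                         8u /* max_color_attachments */);
  manager.CreateFramebuffer(1u /* client_id */, 11u /* service_id */);
  gpu::gles2::Framebuffer* framebuffer = manager.GetFramebuffer(1u);
  // With no attachments, IsPossiblyComplete() reports a missing attachment.
  DCHECK_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT),
            framebuffer->IsPossiblyComplete());
  manager.RemoveFramebuffer(1u);             // Drops the manager's reference.
  manager.Destroy(true /* have_context */);  // Must run before the destructor.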
diff --git a/gpu/command_buffer/service/framebuffer_manager.h b/gpu/command_buffer/service/framebuffer_manager.h
new file mode 100644
index 0000000..96bf7fe
--- /dev/null
+++ b/gpu/command_buffer/service/framebuffer_manager.h
@@ -0,0 +1,317 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_FRAMEBUFFER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_FRAMEBUFFER_MANAGER_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class FramebufferManager;
+class Renderbuffer;
+class RenderbufferManager;
+class Texture;
+class TextureRef;
+class TextureManager;
+
+// Info about a particular Framebuffer.
+class GPU_EXPORT Framebuffer : public base::RefCounted<Framebuffer> {
+ public:
+ class Attachment : public base::RefCounted<Attachment> {
+ public:
+ virtual GLsizei width() const = 0;
+ virtual GLsizei height() const = 0;
+ virtual GLenum internal_format() const = 0;
+ virtual GLenum texture_type() const = 0;
+ virtual GLsizei samples() const = 0;
+ virtual GLuint object_name() const = 0;
+ virtual bool cleared() const = 0;
+ virtual void SetCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager,
+ bool cleared) = 0;
+ virtual bool IsTexture(TextureRef* texture) const = 0;
+ virtual bool IsRenderbuffer(
+ Renderbuffer* renderbuffer) const = 0;
+ virtual bool CanRenderTo() const = 0;
+ virtual void DetachFromFramebuffer(Framebuffer* framebuffer) const = 0;
+ virtual bool ValidForAttachmentType(
+ GLenum attachment_type, uint32 max_color_attachments) = 0;
+ virtual size_t GetSignatureSize(TextureManager* texture_manager) const = 0;
+ virtual void AddToSignature(
+ TextureManager* texture_manager, std::string* signature) const = 0;
+ virtual void OnWillRenderTo() const = 0;
+ virtual void OnDidRenderTo() const = 0;
+
+ protected:
+ friend class base::RefCounted<Attachment>;
+ virtual ~Attachment() {}
+ };
+
+ Framebuffer(FramebufferManager* manager, GLuint service_id);
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ bool HasUnclearedAttachment(GLenum attachment) const;
+ bool HasUnclearedColorAttachments() const;
+
+ void MarkAttachmentAsCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager,
+ GLenum attachment,
+ bool cleared);
+
+ // Attaches a renderbuffer to a particular attachment.
+ // Pass null to detach.
+ void AttachRenderbuffer(
+ GLenum attachment, Renderbuffer* renderbuffer);
+
+ // Attaches a texture to a particular attachment. Pass null to detach.
+ void AttachTexture(
+ GLenum attachment, TextureRef* texture_ref, GLenum target,
+ GLint level, GLsizei samples);
+
+ // Unbinds the given renderbuffer if it is bound.
+ void UnbindRenderbuffer(
+ GLenum target, Renderbuffer* renderbuffer);
+
+ // Unbinds the given texture if it is bound.
+ void UnbindTexture(
+ GLenum target, TextureRef* texture_ref);
+
+ const Attachment* GetAttachment(GLenum attachment) const;
+
+ bool IsDeleted() const {
+ return deleted_;
+ }
+
+ void MarkAsValid() {
+ has_been_bound_ = true;
+ }
+
+ bool IsValid() const {
+ return has_been_bound_ && !IsDeleted();
+ }
+
+ bool HasDepthAttachment() const;
+ bool HasStencilAttachment() const;
+ GLenum GetColorAttachmentFormat() const;
+ // If the color attachment is a texture, returns its type; otherwise,
+ // returns 0.
+ GLenum GetColorAttachmentTextureType() const;
+
+ // Verify all the rules in OpenGL ES 2.0.25 4.4.5 are followed.
+ // Returns GL_FRAMEBUFFER_COMPLETE if there are no reasons we know we can't
+ // use this combination of attachments. Otherwise returns the value
+ // that glCheckFramebufferStatus should return for this set of attachments.
+ // Note that receiving GL_FRAMEBUFFER_COMPLETE from this function does
+ // not mean the real OpenGL will consider it framebuffer complete. It just
+ // means it passed our tests.
+ GLenum IsPossiblyComplete() const;
+
+ // Implements an optimized glCheckFramebufferStatus.
+ GLenum GetStatus(TextureManager* texture_manager, GLenum target) const;
+
+ // Checks that all attachments are cleared.
+ bool IsCleared() const;
+
+ GLenum GetDrawBuffer(GLenum draw_buffer) const;
+
+ void SetDrawBuffers(GLsizei n, const GLenum* bufs);
+
+ // If a color buffer is attached to GL_COLOR_ATTACHMENTi, enable that
+ // draw buffer for glClear().
+ void PrepareDrawBuffersForClear() const;
+
+ // Restores the draw buffer states that were changed in
+ // PrepareDrawBuffersForClear().
+ void RestoreDrawBuffersAfterClear() const;
+
+ // Returns true if any draw buffer has an alpha channel.
+ bool HasAlphaMRT() const;
+
+ static void ClearFramebufferCompleteComboMap();
+
+ static bool AllowFramebufferComboCompleteMapForTesting() {
+ return allow_framebuffer_combo_complete_map_;
+ }
+
+ void OnTextureRefDetached(TextureRef* texture);
+ void OnWillRenderTo() const;
+ void OnDidRenderTo() const;
+
+ private:
+ friend class FramebufferManager;
+ friend class base::RefCounted<Framebuffer>;
+
+ ~Framebuffer();
+
+ void MarkAsDeleted();
+
+ void MarkAttachmentsAsCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager,
+ bool cleared);
+
+ void MarkAsComplete(unsigned state_id) {
+ framebuffer_complete_state_count_id_ = state_id;
+ }
+
+ unsigned framebuffer_complete_state_count_id() const {
+ return framebuffer_complete_state_count_id_;
+ }
+
+ // Helper function for PrepareDrawBuffersForClear() and
+ // RestoreDrawBuffersAfterClear().
+ void ChangeDrawBuffersHelper(bool recover) const;
+
+ // The manager that owns this.
+ FramebufferManager* manager_;
+
+ bool deleted_;
+
+ // Service side framebuffer id.
+ GLuint service_id_;
+
+ // Whether this framebuffer has ever been bound.
+ bool has_been_bound_;
+
+ // state count when this framebuffer was last checked for completeness.
+ unsigned framebuffer_complete_state_count_id_;
+
+ // A map of attachments.
+ typedef base::hash_map<GLenum, scoped_refptr<Attachment> > AttachmentMap;
+ AttachmentMap attachments_;
+
+ // A map of successful framebuffer combos. If it's in the map
+ // it should be FRAMEBUFFER_COMPLETE.
+ typedef base::hash_map<std::string, bool> FramebufferComboCompleteMap;
+ static FramebufferComboCompleteMap* framebuffer_combo_complete_map_;
+ static bool allow_framebuffer_combo_complete_map_;
+
+ scoped_ptr<GLenum[]> draw_buffers_;
+
+ DISALLOW_COPY_AND_ASSIGN(Framebuffer);
+};
+
+struct DecoderFramebufferState {
+ DecoderFramebufferState();
+ ~DecoderFramebufferState();
+
+ // State saved for clearing so we can clear render buffers and then
+ // restore to these values.
+ bool clear_state_dirty;
+
+ // The currently bound framebuffers
+ scoped_refptr<Framebuffer> bound_read_framebuffer;
+ scoped_refptr<Framebuffer> bound_draw_framebuffer;
+};
+
+// This class keeps track of the framebuffers and their attached renderbuffers
+// so we can correctly clear them.
+class GPU_EXPORT FramebufferManager {
+ public:
+ class GPU_EXPORT TextureDetachObserver {
+ public:
+ TextureDetachObserver();
+ virtual ~TextureDetachObserver();
+
+ virtual void OnTextureRefDetachedFromFramebuffer(TextureRef* texture) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TextureDetachObserver);
+ };
+
+ FramebufferManager(uint32 max_draw_buffers, uint32 max_color_attachments);
+ ~FramebufferManager();
+
+ // Must be called before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a Framebuffer for the given framebuffer ids.
+ void CreateFramebuffer(GLuint client_id, GLuint service_id);
+
+ // Gets the framebuffer info for the given framebuffer.
+ Framebuffer* GetFramebuffer(GLuint client_id);
+
+ // Removes a framebuffer info for the given framebuffer.
+ void RemoveFramebuffer(GLuint client_id);
+
+ // Gets a client id for a given service id.
+ bool GetClientId(GLuint service_id, GLuint* client_id) const;
+
+ void MarkAttachmentsAsCleared(
+ Framebuffer* framebuffer,
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager);
+
+ void MarkAsComplete(Framebuffer* framebuffer);
+
+ bool IsComplete(Framebuffer* framebuffer);
+
+ void IncFramebufferStateChangeCount() {
+ // make sure this is never 0.
+ framebuffer_state_change_count_ =
+ (framebuffer_state_change_count_ + 1) | 0x80000000U;
+ }
+
+ void AddObserver(TextureDetachObserver* observer) {
+ texture_detach_observers_.push_back(observer);
+ }
+
+ void RemoveObserver(TextureDetachObserver* observer) {
+ texture_detach_observers_.erase(
+ std::remove(texture_detach_observers_.begin(),
+ texture_detach_observers_.end(),
+ observer),
+ texture_detach_observers_.end());
+ }
+
+ private:
+ friend class Framebuffer;
+
+ void StartTracking(Framebuffer* framebuffer);
+ void StopTracking(Framebuffer* framebuffer);
+
+ void OnTextureRefDetached(TextureRef* texture);
+
+ // Info for each framebuffer in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<Framebuffer> >
+ FramebufferMap;
+ FramebufferMap framebuffers_;
+
+ // Incremented any time anything changes that might affect framebuffer
+ // state.
+ unsigned framebuffer_state_change_count_;
+
+ // Counts the number of Framebuffers allocated with 'this' as their manager.
+ // Allows checking that no Framebuffer outlives this manager.
+ unsigned int framebuffer_count_;
+
+ bool have_context_;
+
+ uint32 max_draw_buffers_;
+ uint32 max_color_attachments_;
+
+ typedef std::vector<TextureDetachObserver*> TextureDetachObserverVector;
+ TextureDetachObserverVector texture_detach_observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(FramebufferManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_FRAMEBUFFER_MANAGER_H_
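The PrepareDrawBuffersForClear()/RestoreDrawBuffersAfterClear() pair declared above is intended to bracket a clear of the currently bound framebuffer, and the DrawBuffers test in the unit tests below drives exactly that sequence. A hedged sketch of the pattern (framebuffer is assumed to be the bound gpu::gles2::Framebuffer*):

  framebuffer->PrepareDrawBuffersForClear();    // Enable all attached color buffers.
  glClear(GL_COLOR_BUFFER_BIT);                 // The clear now reaches each of them.
  framebuffer->RestoreDrawBuffersAfterClear();  // Restore the user's draw buffers.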
diff --git a/gpu/command_buffer/service/framebuffer_manager_unittest.cc b/gpu/command_buffer/service/framebuffer_manager_unittest.cc
new file mode 100644
index 0000000..1ded558
--- /dev/null
+++ b/gpu/command_buffer/service/framebuffer_manager_unittest.cc
@@ -0,0 +1,973 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::Return;
+
+namespace gpu {
+namespace gles2 {
+namespace {
+
+const GLint kMaxTextureSize = 64;
+const GLint kMaxCubemapSize = 64;
+const GLint kMaxRenderbufferSize = 64;
+const GLint kMaxSamples = 4;
+const uint32 kMaxDrawBuffers = 16;
+const uint32 kMaxColorAttachments = 16;
+const bool kDepth24Supported = false;
+const bool kUseDefaultTextures = false;
+
+} // namespace
+
+class FramebufferManagerTest : public GpuServiceTest {
+ public:
+ FramebufferManagerTest()
+ : manager_(1, 1),
+ texture_manager_(NULL,
+ new FeatureInfo(),
+ kMaxTextureSize,
+ kMaxCubemapSize,
+ kUseDefaultTextures),
+ renderbuffer_manager_(NULL,
+ kMaxRenderbufferSize,
+ kMaxSamples,
+ kDepth24Supported) {}
+ virtual ~FramebufferManagerTest() {
+ manager_.Destroy(false);
+ texture_manager_.Destroy(false);
+ renderbuffer_manager_.Destroy(false);
+ }
+
+ protected:
+
+ FramebufferManager manager_;
+ TextureManager texture_manager_;
+ RenderbufferManager renderbuffer_manager_;
+};
+
+TEST_F(FramebufferManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+ // Check we can create framebuffer.
+ manager_.CreateFramebuffer(kClient1Id, kService1Id);
+ // Check framebuffer got created.
+ Framebuffer* framebuffer1 = manager_.GetFramebuffer(kClient1Id);
+ ASSERT_TRUE(framebuffer1 != NULL);
+ EXPECT_FALSE(framebuffer1->IsDeleted());
+ EXPECT_EQ(kService1Id, framebuffer1->service_id());
+ GLuint client_id = 0;
+ EXPECT_TRUE(manager_.GetClientId(framebuffer1->service_id(), &client_id));
+ EXPECT_EQ(kClient1Id, client_id);
+ // Check we get nothing for a non-existent framebuffer.
+ EXPECT_TRUE(manager_.GetFramebuffer(kClient2Id) == NULL);
+ // Check that trying to remove a non-existent framebuffer does not crash.
+ manager_.RemoveFramebuffer(kClient2Id);
+ // Check framebuffer gets deleted when last reference is released.
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Check we can't get the framebuffer after we remove it.
+ manager_.RemoveFramebuffer(kClient1Id);
+ EXPECT_TRUE(manager_.GetFramebuffer(kClient1Id) == NULL);
+}
+
+TEST_F(FramebufferManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create framebuffer.
+ manager_.CreateFramebuffer(kClient1Id, kService1Id);
+ // Check framebuffer got created.
+ Framebuffer* framebuffer1 = manager_.GetFramebuffer(kClient1Id);
+ ASSERT_TRUE(framebuffer1 != NULL);
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.Destroy(true);
+ // Check the resources were released.
+ framebuffer1 = manager_.GetFramebuffer(kClient1Id);
+ ASSERT_TRUE(framebuffer1 == NULL);
+}
+
+class FramebufferInfoTest : public GpuServiceTest {
+ public:
+ static const GLuint kClient1Id = 1;
+ static const GLuint kService1Id = 11;
+
+ FramebufferInfoTest()
+ : manager_(kMaxDrawBuffers, kMaxColorAttachments),
+ feature_info_(new FeatureInfo()),
+ renderbuffer_manager_(NULL, kMaxRenderbufferSize, kMaxSamples,
+ kDepth24Supported) {
+ texture_manager_.reset(new TextureManager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubemapSize,
+ kUseDefaultTextures));
+ }
+ virtual ~FramebufferInfoTest() {
+ manager_.Destroy(false);
+ texture_manager_->Destroy(false);
+ renderbuffer_manager_.Destroy(false);
+ }
+
+ protected:
+ virtual void SetUp() {
+ InitializeContext("", "");
+ }
+
+ void InitializeContext(const char* gl_version, const char* extensions) {
+ GpuServiceTest::SetUp();
+ TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(gl_.get(),
+ extensions, "", gl_version);
+ feature_info_->Initialize();
+ manager_.CreateFramebuffer(kClient1Id, kService1Id);
+ error_state_.reset(new ::testing::StrictMock<gles2::MockErrorState>());
+ framebuffer_ = manager_.GetFramebuffer(kClient1Id);
+ ASSERT_TRUE(framebuffer_ != NULL);
+ }
+
+ FramebufferManager manager_;
+ Framebuffer* framebuffer_;
+ scoped_refptr<FeatureInfo> feature_info_;
+ scoped_ptr<TextureManager> texture_manager_;
+ RenderbufferManager renderbuffer_manager_;
+ scoped_ptr<MockErrorState> error_state_;
+};
+
+// GCC requires these definitions, but MSVC requires they not be present.
+#ifndef COMPILER_MSVC
+const GLuint FramebufferInfoTest::kClient1Id;
+const GLuint FramebufferInfoTest::kService1Id;
+#endif
+
+TEST_F(FramebufferInfoTest, Basic) {
+ EXPECT_EQ(kService1Id, framebuffer_->service_id());
+ EXPECT_FALSE(framebuffer_->IsDeleted());
+ EXPECT_TRUE(NULL == framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_TRUE(NULL == framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_TRUE(NULL == framebuffer_->GetAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_TRUE(
+ NULL == framebuffer_->GetAttachment(GL_DEPTH_STENCIL_ATTACHMENT));
+ EXPECT_FALSE(framebuffer_->HasDepthAttachment());
+ EXPECT_FALSE(framebuffer_->HasStencilAttachment());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+ EXPECT_EQ(static_cast<GLenum>(0), framebuffer_->GetColorAttachmentFormat());
+ EXPECT_FALSE(manager_.IsComplete(framebuffer_));
+}
+
+TEST_F(FramebufferInfoTest, AttachRenderbuffer) {
+ const GLuint kRenderbufferClient1Id = 33;
+ const GLuint kRenderbufferService1Id = 333;
+ const GLuint kRenderbufferClient2Id = 34;
+ const GLuint kRenderbufferService2Id = 334;
+ const GLuint kRenderbufferClient3Id = 35;
+ const GLuint kRenderbufferService3Id = 335;
+ const GLuint kRenderbufferClient4Id = 36;
+ const GLuint kRenderbufferService4Id = 336;
+ const GLsizei kWidth1 = 16;
+ const GLsizei kHeight1 = 32;
+ const GLenum kFormat1 = GL_RGBA4;
+ const GLenum kBadFormat1 = GL_DEPTH_COMPONENT16;
+ const GLsizei kSamples1 = 0;
+ const GLsizei kWidth2 = 16;
+ const GLsizei kHeight2 = 32;
+ const GLenum kFormat2 = GL_DEPTH_COMPONENT16;
+ const GLsizei kSamples2 = 0;
+ const GLsizei kWidth3 = 16;
+ const GLsizei kHeight3 = 32;
+ const GLenum kFormat3 = GL_STENCIL_INDEX8;
+ const GLsizei kSamples3 = 0;
+ const GLsizei kWidth4 = 16;
+ const GLsizei kHeight4 = 32;
+ const GLenum kFormat4 = GL_STENCIL_INDEX8;
+ const GLsizei kSamples4 = 0;
+
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_FALSE(
+ framebuffer_->HasUnclearedAttachment(GL_DEPTH_STENCIL_ATTACHMENT));
+
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient1Id, kRenderbufferService1Id);
+ Renderbuffer* renderbuffer1 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+
+ // check adding one attachment
+ framebuffer_->AttachRenderbuffer(GL_COLOR_ATTACHMENT0, renderbuffer1);
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA4),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_FALSE(framebuffer_->HasDepthAttachment());
+ EXPECT_FALSE(framebuffer_->HasStencilAttachment());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+
+ // Try a format that's not good for COLOR_ATTACHMENT0.
+ renderbuffer_manager_.SetInfo(
+ renderbuffer1, kSamples1, kBadFormat1, kWidth1, kHeight1);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+
+ // Try a good format.
+ renderbuffer_manager_.SetInfo(
+ renderbuffer1, kSamples1, kFormat1, kWidth1, kHeight1);
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_FALSE(framebuffer_->HasDepthAttachment());
+ EXPECT_FALSE(framebuffer_->HasStencilAttachment());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+
+ // check adding another
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient2Id, kRenderbufferService2Id);
+ Renderbuffer* renderbuffer2 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient2Id);
+ ASSERT_TRUE(renderbuffer2 != NULL);
+ framebuffer_->AttachRenderbuffer(GL_DEPTH_ATTACHMENT, renderbuffer2);
+ EXPECT_TRUE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_TRUE(framebuffer_->HasDepthAttachment());
+ EXPECT_FALSE(framebuffer_->HasStencilAttachment());
+ // The attachment has a size of 0,0 so depending on the order of the map
+ // of attachments it could either get INCOMPLETE_ATTACHMENT because it's 0,0
+ // or INCOMPLETE_DIMENSIONS because it's not the same size as the other
+ // attachment.
+ GLenum status = framebuffer_->IsPossiblyComplete();
+ EXPECT_TRUE(
+ status == GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT ||
+ status == GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT);
+ EXPECT_FALSE(framebuffer_->IsCleared());
+
+ renderbuffer_manager_.SetInfo(
+ renderbuffer2, kSamples2, kFormat2, kWidth2, kHeight2);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+ EXPECT_TRUE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+
+ // check marking them as cleared.
+ manager_.MarkAttachmentsAsCleared(
+ framebuffer_, &renderbuffer_manager_, texture_manager_.get());
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+
+ // Check adding one that is already cleared.
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient3Id, kRenderbufferService3Id);
+ Renderbuffer* renderbuffer3 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient3Id);
+ ASSERT_TRUE(renderbuffer3 != NULL);
+ renderbuffer_manager_.SetInfo(
+ renderbuffer3, kSamples3, kFormat3, kWidth3, kHeight3);
+ renderbuffer_manager_.SetCleared(renderbuffer3, true);
+
+ framebuffer_->AttachRenderbuffer(GL_STENCIL_ATTACHMENT, renderbuffer3);
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_TRUE(framebuffer_->HasDepthAttachment());
+ EXPECT_TRUE(framebuffer_->HasStencilAttachment());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+
+ // Check marking the renderbuffer as uncleared.
+ renderbuffer_manager_.SetInfo(
+ renderbuffer1, kSamples1, kFormat1, kWidth1, kHeight1);
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_TRUE(framebuffer_->HasDepthAttachment());
+ EXPECT_TRUE(framebuffer_->HasStencilAttachment());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+
+ const Framebuffer::Attachment* attachment =
+ framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth1, attachment->width());
+ EXPECT_EQ(kHeight1, attachment->height());
+ EXPECT_EQ(kSamples1, attachment->samples());
+ EXPECT_EQ(kFormat1, attachment->internal_format());
+ EXPECT_FALSE(attachment->cleared());
+
+ EXPECT_TRUE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+
+ // Clear it.
+ manager_.MarkAttachmentsAsCleared(
+ framebuffer_, &renderbuffer_manager_, texture_manager_.get());
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_TRUE(framebuffer_->IsCleared());
+
+ // Check replacing an attachment
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient4Id, kRenderbufferService4Id);
+ Renderbuffer* renderbuffer4 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient4Id);
+ ASSERT_TRUE(renderbuffer4 != NULL);
+ renderbuffer_manager_.SetInfo(
+ renderbuffer4, kSamples4, kFormat4, kWidth4, kHeight4);
+
+ framebuffer_->AttachRenderbuffer(GL_STENCIL_ATTACHMENT, renderbuffer4);
+ EXPECT_TRUE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_FALSE(framebuffer_->IsCleared());
+
+ attachment = framebuffer_->GetAttachment(GL_STENCIL_ATTACHMENT);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth4, attachment->width());
+ EXPECT_EQ(kHeight4, attachment->height());
+ EXPECT_EQ(kSamples4, attachment->samples());
+ EXPECT_EQ(kFormat4, attachment->internal_format());
+ EXPECT_FALSE(attachment->cleared());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+
+ // Check changing an attachment.
+ renderbuffer_manager_.SetInfo(
+ renderbuffer4, kSamples4, kFormat4, kWidth4 + 1, kHeight4);
+
+ attachment = framebuffer_->GetAttachment(GL_STENCIL_ATTACHMENT);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth4 + 1, attachment->width());
+ EXPECT_EQ(kHeight4, attachment->height());
+ EXPECT_EQ(kSamples4, attachment->samples());
+ EXPECT_EQ(kFormat4, attachment->internal_format());
+ EXPECT_FALSE(attachment->cleared());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT),
+ framebuffer_->IsPossiblyComplete());
+
+ // Check removing it.
+ framebuffer_->AttachRenderbuffer(GL_STENCIL_ATTACHMENT, NULL);
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_TRUE(framebuffer_->HasDepthAttachment());
+ EXPECT_FALSE(framebuffer_->HasStencilAttachment());
+
+ EXPECT_TRUE(framebuffer_->IsCleared());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+
+ // Remove depth; set the color attachment to zero size.
+ framebuffer_->AttachRenderbuffer(GL_DEPTH_ATTACHMENT, NULL);
+ renderbuffer_manager_.SetInfo(renderbuffer1, kSamples1, kFormat1, 0, 0);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+
+ // Remove color.
+ framebuffer_->AttachRenderbuffer(GL_COLOR_ATTACHMENT0, NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+}
+
+TEST_F(FramebufferInfoTest, AttachTexture) {
+ const GLuint kTextureClient1Id = 33;
+ const GLuint kTextureService1Id = 333;
+ const GLuint kTextureClient2Id = 34;
+ const GLuint kTextureService2Id = 334;
+ const GLint kDepth = 1;
+ const GLint kBorder = 0;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+ const GLsizei kWidth1 = 16;
+ const GLsizei kHeight1 = 32;
+ const GLint kLevel1 = 0;
+ const GLenum kFormat1 = GL_RGBA;
+ const GLenum kBadFormat1 = GL_DEPTH_COMPONENT16;
+ const GLenum kTarget1 = GL_TEXTURE_2D;
+ const GLsizei kSamples1 = 0;
+ const GLsizei kWidth2 = 16;
+ const GLsizei kHeight2 = 32;
+ const GLint kLevel2 = 0;
+ const GLenum kFormat2 = GL_RGB;
+ const GLenum kTarget2 = GL_TEXTURE_2D;
+ const GLsizei kSamples2 = 0;
+ const GLsizei kWidth3 = 75;
+ const GLsizei kHeight3 = 123;
+ const GLint kLevel3 = 0;
+ const GLenum kFormat3 = GL_RGB565;
+ const GLsizei kSamples3 = 0;
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_FALSE(
+ framebuffer_->HasUnclearedAttachment(GL_DEPTH_STENCIL_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+
+ texture_manager_->CreateTexture(kTextureClient1Id, kTextureService1Id);
+ scoped_refptr<TextureRef> texture1(
+ texture_manager_->GetTexture(kTextureClient1Id));
+ ASSERT_TRUE(texture1.get() != NULL);
+
+ // check adding one attachment
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture1.get(), kTarget1, kLevel1, kSamples1);
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+ EXPECT_EQ(static_cast<GLenum>(0), framebuffer_->GetColorAttachmentFormat());
+
+ // Try a format that doesn't work with COLOR_ATTACHMENT0.
+ texture_manager_->SetTarget(texture1.get(), GL_TEXTURE_2D);
+ texture_manager_->SetLevelInfo(texture1.get(),
+ GL_TEXTURE_2D,
+ kLevel1,
+ kBadFormat1,
+ kWidth1,
+ kHeight1,
+ kDepth,
+ kBorder,
+ kBadFormat1,
+ kType,
+ true);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+
+ // Try a good format.
+ texture_manager_->SetLevelInfo(texture1.get(),
+ GL_TEXTURE_2D,
+ kLevel1,
+ kFormat1,
+ kWidth1,
+ kHeight1,
+ kDepth,
+ kBorder,
+ kFormat1,
+ kType,
+ false);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+ texture_manager_->SetLevelInfo(texture1.get(),
+ GL_TEXTURE_2D,
+ kLevel1,
+ kFormat1,
+ kWidth1,
+ kHeight1,
+ kDepth,
+ kBorder,
+ kFormat1,
+ kType,
+ true);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+
+ const Framebuffer::Attachment* attachment =
+ framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth1, attachment->width());
+ EXPECT_EQ(kHeight1, attachment->height());
+ EXPECT_EQ(kSamples1, attachment->samples());
+ EXPECT_EQ(kFormat1, attachment->internal_format());
+ EXPECT_TRUE(attachment->cleared());
+
+ // Check replacing an attachment
+ texture_manager_->CreateTexture(kTextureClient2Id, kTextureService2Id);
+ scoped_refptr<TextureRef> texture2(
+ texture_manager_->GetTexture(kTextureClient2Id));
+ ASSERT_TRUE(texture2.get() != NULL);
+ texture_manager_->SetTarget(texture2.get(), GL_TEXTURE_2D);
+ texture_manager_->SetLevelInfo(texture2.get(),
+ GL_TEXTURE_2D,
+ kLevel2,
+ kFormat2,
+ kWidth2,
+ kHeight2,
+ kDepth,
+ kBorder,
+ kFormat2,
+ kType,
+ true);
+
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture2.get(), kTarget2, kLevel2, kSamples2);
+ EXPECT_EQ(static_cast<GLenum>(kFormat2),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+
+ attachment = framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth2, attachment->width());
+ EXPECT_EQ(kHeight2, attachment->height());
+ EXPECT_EQ(kSamples2, attachment->samples());
+ EXPECT_EQ(kFormat2, attachment->internal_format());
+ EXPECT_TRUE(attachment->cleared());
+
+ // Check changing attachment
+ texture_manager_->SetLevelInfo(texture2.get(),
+ GL_TEXTURE_2D,
+ kLevel3,
+ kFormat3,
+ kWidth3,
+ kHeight3,
+ kDepth,
+ kBorder,
+ kFormat3,
+ kType,
+ false);
+ attachment = framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth3, attachment->width());
+ EXPECT_EQ(kHeight3, attachment->height());
+ EXPECT_EQ(kSamples3, attachment->samples());
+ EXPECT_EQ(kFormat3, attachment->internal_format());
+ EXPECT_FALSE(attachment->cleared());
+ EXPECT_EQ(static_cast<GLenum>(kFormat3),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+
+ // Set to size 0
+ texture_manager_->SetLevelInfo(texture2.get(),
+ GL_TEXTURE_2D,
+ kLevel3,
+ kFormat3,
+ 0,
+ 0,
+ kDepth,
+ kBorder,
+ kFormat3,
+ kType,
+ false);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+
+ // Check removing it.
+ framebuffer_->AttachTexture(GL_COLOR_ATTACHMENT0, NULL, 0, 0, 0);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) == NULL);
+ EXPECT_EQ(static_cast<GLenum>(0), framebuffer_->GetColorAttachmentFormat());
+
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+}
+
+TEST_F(FramebufferInfoTest, DrawBuffers) {
+ const GLuint kTextureClientId[] = { 33, 34 };
+ const GLuint kTextureServiceId[] = { 333, 334 };
+
+ for (GLenum i = GL_COLOR_ATTACHMENT0;
+ i < GL_COLOR_ATTACHMENT0 + kMaxColorAttachments; ++i) {
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(i));
+ }
+ EXPECT_FALSE(framebuffer_->HasUnclearedColorAttachments());
+
+ EXPECT_EQ(static_cast<GLenum>(GL_COLOR_ATTACHMENT0),
+ framebuffer_->GetDrawBuffer(GL_DRAW_BUFFER0_ARB));
+ for (GLenum i = GL_DRAW_BUFFER1_ARB;
+ i < GL_DRAW_BUFFER0_ARB + kMaxDrawBuffers; ++i) {
+ EXPECT_EQ(static_cast<GLenum>(GL_NONE),
+ framebuffer_->GetDrawBuffer(i));
+ }
+
+ for (size_t ii = 0; ii < arraysize(kTextureClientId); ++ii) {
+ texture_manager_->CreateTexture(
+ kTextureClientId[ii], kTextureServiceId[ii]);
+ scoped_refptr<TextureRef> texture(
+ texture_manager_->GetTexture(kTextureClientId[ii]));
+ ASSERT_TRUE(texture.get() != NULL);
+
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0 + ii, texture.get(), GL_TEXTURE_2D, 0, 0);
+ EXPECT_FALSE(
+ framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0 + ii));
+
+ const Framebuffer::Attachment* attachment =
+ framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0 + ii);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_TRUE(attachment->cleared());
+ }
+ EXPECT_TRUE(framebuffer_->IsCleared());
+ EXPECT_FALSE(framebuffer_->HasUnclearedColorAttachments());
+
+ // Set a texture as uncleared.
+ scoped_refptr<TextureRef> texture1(
+ texture_manager_->GetTexture(kTextureClientId[1]));
+ texture_manager_->SetTarget(texture1.get(), GL_TEXTURE_2D);
+ texture_manager_->SetLevelInfo(
+ texture1.get(), GL_TEXTURE_2D, 0, GL_RGBA, 4, 4,
+ 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, false);
+
+ const Framebuffer::Attachment* attachment1 =
+ framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT1);
+ ASSERT_TRUE(attachment1 != NULL);
+ EXPECT_FALSE(attachment1->cleared());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+ EXPECT_TRUE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT1));
+ EXPECT_TRUE(framebuffer_->HasUnclearedColorAttachments());
+
+ GLenum buffers[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
+ framebuffer_->SetDrawBuffers(2, buffers);
+ EXPECT_EQ(static_cast<GLenum>(GL_COLOR_ATTACHMENT0),
+ framebuffer_->GetDrawBuffer(GL_DRAW_BUFFER0_ARB));
+ EXPECT_EQ(static_cast<GLenum>(GL_COLOR_ATTACHMENT1),
+ framebuffer_->GetDrawBuffer(GL_DRAW_BUFFER1_ARB));
+ for (GLenum i = GL_DRAW_BUFFER2_ARB;
+ i < GL_DRAW_BUFFER0_ARB + kMaxDrawBuffers; ++i) {
+ EXPECT_EQ(static_cast<GLenum>(GL_NONE),
+ framebuffer_->GetDrawBuffer(i));
+ }
+
+ // Nothing happens.
+ framebuffer_->PrepareDrawBuffersForClear();
+ framebuffer_->RestoreDrawBuffersAfterClear();
+
+ // Now we disable draw buffer 1.
+ buffers[1] = GL_NONE;
+ framebuffer_->SetDrawBuffers(2, buffers);
+ // We will enable the disabled draw buffer for clear(), and disable it
+ // after the clear.
+ EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ framebuffer_->PrepareDrawBuffersForClear();
+ EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ framebuffer_->RestoreDrawBuffersAfterClear();
+
+ // Now remove draw buffer 1's attachment.
+ framebuffer_->AttachTexture(GL_COLOR_ATTACHMENT1, NULL, 0, 0, 0);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT1) == NULL);
+
+ // Nothing happens.
+ framebuffer_->PrepareDrawBuffersForClear();
+ framebuffer_->RestoreDrawBuffersAfterClear();
+}
+
+class FramebufferInfoFloatTest : public FramebufferInfoTest {
+ public:
+ FramebufferInfoFloatTest()
+ : FramebufferInfoTest() {
+ }
+ virtual ~FramebufferInfoFloatTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ InitializeContext("OpenGL ES 3.0",
+ "GL_OES_texture_float GL_EXT_color_buffer_float");
+ }
+};
+
+TEST_F(FramebufferInfoFloatTest, AttachFloatTexture) {
+ const GLuint kTextureClientId = 33;
+ const GLuint kTextureServiceId = 333;
+ const GLint kDepth = 1;
+ const GLint kBorder = 0;
+ const GLenum kType = GL_FLOAT;
+ const GLsizei kWidth = 16;
+ const GLsizei kHeight = 32;
+ const GLint kLevel = 0;
+ const GLenum kFormat = GL_RGBA;
+ const GLenum kInternalFormat = GL_RGBA32F;
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLsizei kSamples = 0;
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+
+ texture_manager_->CreateTexture(kTextureClientId, kTextureServiceId);
+ scoped_refptr<TextureRef> texture(
+ texture_manager_->GetTexture(kTextureClientId));
+ ASSERT_TRUE(texture.get() != NULL);
+
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture.get(), kTarget, kLevel, kSamples);
+ EXPECT_EQ(static_cast<GLenum>(0), framebuffer_->GetColorAttachmentFormat());
+
+ texture_manager_->SetTarget(texture.get(), GL_TEXTURE_2D);
+ texture_manager_->SetLevelInfo(texture.get(),
+ GL_TEXTURE_2D,
+ kLevel,
+ kInternalFormat,
+ kWidth,
+ kHeight,
+ kDepth,
+ kBorder,
+ kFormat,
+ kType,
+ false);
+ // A texture with a sized float internalformat is allowed as an attachment
+ // since the float color buffer extension is present.
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+}
+
+TEST_F(FramebufferInfoTest, UnbindRenderbuffer) {
+ const GLuint kRenderbufferClient1Id = 33;
+ const GLuint kRenderbufferService1Id = 333;
+ const GLuint kRenderbufferClient2Id = 34;
+ const GLuint kRenderbufferService2Id = 334;
+
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient1Id, kRenderbufferService1Id);
+ Renderbuffer* renderbuffer1 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient2Id, kRenderbufferService2Id);
+ Renderbuffer* renderbuffer2 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient2Id);
+ ASSERT_TRUE(renderbuffer2 != NULL);
+
+ // Attach to 2 attachment points.
+ framebuffer_->AttachRenderbuffer(GL_COLOR_ATTACHMENT0, renderbuffer1);
+ framebuffer_->AttachRenderbuffer(GL_DEPTH_ATTACHMENT, renderbuffer1);
+ // Check they were attached.
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) != NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) != NULL);
+ // Unbind unattached renderbuffer.
+ framebuffer_->UnbindRenderbuffer(GL_RENDERBUFFER, renderbuffer2);
+ // Should be no-op.
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) != NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) != NULL);
+ // Unbind renderbuffer.
+ framebuffer_->UnbindRenderbuffer(GL_RENDERBUFFER, renderbuffer1);
+ // Check they were detached
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) == NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) == NULL);
+}
+
+TEST_F(FramebufferInfoTest, UnbindTexture) {
+ const GLuint kTextureClient1Id = 33;
+ const GLuint kTextureService1Id = 333;
+ const GLuint kTextureClient2Id = 34;
+ const GLuint kTextureService2Id = 334;
+ const GLenum kTarget1 = GL_TEXTURE_2D;
+ const GLint kLevel1 = 0;
+ const GLint kSamples1 = 0;
+
+ texture_manager_->CreateTexture(kTextureClient1Id, kTextureService1Id);
+ scoped_refptr<TextureRef> texture1(
+ texture_manager_->GetTexture(kTextureClient1Id));
+ ASSERT_TRUE(texture1.get() != NULL);
+ texture_manager_->CreateTexture(kTextureClient2Id, kTextureService2Id);
+ scoped_refptr<TextureRef> texture2(
+ texture_manager_->GetTexture(kTextureClient2Id));
+ ASSERT_TRUE(texture2.get() != NULL);
+
+ // Attach to 2 attachment points.
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture1.get(), kTarget1, kLevel1, kSamples1);
+ framebuffer_->AttachTexture(
+ GL_DEPTH_ATTACHMENT, texture1.get(), kTarget1, kLevel1, kSamples1);
+ // Check they were attached.
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) != NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) != NULL);
+ // Unbind unattached texture.
+ framebuffer_->UnbindTexture(kTarget1, texture2.get());
+ // Should be no-op.
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) != NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) != NULL);
+ // Unbind texture.
+ framebuffer_->UnbindTexture(kTarget1, texture1.get());
+ // Check they were detached
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) == NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) == NULL);
+}
+
+TEST_F(FramebufferInfoTest, IsCompleteMarkAsComplete) {
+ const GLuint kRenderbufferClient1Id = 33;
+ const GLuint kRenderbufferService1Id = 333;
+ const GLuint kTextureClient2Id = 34;
+ const GLuint kTextureService2Id = 334;
+ const GLenum kTarget1 = GL_TEXTURE_2D;
+ const GLint kLevel1 = 0;
+ const GLint kSamples1 = 0;
+
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient1Id, kRenderbufferService1Id);
+ Renderbuffer* renderbuffer1 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+ texture_manager_->CreateTexture(kTextureClient2Id, kTextureService2Id);
+ scoped_refptr<TextureRef> texture2(
+ texture_manager_->GetTexture(kTextureClient2Id));
+ ASSERT_TRUE(texture2.get() != NULL);
+
+ // Check that MarkAsComplete marks as complete.
+ manager_.MarkAsComplete(framebuffer_);
+ EXPECT_TRUE(manager_.IsComplete(framebuffer_));
+
+ // Check that attaching marks it as not complete.
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture2.get(), kTarget1, kLevel1, kSamples1);
+ EXPECT_FALSE(manager_.IsComplete(framebuffer_));
+ manager_.MarkAsComplete(framebuffer_);
+ EXPECT_TRUE(manager_.IsComplete(framebuffer_));
+ framebuffer_->AttachRenderbuffer(GL_DEPTH_ATTACHMENT, renderbuffer1);
+ EXPECT_FALSE(manager_.IsComplete(framebuffer_));
+
+ // Check MarkAttachmentsAsCleared marks as complete.
+ manager_.MarkAttachmentsAsCleared(
+ framebuffer_, &renderbuffer_manager_, texture_manager_.get());
+ EXPECT_TRUE(manager_.IsComplete(framebuffer_));
+
+ // Check Unbind marks as not complete.
+ framebuffer_->UnbindRenderbuffer(GL_RENDERBUFFER, renderbuffer1);
+ EXPECT_FALSE(manager_.IsComplete(framebuffer_));
+ manager_.MarkAsComplete(framebuffer_);
+ EXPECT_TRUE(manager_.IsComplete(framebuffer_));
+ framebuffer_->UnbindTexture(kTarget1, texture2.get());
+ EXPECT_FALSE(manager_.IsComplete(framebuffer_));
+}
+
+TEST_F(FramebufferInfoTest, GetStatus) {
+ const GLuint kRenderbufferClient1Id = 33;
+ const GLuint kRenderbufferService1Id = 333;
+ const GLuint kTextureClient2Id = 34;
+ const GLuint kTextureService2Id = 334;
+ const GLenum kTarget1 = GL_TEXTURE_2D;
+ const GLint kLevel1 = 0;
+ const GLint kSamples1 = 0;
+
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient1Id, kRenderbufferService1Id);
+ Renderbuffer* renderbuffer1 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+ texture_manager_->CreateTexture(kTextureClient2Id, kTextureService2Id);
+ scoped_refptr<TextureRef> texture2(
+ texture_manager_->GetTexture(kTextureClient2Id));
+ ASSERT_TRUE(texture2.get() != NULL);
+ texture_manager_->SetTarget(texture2.get(), GL_TEXTURE_2D);
+
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ framebuffer_->GetStatus(texture_manager_.get(), GL_FRAMEBUFFER);
+
+ // Check a second call for the same type does not call anything
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ framebuffer_->GetStatus(texture_manager_.get(), GL_FRAMEBUFFER);
+
+ // Check changing the attachments calls CheckFramebufferStatus.
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture2.get(), kTarget1, kLevel1, kSamples1);
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE)).RetiresOnSaturation();
+ framebuffer_->GetStatus(texture_manager_.get(), GL_FRAMEBUFFER);
+
+ // Check a second call for the same type does not call anything.
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ framebuffer_->GetStatus(texture_manager_.get(), GL_FRAMEBUFFER);
+
+ // Check a second call with a different target calls CheckFramebufferStatus.
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check a second call for the same type does not call anything.
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check adding another attachment calls CheckFramebufferStatus.
+ framebuffer_->AttachRenderbuffer(GL_DEPTH_ATTACHMENT, renderbuffer1);
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check a second call for the same type does not call anything.
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check that changing the format calls CheckFramebufferStatus.
+ TestHelper::SetTexParameteriWithExpectations(gl_.get(),
+ error_state_.get(),
+ texture_manager_.get(),
+ texture2.get(),
+ GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE,
+ GL_NO_ERROR);
+
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check that, since it did not return FRAMEBUFFER_COMPLETE, the second
+ // call still reaches CheckFramebufferStatus.
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check putting it back does not call CheckFramebufferStatus.
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ TestHelper::SetTexParameteriWithExpectations(gl_.get(),
+ error_state_.get(),
+ texture_manager_.get(),
+ texture2.get(),
+ GL_TEXTURE_WRAP_S,
+ GL_REPEAT,
+ GL_NO_ERROR);
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check unbinding does not call CheckFramebufferStatus.
+ framebuffer_->UnbindRenderbuffer(GL_RENDERBUFFER, renderbuffer1);
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/gl_context_virtual.cc b/gpu/command_buffer/service/gl_context_virtual.cc
new file mode 100644
index 0000000..a29e540
--- /dev/null
+++ b/gpu/command_buffer/service/gl_context_virtual.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gl_context_virtual.h"
+
+#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "ui/gl/gl_surface.h"
+
+namespace gpu {
+
+GLContextVirtual::GLContextVirtual(
+ gfx::GLShareGroup* share_group,
+ gfx::GLContext* shared_context,
+ base::WeakPtr<gles2::GLES2Decoder> decoder)
+ : GLContext(share_group),
+ shared_context_(shared_context),
+ display_(NULL),
+ decoder_(decoder) {
+}
+
+gfx::Display* GLContextVirtual::display() {
+ return display_;
+}
+
+bool GLContextVirtual::Initialize(
+ gfx::GLSurface* compatible_surface, gfx::GpuPreference gpu_preference) {
+ SetGLStateRestorer(new GLStateRestorerImpl(decoder_));
+
+ display_ = static_cast<gfx::Display*>(compatible_surface->GetDisplay());
+
+ // Virtual contexts obviously can't make a context that is compatible
+ // with the surface (the context already exists), but we do need to
+ // make a context current for SetupForVirtualization() below.
+ if (!IsCurrent(compatible_surface)) {
+ if (!shared_context_->MakeCurrent(compatible_surface)) {
+ // This is likely an error. The real context should be made as
+ // compatible with all required surfaces when it was created.
+ LOG(ERROR) << "Failed MakeCurrent(compatible_surface)";
+ return false;
+ }
+ }
+
+ shared_context_->SetupForVirtualization();
+ shared_context_->MakeVirtuallyCurrent(this, compatible_surface);
+ return true;
+}
+
+void GLContextVirtual::Destroy() {
+ shared_context_->OnReleaseVirtuallyCurrent(this);
+ shared_context_ = NULL;
+ display_ = NULL;
+}
+
+bool GLContextVirtual::MakeCurrent(gfx::GLSurface* surface) {
+ if (decoder_.get())
+ return shared_context_->MakeVirtuallyCurrent(this, surface);
+
+ LOG(ERROR) << "Trying to make virtual context current without decoder.";
+ return false;
+}
+
+void GLContextVirtual::ReleaseCurrent(gfx::GLSurface* surface) {
+ if (IsCurrent(surface)) {
+ shared_context_->OnReleaseVirtuallyCurrent(this);
+ shared_context_->ReleaseCurrent(surface);
+ }
+}
+
+bool GLContextVirtual::IsCurrent(gfx::GLSurface* surface) {
+ // If it's a real surface it needs to be current.
+ if (surface &&
+ !surface->IsOffscreen())
+ return shared_context_->IsCurrent(surface);
+
+ // Otherwise, only ensure the context itself is current.
+ return shared_context_->IsCurrent(NULL);
+}
+
+void* GLContextVirtual::GetHandle() {
+ return shared_context_->GetHandle();
+}
+
+void GLContextVirtual::SetSwapInterval(int interval) {
+ shared_context_->SetSwapInterval(interval);
+}
+
+std::string GLContextVirtual::GetExtensions() {
+ return shared_context_->GetExtensions();
+}
+
+bool GLContextVirtual::GetTotalGpuMemory(size_t* bytes) {
+ return shared_context_->GetTotalGpuMemory(bytes);
+}
+
+void GLContextVirtual::SetSafeToForceGpuSwitch() {
+ // TODO(ccameron): This will not work if two contexts that disagree
+ // about whether or not forced gpu switching may be done both share
+ // the same underlying shared_context_.
+ return shared_context_->SetSafeToForceGpuSwitch();
+}
+
+bool GLContextVirtual::WasAllocatedUsingRobustnessExtension() {
+ return shared_context_->WasAllocatedUsingRobustnessExtension();
+}
+
+void GLContextVirtual::SetUnbindFboOnMakeCurrent() {
+ shared_context_->SetUnbindFboOnMakeCurrent();
+}
+
+GLContextVirtual::~GLContextVirtual() {
+ Destroy();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gl_context_virtual.h b/gpu/command_buffer/service/gl_context_virtual.h
new file mode 100644
index 0000000..fdecbdd
--- /dev/null
+++ b/gpu/command_buffer/service/gl_context_virtual.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GL_CONTEXT_VIRTUAL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GL_CONTEXT_VIRTUAL_H_
+
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "gpu/gpu_export.h"
+#include "ui/gl/gl_context.h"
+
+namespace gfx {
+class Display;
+class GLSurface;
+class GLStateRestorer;
+}
+
+namespace gpu {
+namespace gles2 {
+class GLES2Decoder;
+}
+
+// Encapsulates a virtual OpenGL context.
+class GPU_EXPORT GLContextVirtual : public gfx::GLContext {
+ public:
+ GLContextVirtual(
+ gfx::GLShareGroup* share_group,
+ gfx::GLContext* shared_context,
+ base::WeakPtr<gles2::GLES2Decoder> decoder);
+
+ gfx::Display* display();
+
+ // Implement GLContext.
+ virtual bool Initialize(
+ gfx::GLSurface* compatible_surface,
+ gfx::GpuPreference gpu_preference) OVERRIDE;
+ virtual void Destroy() OVERRIDE;
+ virtual bool MakeCurrent(gfx::GLSurface* surface) OVERRIDE;
+ virtual void ReleaseCurrent(gfx::GLSurface* surface) OVERRIDE;
+ virtual bool IsCurrent(gfx::GLSurface* surface) OVERRIDE;
+ virtual void* GetHandle() OVERRIDE;
+ virtual void SetSwapInterval(int interval) OVERRIDE;
+ virtual std::string GetExtensions() OVERRIDE;
+ virtual bool GetTotalGpuMemory(size_t* bytes) OVERRIDE;
+ virtual void SetSafeToForceGpuSwitch() OVERRIDE;
+ virtual bool WasAllocatedUsingRobustnessExtension() OVERRIDE;
+ virtual void SetUnbindFboOnMakeCurrent() OVERRIDE;
+
+ protected:
+ virtual ~GLContextVirtual();
+
+ private:
+ scoped_refptr<gfx::GLContext> shared_context_;
+ gfx::Display* display_;
+ base::WeakPtr<gles2::GLES2Decoder> decoder_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLContextVirtual);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GL_CONTEXT_VIRTUAL_H_
diff --git a/gpu/command_buffer/service/gl_state_restorer_impl.cc b/gpu/command_buffer/service/gl_state_restorer_impl.cc
new file mode 100644
index 0000000..5fbd425
--- /dev/null
+++ b/gpu/command_buffer/service/gl_state_restorer_impl.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+namespace gpu {
+
+GLStateRestorerImpl::GLStateRestorerImpl(
+ base::WeakPtr<gles2::GLES2Decoder> decoder)
+ : decoder_(decoder) {
+}
+
+GLStateRestorerImpl::~GLStateRestorerImpl() {
+}
+
+bool GLStateRestorerImpl::IsInitialized() {
+ DCHECK(decoder_.get());
+ return decoder_->initialized();
+}
+
+void GLStateRestorerImpl::RestoreState(const gfx::GLStateRestorer* prev_state) {
+ DCHECK(decoder_.get());
+ const GLStateRestorerImpl* restorer_impl =
+ static_cast<const GLStateRestorerImpl*>(prev_state);
+ decoder_->RestoreState(
+ restorer_impl ? restorer_impl->GetContextState() : NULL);
+}
+
+void GLStateRestorerImpl::RestoreAllTextureUnitBindings() {
+ DCHECK(decoder_.get());
+ decoder_->RestoreAllTextureUnitBindings(NULL);
+}
+
+void GLStateRestorerImpl::RestoreActiveTextureUnitBinding(unsigned int target) {
+ DCHECK(decoder_.get());
+ decoder_->RestoreActiveTextureUnitBinding(target);
+}
+
+void GLStateRestorerImpl::RestoreFramebufferBindings() {
+ DCHECK(decoder_.get());
+ decoder_->RestoreFramebufferBindings();
+}
+
+const gles2::ContextState* GLStateRestorerImpl::GetContextState() const {
+ DCHECK(decoder_.get());
+ return decoder_->GetContextState();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gl_state_restorer_impl.h b/gpu/command_buffer/service/gl_state_restorer_impl.h
new file mode 100644
index 0000000..73534b8
--- /dev/null
+++ b/gpu/command_buffer/service/gl_state_restorer_impl.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the GLStateRestorerImpl class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GL_STATE_RESTORER_IMPL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GL_STATE_RESTORER_IMPL_H_
+
+#include "base/compiler_specific.h"
+#include "base/memory/weak_ptr.h"
+#include "gpu/gpu_export.h"
+#include "ui/gl/gl_state_restorer.h"
+
+namespace gpu {
+namespace gles2 {
+class GLES2Decoder;
+struct ContextState;
+}
+
+// This class implements a GLStateRestorer that forwards to a GLES2Decoder.
+class GPU_EXPORT GLStateRestorerImpl : public gfx::GLStateRestorer {
+ public:
+ explicit GLStateRestorerImpl(base::WeakPtr<gles2::GLES2Decoder> decoder);
+ virtual ~GLStateRestorerImpl();
+
+ virtual bool IsInitialized() OVERRIDE;
+ virtual void RestoreState(const gfx::GLStateRestorer* prev_state) OVERRIDE;
+ virtual void RestoreAllTextureUnitBindings() OVERRIDE;
+ virtual void RestoreActiveTextureUnitBinding(unsigned int target) OVERRIDE;
+ virtual void RestoreFramebufferBindings() OVERRIDE;
+
+ private:
+ const gles2::ContextState* GetContextState() const;
+ base::WeakPtr<gles2::GLES2Decoder> decoder_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLStateRestorerImpl);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GL_STATE_RESTORER_IMPL_H_
diff --git a/gpu/command_buffer/service/gl_surface_mock.cc b/gpu/command_buffer/service/gl_surface_mock.cc
new file mode 100644
index 0000000..9706a18
--- /dev/null
+++ b/gpu/command_buffer/service/gl_surface_mock.cc
@@ -0,0 +1,14 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+
+namespace gpu {
+
+GLSurfaceMock::GLSurfaceMock() {
+}
+
+GLSurfaceMock::~GLSurfaceMock() {
+}
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gl_surface_mock.h b/gpu/command_buffer/service/gl_surface_mock.h
new file mode 100644
index 0000000..0652be6
--- /dev/null
+++ b/gpu/command_buffer/service/gl_surface_mock.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GL_SURFACE_MOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GL_SURFACE_MOCK_H_
+
+#include "ui/gl/gl_surface.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+
+class GLSurfaceMock : public gfx::GLSurface {
+ public:
+ GLSurfaceMock();
+
+ MOCK_METHOD0(Initialize, bool());
+ MOCK_METHOD0(Destroy, void());
+ MOCK_METHOD1(Resize, bool(const gfx::Size& size));
+ MOCK_METHOD0(IsOffscreen, bool());
+ MOCK_METHOD0(SwapBuffers, bool());
+ MOCK_METHOD4(PostSubBuffer, bool(int x, int y, int width, int height));
+ MOCK_METHOD0(SupportsPostSubBuffer, bool());
+ MOCK_METHOD0(GetSize, gfx::Size());
+ MOCK_METHOD0(GetHandle, void*());
+ MOCK_METHOD0(GetBackingFrameBufferObject, unsigned int());
+ MOCK_METHOD1(OnMakeCurrent, bool(gfx::GLContext* context));
+ MOCK_METHOD1(SetBackbufferAllocation, bool(bool allocated));
+ MOCK_METHOD1(SetFrontbufferAllocation, void(bool allocated));
+ MOCK_METHOD0(GetShareHandle, void*());
+ MOCK_METHOD0(GetDisplay, void*());
+ MOCK_METHOD0(GetConfig, void*());
+ MOCK_METHOD0(GetFormat, unsigned());
+
+ protected:
+ virtual ~GLSurfaceMock();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(GLSurfaceMock);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GL_SURFACE_MOCK_H_
diff --git a/gpu/command_buffer/service/gl_utils.h b/gpu/command_buffer/service/gl_utils.h
new file mode 100644
index 0000000..ade4a37
--- /dev/null
+++ b/gpu/command_buffer/service/gl_utils.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file includes all the necessary GL headers and implements some useful
+// utilities.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GL_UTILS_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GL_UTILS_H_
+
+#include "build/build_config.h"
+#include "ui/gl/gl_bindings.h"
+
+// Define this for extra GL error debugging (slower).
+// #define GL_ERROR_DEBUGGING
+#ifdef GL_ERROR_DEBUGGING
+#define CHECK_GL_ERROR() do { \
+ GLenum gl_error = glGetError(); \
+ LOG_IF(ERROR, gl_error != GL_NO_ERROR) << "GL Error: " << gl_error; \
+ } while (0)
+#else // GL_ERROR_DEBUGGING
+#define CHECK_GL_ERROR() void(0)
+#endif // GL_ERROR_DEBUGGING
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GL_UTILS_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
new file mode 100644
index 0000000..f98ca2e
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -0,0 +1,482 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
+
+#include <algorithm>
+
+#include "base/basictypes.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
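+// The SHADER* macros below stringize |src| with the preprocessor (the #src at
+// the end of SHADER) and prepend the precision and sampler #defines, so the
+// same shader body can be reused for GL and GLES and for each sampler type.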
+#define SHADER(src) \
+ "#ifdef GL_ES\n" \
+ "precision mediump float;\n" \
+ "#define TexCoordPrecision mediump\n" \
+ "#else\n" \
+ "#define TexCoordPrecision\n" \
+ "#endif\n" #src
+#define SHADER_2D(src) \
+ "#define SamplerType sampler2D\n" \
+ "#define TextureLookup texture2D\n" SHADER(src)
+#define SHADER_RECTANGLE_ARB(src) \
+ "#define SamplerType samplerRect\n" \
+ "#define TextureLookup textureRect\n" SHADER(src)
+#define SHADER_EXTERNAL_OES(src) \
+ "#extension GL_OES_EGL_image_external : require\n" \
+ "#define SamplerType samplerExternalOES\n" \
+ "#define TextureLookup texture2D\n" SHADER(src)
+#define FRAGMENT_SHADERS(src) \
+ SHADER_2D(src), SHADER_RECTANGLE_ARB(src), SHADER_EXTERNAL_OES(src)
+
+namespace {
+
+enum VertexShaderId {
+ VERTEX_SHADER_COPY_TEXTURE,
+ VERTEX_SHADER_COPY_TEXTURE_FLIP_Y,
+ NUM_VERTEX_SHADERS,
+};
+
+enum FragmentShaderId {
+ FRAGMENT_SHADER_COPY_TEXTURE_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_EXTERNAL_OES,
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_EXTERNAL_OES,
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_EXTERNAL_OES,
+ NUM_FRAGMENT_SHADERS,
+};
+
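+// Both vertex shaders map the quad's clip-space position to texture
+// coordinates via u_half_size: it is 0.5 for normalized texture targets and
+// width/2, height/2 for GL_TEXTURE_RECTANGLE_ARB (see
+// DoCopyTextureWithTransform below), so v_uv covers [0, 1] or pixel
+// coordinates. The FLIP_Y variant negates the t scale to flip vertically.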
+const char* vertex_shader_source[NUM_VERTEX_SHADERS] = {
+ // VERTEX_SHADER_COPY_TEXTURE
+ SHADER(
+ uniform mat4 u_matrix;
+ uniform vec2 u_half_size;
+ attribute vec4 a_position;
+ varying TexCoordPrecision vec2 v_uv;
+ void main(void) {
+ gl_Position = u_matrix * a_position;
+ v_uv = a_position.xy * vec2(u_half_size.s, u_half_size.t) +
+ vec2(u_half_size.s, u_half_size.t);
+ }),
+ // VERTEX_SHADER_COPY_TEXTURE_FLIP_Y
+ SHADER(
+ uniform mat4 u_matrix;
+ uniform vec2 u_half_size;
+ attribute vec4 a_position;
+ varying TexCoordPrecision vec2 v_uv;
+ void main(void) {
+ gl_Position = u_matrix * a_position;
+ v_uv = a_position.xy * vec2(u_half_size.s, -u_half_size.t) +
+ vec2(u_half_size.s, u_half_size.t);
+ }),
+};
+
+const char* fragment_shader_source[NUM_FRAGMENT_SHADERS] = {
+ // FRAGMENT_SHADER_COPY_TEXTURE_*
+ FRAGMENT_SHADERS(
+ uniform SamplerType u_sampler;
+ varying TexCoordPrecision vec2 v_uv;
+ void main(void) {
+ gl_FragColor = TextureLookup(u_sampler, v_uv.st);
+ }),
+ // FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_*
+ FRAGMENT_SHADERS(
+ uniform SamplerType u_sampler;
+ varying TexCoordPrecision vec2 v_uv;
+ void main(void) {
+ gl_FragColor = TextureLookup(u_sampler, v_uv.st);
+ gl_FragColor.rgb *= gl_FragColor.a;
+ }),
+ // FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_*
+ FRAGMENT_SHADERS(
+ uniform SamplerType u_sampler;
+ varying TexCoordPrecision vec2 v_uv;
+ void main(void) {
+ gl_FragColor = TextureLookup(u_sampler, v_uv.st);
+ if (gl_FragColor.a > 0.0)
+ gl_FragColor.rgb /= gl_FragColor.a;
+ }),
+};
+
+// Returns the correct vertex shader id to evaluate the copy operation for
+// the CHROMIUM_flipy setting.
+VertexShaderId GetVertexShaderId(bool flip_y) {
+ // bit 0: flip y
+ static VertexShaderId shader_ids[] = {
+ VERTEX_SHADER_COPY_TEXTURE,
+ VERTEX_SHADER_COPY_TEXTURE_FLIP_Y,
+ };
+
+ unsigned index = flip_y ? 1 : 0;
+ return shader_ids[index];
+}
+
+// Returns the correct fragment shader id to evaluate the copy operation for
+// the premultiply alpha pixel store settings and target.
+FragmentShaderId GetFragmentShaderId(bool premultiply_alpha,
+ bool unpremultiply_alpha,
+ GLenum target) {
+ enum {
+ SAMPLER_2D,
+ SAMPLER_RECTANGLE_ARB,
+ SAMPLER_EXTERNAL_OES,
+ NUM_SAMPLERS
+ };
+
+ // bit 0: premultiply alpha
+ // bit 1: unpremultiply alpha
+ static FragmentShaderId shader_ids[][NUM_SAMPLERS] = {
+ {
+ FRAGMENT_SHADER_COPY_TEXTURE_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_EXTERNAL_OES,
+ },
+ {
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_EXTERNAL_OES,
+ },
+ {
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_EXTERNAL_OES,
+ },
+ {
+ FRAGMENT_SHADER_COPY_TEXTURE_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_EXTERNAL_OES,
+ }};
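+ // Note that the last row (both premultiply and unpremultiply set) reuses the
+ // plain copy shaders, since the two conversions cancel each other out.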
+
+ unsigned index = (premultiply_alpha ? (1 << 0) : 0) |
+ (unpremultiply_alpha ? (1 << 1) : 0);
+
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return shader_ids[index][SAMPLER_2D];
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return shader_ids[index][SAMPLER_RECTANGLE_ARB];
+ case GL_TEXTURE_EXTERNAL_OES:
+ return shader_ids[index][SAMPLER_EXTERNAL_OES];
+ default:
+ break;
+ }
+
+ NOTREACHED();
+ return shader_ids[0][SAMPLER_2D];
+}
+
+void CompileShader(GLuint shader, const char* shader_source) {
+ glShaderSource(shader, 1, &shader_source, 0);
+ glCompileShader(shader);
+#ifndef NDEBUG
+ GLint compile_status;
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_status);
+ if (GL_TRUE != compile_status)
+ DLOG(ERROR) << "CopyTextureCHROMIUM: shader compilation failure.";
+#endif
+}
+
+void DeleteShader(GLuint shader) {
+ if (shader)
+ glDeleteShader(shader);
+}
+
+bool BindFramebufferTexture2D(GLenum target,
+ GLuint texture_id,
+ GLint level,
+ GLuint framebuffer) {
+ DCHECK(target == GL_TEXTURE_2D || target == GL_TEXTURE_RECTANGLE_ARB);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(target, texture_id);
+ // NVidia drivers require texture settings to be a certain way
+ // or they won't report FRAMEBUFFER_COMPLETE.
+ glTexParameterf(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, framebuffer);
+ glFramebufferTexture2DEXT(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, texture_id, level);
+
+#ifndef NDEBUG
+ GLenum fb_status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
+ if (GL_FRAMEBUFFER_COMPLETE != fb_status) {
+ DLOG(ERROR) << "CopyTextureCHROMIUM: Incomplete framebuffer.";
+ return false;
+ }
+#endif
+ return true;
+}
+
+void DoCopyTexImage2D(const gpu::gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLuint framebuffer) {
+ DCHECK(source_target == GL_TEXTURE_2D ||
+ source_target == GL_TEXTURE_RECTANGLE_ARB);
+ if (BindFramebufferTexture2D(
+ source_target, source_id, 0 /* level */, framebuffer)) {
+ glBindTexture(GL_TEXTURE_2D, dest_id);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glCopyTexImage2D(GL_TEXTURE_2D,
+ dest_level,
+ dest_internal_format,
+ 0 /* x */,
+ 0 /* y */,
+ width,
+ height,
+ 0 /* border */);
+ }
+
+ decoder->RestoreTextureState(source_id);
+ decoder->RestoreTextureState(dest_id);
+ decoder->RestoreTextureUnitBindings(0);
+ decoder->RestoreActiveTexture();
+ decoder->RestoreFramebufferBindings();
+}
+
+} // namespace
+
+namespace gpu {
+
+CopyTextureCHROMIUMResourceManager::CopyTextureCHROMIUMResourceManager()
+ : initialized_(false),
+ vertex_shaders_(NUM_VERTEX_SHADERS, 0u),
+ fragment_shaders_(NUM_FRAGMENT_SHADERS, 0u),
+ buffer_id_(0u),
+ framebuffer_(0u) {}
+
+CopyTextureCHROMIUMResourceManager::~CopyTextureCHROMIUMResourceManager() {
+ DCHECK(!buffer_id_);
+ DCHECK(!framebuffer_);
+}
+
+void CopyTextureCHROMIUMResourceManager::Initialize(
+ const gles2::GLES2Decoder* decoder) {
+ COMPILE_ASSERT(
+ kVertexPositionAttrib == 0u,
+ Position_attribs_must_be_0);
+ DCHECK(!buffer_id_);
+ DCHECK(!framebuffer_);
+ DCHECK(programs_.empty());
+
+ // Initialize all of the GPU resources required to perform the copy.
+ glGenBuffersARB(1, &buffer_id_);
+ glBindBuffer(GL_ARRAY_BUFFER, buffer_id_);
+ const GLfloat kQuadVertices[] = {-1.0f, -1.0f,
+ 1.0f, -1.0f,
+ 1.0f, 1.0f,
+ -1.0f, 1.0f};
+ glBufferData(
+ GL_ARRAY_BUFFER, sizeof(kQuadVertices), kQuadVertices, GL_STATIC_DRAW);
+
+ glGenFramebuffersEXT(1, &framebuffer_);
+
+ decoder->RestoreBufferBindings();
+
+ initialized_ = true;
+}
+
+void CopyTextureCHROMIUMResourceManager::Destroy() {
+ if (!initialized_)
+ return;
+
+ glDeleteFramebuffersEXT(1, &framebuffer_);
+ framebuffer_ = 0;
+
+ std::for_each(vertex_shaders_.begin(), vertex_shaders_.end(), DeleteShader);
+ std::for_each(
+ fragment_shaders_.begin(), fragment_shaders_.end(), DeleteShader);
+
+ for (ProgramMap::const_iterator it = programs_.begin(); it != programs_.end();
+ ++it) {
+ const ProgramInfo& info = it->second;
+ glDeleteProgram(info.program);
+ }
+
+ glDeleteBuffersARB(1, &buffer_id_);
+ buffer_id_ = 0;
+}
+
+void CopyTextureCHROMIUMResourceManager::DoCopyTexture(
+ const gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLenum source_internal_format,
+ GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_internal_format,
+ GLsizei width,
+ GLsizei height,
+ bool flip_y,
+ bool premultiply_alpha,
+ bool unpremultiply_alpha) {
+ bool premultiply_alpha_change = premultiply_alpha ^ unpremultiply_alpha;
+ // GL_INVALID_OPERATION is generated if the currently bound framebuffer's
+ // format does not contain a superset of the components required by the base
+ // format of internalformat.
+ // https://www.khronos.org/opengles/sdk/docs/man/xhtml/glCopyTexImage2D.xml
+ bool source_format_contain_superset_of_dest_format =
+ source_internal_format == dest_internal_format ||
+ (source_internal_format == GL_RGBA && dest_internal_format == GL_RGB);
+ // GL_TEXTURE_RECTANGLE_ARB on FBO is supported by OpenGL, not GLES2,
+ // so restrict this to GL_TEXTURE_2D.
+ if (source_target == GL_TEXTURE_2D && !flip_y && !premultiply_alpha_change &&
+ source_format_contain_superset_of_dest_format) {
+ DoCopyTexImage2D(decoder,
+ source_target,
+ source_id,
+ dest_id,
+ dest_level,
+ dest_internal_format,
+ width,
+ height,
+ framebuffer_);
+ return;
+ }
+
+ // Use default transform matrix if no transform passed in.
+ static const GLfloat default_matrix[16] = {1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f};
+ DoCopyTextureWithTransform(decoder,
+ source_target,
+ source_id,
+ dest_id,
+ dest_level,
+ width,
+ height,
+ flip_y,
+ premultiply_alpha,
+ unpremultiply_alpha,
+ default_matrix);
+}
+
+void CopyTextureCHROMIUMResourceManager::DoCopyTextureWithTransform(
+ const gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLuint dest_id,
+ GLint dest_level,
+ GLsizei width,
+ GLsizei height,
+ bool flip_y,
+ bool premultiply_alpha,
+ bool unpremultiply_alpha,
+ const GLfloat transform_matrix[16]) {
+ DCHECK(source_target == GL_TEXTURE_2D ||
+ source_target == GL_TEXTURE_RECTANGLE_ARB ||
+ source_target == GL_TEXTURE_EXTERNAL_OES);
+ if (!initialized_) {
+ DLOG(ERROR) << "CopyTextureCHROMIUM: Uninitialized manager.";
+ return;
+ }
+
+ VertexShaderId vertex_shader_id = GetVertexShaderId(flip_y);
+ DCHECK_LT(static_cast<size_t>(vertex_shader_id), vertex_shaders_.size());
+ FragmentShaderId fragment_shader_id = GetFragmentShaderId(
+ premultiply_alpha, unpremultiply_alpha, source_target);
+ DCHECK_LT(static_cast<size_t>(fragment_shader_id), fragment_shaders_.size());
+
+ ProgramMapKey key(vertex_shader_id, fragment_shader_id);
+ ProgramInfo* info = &programs_[key];
+ // Create program if necessary.
+ if (!info->program) {
+ info->program = glCreateProgram();
+ GLuint* vertex_shader = &vertex_shaders_[vertex_shader_id];
+ if (!*vertex_shader) {
+ *vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+ CompileShader(*vertex_shader, vertex_shader_source[vertex_shader_id]);
+ }
+ glAttachShader(info->program, *vertex_shader);
+ GLuint* fragment_shader = &fragment_shaders_[fragment_shader_id];
+ if (!*fragment_shader) {
+ *fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
+ CompileShader(*fragment_shader,
+ fragment_shader_source[fragment_shader_id]);
+ }
+ glAttachShader(info->program, *fragment_shader);
+ glBindAttribLocation(info->program, kVertexPositionAttrib, "a_position");
+ glLinkProgram(info->program);
+#ifndef NDEBUG
+ GLint linked;
+ glGetProgramiv(info->program, GL_LINK_STATUS, &linked);
+ if (!linked)
+ DLOG(ERROR) << "CopyTextureCHROMIUM: program link failure.";
+#endif
+ info->matrix_handle = glGetUniformLocation(info->program, "u_matrix");
+ info->half_size_handle = glGetUniformLocation(info->program, "u_half_size");
+ info->sampler_handle = glGetUniformLocation(info->program, "u_sampler");
+ }
+ glUseProgram(info->program);
+
+#ifndef NDEBUG
+ glValidateProgram(info->program);
+ GLint validation_status;
+ glGetProgramiv(info->program, GL_VALIDATE_STATUS, &validation_status);
+ if (GL_TRUE != validation_status) {
+ DLOG(ERROR) << "CopyTextureCHROMIUM: Invalid shader.";
+ return;
+ }
+#endif
+
+ glUniformMatrix4fv(info->matrix_handle, 1, GL_FALSE, transform_matrix);
+ if (source_target == GL_TEXTURE_RECTANGLE_ARB)
+ glUniform2f(info->half_size_handle, width / 2.0f, height / 2.0f);
+ else
+ glUniform2f(info->half_size_handle, 0.5f, 0.5f);
+
+ if (BindFramebufferTexture2D(
+ GL_TEXTURE_2D, dest_id, dest_level, framebuffer_)) {
+ decoder->ClearAllAttributes();
+ glEnableVertexAttribArray(kVertexPositionAttrib);
+
+ glBindBuffer(GL_ARRAY_BUFFER, buffer_id_);
+ glVertexAttribPointer(kVertexPositionAttrib, 2, GL_FLOAT, GL_FALSE, 0, 0);
+
+ glUniform1i(info->sampler_handle, 0);
+
+ glBindTexture(source_target, source_id);
+ glTexParameterf(source_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(source_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(source_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(source_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+ glDisable(GL_DEPTH_TEST);
+ glDisable(GL_SCISSOR_TEST);
+ glDisable(GL_STENCIL_TEST);
+ glDisable(GL_CULL_FACE);
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glDepthMask(GL_FALSE);
+ glDisable(GL_BLEND);
+
+ glViewport(0, 0, width, height);
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+ }
+
+ decoder->RestoreAllAttributes();
+ decoder->RestoreTextureState(source_id);
+ decoder->RestoreTextureState(dest_id);
+ decoder->RestoreTextureUnitBindings(0);
+ decoder->RestoreActiveTexture();
+ decoder->RestoreProgramBindings();
+ decoder->RestoreBufferBindings();
+ decoder->RestoreFramebufferBindings();
+ decoder->RestoreGlobalState();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h b/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
new file mode 100644
index 0000000..083fc4c
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_COPY_TEXTURE_CHROMIUM_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_COPY_TEXTURE_CHROMIUM_H_
+
+#include <vector>
+
+#include "base/containers/hash_tables.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2Decoder;
+
+} // namespace gles2.
+
+// This class encapsulates the resources required to implement the
+// GL_CHROMIUM_copy_texture extension. The copy operation is performed
+// via glCopyTexImage2D() or a blit to a framebuffer object.
+// The target of |dest_id| texture must be GL_TEXTURE_2D.
+class GPU_EXPORT CopyTextureCHROMIUMResourceManager {
+ public:
+ CopyTextureCHROMIUMResourceManager();
+ ~CopyTextureCHROMIUMResourceManager();
+
+ void Initialize(const gles2::GLES2Decoder* decoder);
+ void Destroy();
+
+ void DoCopyTexture(const gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLenum source_internal_format,
+ GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_internal_format,
+ GLsizei width,
+ GLsizei height,
+ bool flip_y,
+ bool premultiply_alpha,
+ bool unpremultiply_alpha);
+
+ // This will apply a transform on the source texture before copying to
+ // destination texture.
+ void DoCopyTextureWithTransform(const gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLuint dest_id,
+ GLint dest_level,
+ GLsizei width,
+ GLsizei height,
+ bool flip_y,
+ bool premultiply_alpha,
+ bool unpremultiply_alpha,
+ const GLfloat transform_matrix[16]);
+
+ // The attributes used during invocation of the extension.
+ static const GLuint kVertexPositionAttrib = 0;
+
+ private:
+ struct ProgramInfo {
+ ProgramInfo()
+ : program(0u),
+ matrix_handle(0u),
+ half_size_handle(0u),
+ sampler_handle(0u) {}
+
+ GLuint program;
+ GLuint matrix_handle;
+ GLuint half_size_handle;
+ GLuint sampler_handle;
+ };
+
+ bool initialized_;
+ typedef std::vector<GLuint> ShaderVector;
+ ShaderVector vertex_shaders_;
+ ShaderVector fragment_shaders_;
+ typedef std::pair<int, int> ProgramMapKey;
+ typedef base::hash_map<ProgramMapKey, ProgramInfo> ProgramMap;
+ ProgramMap programs_;
+ GLuint buffer_id_;
+ GLuint framebuffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(CopyTextureCHROMIUMResourceManager);
+};
+
+} // namespace gpu.
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_COPY_TEXTURE_CHROMIUM_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.cc b/gpu/command_buffer/service/gles2_cmd_decoder.cc
new file mode 100644
index 0000000..9bf037f
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -0,0 +1,11153 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <list>
+#include <map>
+#include <stack>
+#include <string>
+#include <vector>
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/command_line.h"
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_synthetic_delay.h"
+#include "base/float_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/numerics/safe_math.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "build/build_config.h"
+#define GLES2_GPU_SERVICE 1
+#include "gpu/command_buffer/common/debug_marker_manager.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
+#include "gpu/command_buffer/service/gles2_cmd_validation.h"
+#include "gpu/command_buffer/service/gpu_state_tracer.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "gpu/command_buffer/service/shader_translator_cache.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+#include "third_party/smhasher/src/City.h"
+#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_image.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_surface.h"
+
+#if defined(OS_MACOSX)
+#include <IOSurface/IOSurfaceAPI.h>
+// Note that this must be included after gl_bindings.h to avoid conflicts.
+#include <OpenGL/CGLIOSurface.h>
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/win_util.h"
+#endif
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+static const char kOESDerivativeExtension[] = "GL_OES_standard_derivatives";
+static const char kEXTFragDepthExtension[] = "GL_EXT_frag_depth";
+static const char kEXTDrawBuffersExtension[] = "GL_EXT_draw_buffers";
+static const char kEXTShaderTextureLodExtension[] = "GL_EXT_shader_texture_lod";
+
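+// GLSL ES 1.0 requires highp float to cover at least the range +/-2^62 with a
+// relative precision of 2^-16; the check below rejects drivers reporting less.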
+static bool PrecisionMeetsSpecForHighpFloat(GLint rangeMin,
+ GLint rangeMax,
+ GLint precision) {
+ return (rangeMin >= 62) && (rangeMax >= 62) && (precision >= 16);
+}
+
+static void GetShaderPrecisionFormatImpl(GLenum shader_type,
+ GLenum precision_type,
+ GLint *range, GLint *precision) {
+ switch (precision_type) {
+ case GL_LOW_INT:
+ case GL_MEDIUM_INT:
+ case GL_HIGH_INT:
+ // These values are for a 32-bit twos-complement integer format.
+ range[0] = 31;
+ range[1] = 30;
+ *precision = 0;
+ break;
+ case GL_LOW_FLOAT:
+ case GL_MEDIUM_FLOAT:
+ case GL_HIGH_FLOAT:
+ // These values are for an IEEE single-precision floating-point format.
+ range[0] = 127;
+ range[1] = 127;
+ *precision = 23;
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2 &&
+ gfx::g_driver_gl.fn.glGetShaderPrecisionFormatFn) {
+ // This function is sometimes defined even though it's really just
+ // a stub, so we need to set range and precision as if it weren't
+ // defined before calling it.
+ // On Mac OS with some GPUs, calling this generates a
+ // GL_INVALID_OPERATION error. Avoid calling it on non-GLES2
+ // platforms.
+ glGetShaderPrecisionFormat(shader_type, precision_type,
+ range, precision);
+
+ // TODO(brianderson): Make the following official workarounds.
+
+ // Some drivers have bugs where they report the ranges as a negative number.
+ // Taking the absolute value here shouldn't hurt because negative numbers
+ // aren't expected anyway.
+ range[0] = abs(range[0]);
+ range[1] = abs(range[1]);
+
+ // If the driver reports a precision for highp float that isn't actually
+ // highp, don't pretend like it's supported because shader compilation will
+ // fail anyway.
+ if (precision_type == GL_HIGH_FLOAT &&
+ !PrecisionMeetsSpecForHighpFloat(range[0], range[1], *precision)) {
+ range[0] = 0;
+ range[1] = 0;
+ *precision = 0;
+ }
+ }
+}
+
+static gfx::OverlayTransform GetGFXOverlayTransform(GLenum plane_transform) {
+ switch (plane_transform) {
+ case GL_OVERLAY_TRANSFORM_NONE_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_NONE;
+ case GL_OVERLAY_TRANSFORM_FLIP_HORIZONTAL_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_FLIP_HORIZONTAL;
+ case GL_OVERLAY_TRANSFORM_FLIP_VERTICAL_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_FLIP_VERTICAL;
+ case GL_OVERLAY_TRANSFORM_ROTATE_90_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_ROTATE_90;
+ case GL_OVERLAY_TRANSFORM_ROTATE_180_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_ROTATE_180;
+ case GL_OVERLAY_TRANSFORM_ROTATE_270_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_ROTATE_270;
+ default:
+ return gfx::OVERLAY_TRANSFORM_INVALID;
+ }
+}
+
+} // namespace
+
+class GLES2DecoderImpl;
+
+// Local versions of the SET_GL_ERROR macros
+#define LOCAL_SET_GL_ERROR(error, function_name, msg) \
+ ERRORSTATE_SET_GL_ERROR(state_.GetErrorState(), error, function_name, msg)
+#define LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, value, label) \
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(state_.GetErrorState(), \
+ function_name, value, label)
+#define LOCAL_SET_GL_ERROR_INVALID_PARAM(error, function_name, pname) \
+ ERRORSTATE_SET_GL_ERROR_INVALID_PARAM(state_.GetErrorState(), error, \
+ function_name, pname)
+#define LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(function_name) \
+ ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(state_.GetErrorState(), \
+ function_name)
+#define LOCAL_PEEK_GL_ERROR(function_name) \
+ ERRORSTATE_PEEK_GL_ERROR(state_.GetErrorState(), function_name)
+#define LOCAL_CLEAR_REAL_GL_ERRORS(function_name) \
+ ERRORSTATE_CLEAR_REAL_GL_ERRORS(state_.GetErrorState(), function_name)
+#define LOCAL_PERFORMANCE_WARNING(msg) \
+ PerformanceWarning(__FILE__, __LINE__, msg)
+#define LOCAL_RENDER_WARNING(msg) \
+ RenderWarning(__FILE__, __LINE__, msg)
+
+// Check that certain assumptions the code makes are true. There are places in
+// the code where shared memory is passed directly to GL, for example
+// glUniformiv and glShaderSource. The command buffer code assumes GLint and
+// GLsizei (and maybe a few others) are 32 bits. If they are not 32 bits the
+// code will have to change
+// to call those GL functions with service side memory and then copy the results
+// to shared memory, converting the sizes.
+COMPILE_ASSERT(sizeof(GLint) == sizeof(uint32), // NOLINT
+ GLint_not_same_size_as_uint32);
+COMPILE_ASSERT(sizeof(GLsizei) == sizeof(uint32), // NOLINT
+ GLsizei_not_same_size_as_uint32);
+COMPILE_ASSERT(sizeof(GLfloat) == sizeof(float), // NOLINT
+ GLfloat_not_same_size_as_float);
+
+// TODO(kbr): the use of this anonymous namespace core dumps the
+// linker on Mac OS X 10.6 when the symbol ordering file is used
+// namespace {
+
+// Returns the address of the first byte after a struct.
+template <typename T>
+const void* AddressAfterStruct(const T& pod) {
+ return reinterpret_cast<const uint8*>(&pod) + sizeof(pod);
+}
+
+// Returns the address of the first byte after the struct or NULL if size >
+// immediate_data_size.
+template <typename RETURN_TYPE, typename COMMAND_TYPE>
+RETURN_TYPE GetImmediateDataAs(const COMMAND_TYPE& pod,
+ uint32 size,
+ uint32 immediate_data_size) {
+ return (size <= immediate_data_size) ?
+ static_cast<RETURN_TYPE>(const_cast<void*>(AddressAfterStruct(pod))) :
+ NULL;
+}
+
+// Computes the data size for certain gl commands like glUniform.
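+// For example, a glUniform4fv-style command with |count| vectors needs
+// count * sizeof(GLfloat) * 4 bytes; each multiplication is checked for
+// uint32 overflow.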
+bool ComputeDataSize(
+ GLuint count,
+ size_t size,
+ unsigned int elements_per_unit,
+ uint32* dst) {
+ uint32 value;
+ if (!SafeMultiplyUint32(count, size, &value)) {
+ return false;
+ }
+ if (!SafeMultiplyUint32(value, elements_per_unit, &value)) {
+ return false;
+ }
+ *dst = value;
+ return true;
+}
+
+// Return true if a character belongs to the ASCII subset as defined in
+// GLSL ES 1.0 spec section 3.1.
+static bool CharacterIsValidForGLES(unsigned char c) {
+ // Printing characters are valid except " $ ` @ \ ' DEL.
+ if (c >= 32 && c <= 126 &&
+ c != '"' &&
+ c != '$' &&
+ c != '`' &&
+ c != '@' &&
+ c != '\\' &&
+ c != '\'') {
+ return true;
+ }
+ // Horizontal tab, line feed, vertical tab, form feed, carriage return
+ // are also valid.
+ if (c >= 9 && c <= 13) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool StringIsValidForGLES(const char* str) {
+ for (; *str; ++str) {
+ if (!CharacterIsValidForGLES(*str)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// This class prevents any GL errors that occur when it is in scope from
+// being reported to the client.
+class ScopedGLErrorSuppressor {
+ public:
+ explicit ScopedGLErrorSuppressor(
+ const char* function_name, ErrorState* error_state);
+ ~ScopedGLErrorSuppressor();
+ private:
+ const char* function_name_;
+ ErrorState* error_state_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedGLErrorSuppressor);
+};
+
+// Temporarily changes a decoder's bound texture and restores it when this
+// object goes out of scope. Also temporarily switches to using active texture
+// unit zero in case the client has changed that to something invalid.
+class ScopedTextureBinder {
+ public:
+ explicit ScopedTextureBinder(ContextState* state, GLuint id, GLenum target);
+ ~ScopedTextureBinder();
+
+ private:
+ ContextState* state_;
+ GLenum target_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedTextureBinder);
+};
+
+// Temporarily changes a decoder's bound render buffer and restores it when this
+// object goes out of scope.
+class ScopedRenderBufferBinder {
+ public:
+ explicit ScopedRenderBufferBinder(ContextState* state, GLuint id);
+ ~ScopedRenderBufferBinder();
+
+ private:
+ ContextState* state_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedRenderBufferBinder);
+};
+
+// Temporarily changes a decoder's bound frame buffer and restores it when this
+// object goes out of scope.
+class ScopedFrameBufferBinder {
+ public:
+ explicit ScopedFrameBufferBinder(GLES2DecoderImpl* decoder, GLuint id);
+ ~ScopedFrameBufferBinder();
+
+ private:
+ GLES2DecoderImpl* decoder_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedFrameBufferBinder);
+};
+
+// Temporarily changes a decoder's bound frame buffer to a resolved version of
+// the multisampled offscreen render buffer if that buffer is multisampled and
+// it is either bound or enforce_internal_framebuffer is true. If internal is
+// true, the resolved framebuffer is not visible to the parent.
+class ScopedResolvedFrameBufferBinder {
+ public:
+ explicit ScopedResolvedFrameBufferBinder(GLES2DecoderImpl* decoder,
+ bool enforce_internal_framebuffer,
+ bool internal);
+ ~ScopedResolvedFrameBufferBinder();
+
+ private:
+ GLES2DecoderImpl* decoder_;
+ bool resolve_and_bind_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedResolvedFrameBufferBinder);
+};
+
+class ScopedModifyPixels {
+ public:
+ explicit ScopedModifyPixels(TextureRef* ref);
+ ~ScopedModifyPixels();
+
+ private:
+ TextureRef* ref_;
+};
+
+ScopedModifyPixels::ScopedModifyPixels(TextureRef* ref) : ref_(ref) {
+ if (ref_)
+ ref_->texture()->OnWillModifyPixels();
+}
+
+ScopedModifyPixels::~ScopedModifyPixels() {
+ if (ref_)
+ ref_->texture()->OnDidModifyPixels();
+}
+
+class ScopedRenderTo {
+ public:
+ explicit ScopedRenderTo(Framebuffer* framebuffer);
+ ~ScopedRenderTo();
+
+ private:
+ const Framebuffer* framebuffer_;
+};
+
+ScopedRenderTo::ScopedRenderTo(Framebuffer* framebuffer)
+ : framebuffer_(framebuffer) {
+ if (framebuffer)
+ framebuffer_->OnWillRenderTo();
+}
+
+ScopedRenderTo::~ScopedRenderTo() {
+ if (framebuffer_)
+ framebuffer_->OnDidRenderTo();
+}
+
+// Encapsulates an OpenGL texture.
+class BackTexture {
+ public:
+ explicit BackTexture(MemoryTracker* memory_tracker, ContextState* state);
+ ~BackTexture();
+
+ // Create a new render texture.
+ void Create();
+
+ // Set the initial size and format of a render texture or resize it.
+ bool AllocateStorage(const gfx::Size& size, GLenum format, bool zero);
+
+ // Copy the contents of the currently bound frame buffer.
+ void Copy(const gfx::Size& size, GLenum format);
+
+ // Destroy the render texture. This must be explicitly called before
+ // destroying this object.
+ void Destroy();
+
+ // Invalidate the texture. This can be used when a context is lost and it is
+ // not possible to make it current in order to free the resource.
+ void Invalidate();
+
+ GLuint id() const {
+ return id_;
+ }
+
+ gfx::Size size() const {
+ return size_;
+ }
+
+ private:
+ MemoryTypeTracker memory_tracker_;
+ ContextState* state_;
+ size_t bytes_allocated_;
+ GLuint id_;
+ gfx::Size size_;
+ DISALLOW_COPY_AND_ASSIGN(BackTexture);
+};
+
+// Encapsulates an OpenGL render buffer of any format.
+class BackRenderbuffer {
+ public:
+ explicit BackRenderbuffer(
+ RenderbufferManager* renderbuffer_manager,
+ MemoryTracker* memory_tracker,
+ ContextState* state);
+ ~BackRenderbuffer();
+
+ // Create a new render buffer.
+ void Create();
+
+ // Set the initial size and format of a render buffer or resize it.
+ bool AllocateStorage(const FeatureInfo* feature_info,
+ const gfx::Size& size,
+ GLenum format,
+ GLsizei samples);
+
+ // Destroy the render buffer. This must be explicitly called before destroying
+ // this object.
+ void Destroy();
+
+ // Invalidate the render buffer. This can be used when a context is lost and
+ // it is not possible to make it current in order to free the resource.
+ void Invalidate();
+
+ GLuint id() const {
+ return id_;
+ }
+
+ private:
+ RenderbufferManager* renderbuffer_manager_;
+ MemoryTypeTracker memory_tracker_;
+ ContextState* state_;
+ size_t bytes_allocated_;
+ GLuint id_;
+ DISALLOW_COPY_AND_ASSIGN(BackRenderbuffer);
+};
+
+// Encapsulates an OpenGL frame buffer.
+class BackFramebuffer {
+ public:
+ explicit BackFramebuffer(GLES2DecoderImpl* decoder);
+ ~BackFramebuffer();
+
+ // Create a new frame buffer.
+ void Create();
+
+ // Attach a color render buffer to a frame buffer.
+ void AttachRenderTexture(BackTexture* texture);
+
+ // Attach a render buffer to a frame buffer. Note that this unbinds any
+ // currently bound frame buffer.
+ void AttachRenderBuffer(GLenum target, BackRenderbuffer* render_buffer);
+
+ // Destroy the frame buffer. This must be explicitly called before destroying
+ // this object.
+ void Destroy();
+
+ // Invalidate the frame buffer. This can be used when a context is lost and it
+ // is not possible to make it current in order to free the resource.
+ void Invalidate();
+
+ // See glCheckFramebufferStatusEXT.
+ GLenum CheckStatus();
+
+ GLuint id() const {
+ return id_;
+ }
+
+ private:
+ GLES2DecoderImpl* decoder_;
+ GLuint id_;
+ DISALLOW_COPY_AND_ASSIGN(BackFramebuffer);
+};
+
+struct FenceCallback {
+ explicit FenceCallback()
+ : fence(gfx::GLFence::Create()) {
+ DCHECK(fence);
+ }
+ std::vector<base::Closure> callbacks;
+ scoped_ptr<gfx::GLFence> fence;
+};
+
+class AsyncUploadTokenCompletionObserver
+ : public AsyncPixelTransferCompletionObserver {
+ public:
+ explicit AsyncUploadTokenCompletionObserver(uint32 async_upload_token)
+ : async_upload_token_(async_upload_token) {
+ }
+
+ virtual void DidComplete(const AsyncMemoryParams& mem_params) OVERRIDE {
+ DCHECK(mem_params.buffer().get());
+ void* data = mem_params.GetDataAddress();
+ AsyncUploadSync* sync = static_cast<AsyncUploadSync*>(data);
+ sync->SetAsyncUploadToken(async_upload_token_);
+ }
+
+ private:
+ virtual ~AsyncUploadTokenCompletionObserver() {
+ }
+
+ uint32 async_upload_token_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncUploadTokenCompletionObserver);
+};
+
+// } // anonymous namespace.
+
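+// All stencil bits are enabled by default; casting -1 to unsigned produces a
+// mask with every bit set.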
+// static
+const unsigned int GLES2Decoder::kDefaultStencilMask =
+ static_cast<unsigned int>(-1);
+
+bool GLES2Decoder::GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id) {
+ return false;
+}
+
+GLES2Decoder::GLES2Decoder()
+ : initialized_(false),
+ debug_(false),
+ log_commands_(false) {
+}
+
+GLES2Decoder::~GLES2Decoder() {
+}
+
+void GLES2Decoder::BeginDecoding() {}
+
+void GLES2Decoder::EndDecoding() {}
+
+// This class implements GLES2Decoder so we don't have to expose all the GLES2
+// cmd stuff to outside this class.
+class GLES2DecoderImpl : public GLES2Decoder,
+ public FramebufferManager::TextureDetachObserver,
+ public ErrorStateClient {
+ public:
+ explicit GLES2DecoderImpl(ContextGroup* group);
+ virtual ~GLES2DecoderImpl();
+
+ // Overridden from AsyncAPIInterface.
+ virtual Error DoCommand(unsigned int command,
+ unsigned int arg_count,
+ const void* args) OVERRIDE;
+
+ virtual error::Error DoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) OVERRIDE;
+
+ template <bool DebugImpl>
+ error::Error DoCommandsImpl(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed);
+
+ // Overridden from AsyncAPIInterface.
+ virtual const char* GetCommandName(unsigned int command_id) const OVERRIDE;
+
+ // Overridden from GLES2Decoder.
+ virtual bool Initialize(const scoped_refptr<gfx::GLSurface>& surface,
+ const scoped_refptr<gfx::GLContext>& context,
+ bool offscreen,
+ const gfx::Size& size,
+ const DisallowedFeatures& disallowed_features,
+ const std::vector<int32>& attribs) OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+ virtual void SetSurface(
+ const scoped_refptr<gfx::GLSurface>& surface) OVERRIDE;
+ virtual void ProduceFrontBuffer(const Mailbox& mailbox) OVERRIDE;
+ virtual bool ResizeOffscreenFrameBuffer(const gfx::Size& size) OVERRIDE;
+ void UpdateParentTextureInfo();
+ virtual bool MakeCurrent() OVERRIDE;
+ virtual GLES2Util* GetGLES2Util() OVERRIDE { return &util_; }
+ virtual gfx::GLContext* GetGLContext() OVERRIDE { return context_.get(); }
+ virtual ContextGroup* GetContextGroup() OVERRIDE { return group_.get(); }
+ virtual Capabilities GetCapabilities() OVERRIDE;
+ virtual void RestoreState(const ContextState* prev_state) OVERRIDE;
+
+ virtual void RestoreActiveTexture() const OVERRIDE {
+ state_.RestoreActiveTexture();
+ }
+ virtual void RestoreAllTextureUnitBindings(
+ const ContextState* prev_state) const OVERRIDE {
+ state_.RestoreAllTextureUnitBindings(prev_state);
+ }
+ virtual void RestoreActiveTextureUnitBinding(
+ unsigned int target) const OVERRIDE {
+ state_.RestoreActiveTextureUnitBinding(target);
+ }
+ virtual void RestoreBufferBindings() const OVERRIDE {
+ state_.RestoreBufferBindings();
+ }
+ virtual void RestoreGlobalState() const OVERRIDE {
+ state_.RestoreGlobalState(NULL);
+ }
+ virtual void RestoreProgramBindings() const OVERRIDE {
+ state_.RestoreProgramBindings();
+ }
+ virtual void RestoreTextureUnitBindings(unsigned unit) const OVERRIDE {
+ state_.RestoreTextureUnitBindings(unit, NULL);
+ }
+ virtual void RestoreFramebufferBindings() const OVERRIDE;
+ virtual void RestoreRenderbufferBindings() OVERRIDE;
+ virtual void RestoreTextureState(unsigned service_id) const OVERRIDE;
+
+ virtual void ClearAllAttributes() const OVERRIDE;
+ virtual void RestoreAllAttributes() const OVERRIDE;
+
+ virtual QueryManager* GetQueryManager() OVERRIDE {
+ return query_manager_.get();
+ }
+ virtual VertexArrayManager* GetVertexArrayManager() OVERRIDE {
+ return vertex_array_manager_.get();
+ }
+ virtual ImageManager* GetImageManager() OVERRIDE {
+ return image_manager_.get();
+ }
+ virtual bool ProcessPendingQueries() OVERRIDE;
+ virtual bool HasMoreIdleWork() OVERRIDE;
+ virtual void PerformIdleWork() OVERRIDE;
+
+ virtual void WaitForReadPixels(base::Closure callback) OVERRIDE;
+
+ virtual void SetResizeCallback(
+ const base::Callback<void(gfx::Size, float)>& callback) OVERRIDE;
+
+ virtual Logger* GetLogger() OVERRIDE;
+
+ virtual void BeginDecoding() OVERRIDE;
+ virtual void EndDecoding() OVERRIDE;
+
+ virtual ErrorState* GetErrorState() OVERRIDE;
+ virtual const ContextState* GetContextState() OVERRIDE { return &state_; }
+
+ virtual void SetShaderCacheCallback(
+ const ShaderCacheCallback& callback) OVERRIDE;
+ virtual void SetWaitSyncPointCallback(
+ const WaitSyncPointCallback& callback) OVERRIDE;
+
+ virtual AsyncPixelTransferManager*
+ GetAsyncPixelTransferManager() OVERRIDE;
+ virtual void ResetAsyncPixelTransferManagerForTest() OVERRIDE;
+ virtual void SetAsyncPixelTransferManagerForTest(
+ AsyncPixelTransferManager* manager) OVERRIDE;
+ virtual void SetIgnoreCachedStateForTest(bool ignore) OVERRIDE;
+ void ProcessFinishedAsyncTransfers();
+
+ virtual bool GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id) OVERRIDE;
+
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual base::TimeDelta GetTotalProcessingCommandsTime() OVERRIDE;
+ virtual void AddProcessingCommandsTime(base::TimeDelta) OVERRIDE;
+
+ // Restores the current state to the user's settings.
+ void RestoreCurrentFramebufferBindings();
+
+ // Sets DEPTH_TEST, STENCIL_TEST and color mask for the current framebuffer.
+ void ApplyDirtyState();
+
+ // These check the state of the currently bound framebuffer or the
+ // backbuffer if no framebuffer is bound.
+ // If all_draw_buffers is false, only check with COLOR_ATTACHMENT0, otherwise
+ // check with all attached and enabled color attachments.
+ bool BoundFramebufferHasColorAttachmentWithAlpha(bool all_draw_buffers);
+ bool BoundFramebufferHasDepthAttachment();
+ bool BoundFramebufferHasStencilAttachment();
+
+ virtual error::ContextLostReason GetContextLostReason() OVERRIDE;
+
+ // Overridden from FramebufferManager::TextureDetachObserver:
+ virtual void OnTextureRefDetachedFromFramebuffer(
+ TextureRef* texture) OVERRIDE;
+
+ // Overridden from ErrorStateClient.
+ virtual void OnOutOfMemoryError() OVERRIDE;
+
+ // Ensure Renderbuffer corresponding to last DoBindRenderbuffer() is bound.
+ void EnsureRenderbufferBound();
+
+ // Helpers to facilitate calling into compatible extensions.
+ static void RenderbufferStorageMultisampleHelper(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLsizei samples,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height);
+
+ void BlitFramebufferHelper(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter);
+
+ private:
+ friend class ScopedFrameBufferBinder;
+ friend class ScopedResolvedFrameBufferBinder;
+ friend class BackFramebuffer;
+
+ // Initialize or re-initialize the shader translator.
+ bool InitializeShaderTranslator();
+
+ void UpdateCapabilities();
+
+ // Helpers for the glGen and glDelete functions.
+ bool GenTexturesHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteTexturesHelper(GLsizei n, const GLuint* client_ids);
+ bool GenBuffersHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteBuffersHelper(GLsizei n, const GLuint* client_ids);
+ bool GenFramebuffersHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteFramebuffersHelper(GLsizei n, const GLuint* client_ids);
+ bool GenRenderbuffersHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteRenderbuffersHelper(GLsizei n, const GLuint* client_ids);
+ bool GenQueriesEXTHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteQueriesEXTHelper(GLsizei n, const GLuint* client_ids);
+ bool GenVertexArraysOESHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteVertexArraysOESHelper(GLsizei n, const GLuint* client_ids);
+
+ // Helper for async upload token completion notification callback.
+ base::Closure AsyncUploadTokenCompletionClosure(uint32 async_upload_token,
+ uint32 sync_data_shm_id,
+ uint32 sync_data_shm_offset);
+
+
+
+ // Workarounds
+ void OnFboChanged() const;
+ void OnUseFramebuffer() const;
+
+ // TODO(gman): Cache these pointers?
+ BufferManager* buffer_manager() {
+ return group_->buffer_manager();
+ }
+
+ RenderbufferManager* renderbuffer_manager() {
+ return group_->renderbuffer_manager();
+ }
+
+ FramebufferManager* framebuffer_manager() {
+ return group_->framebuffer_manager();
+ }
+
+ ProgramManager* program_manager() {
+ return group_->program_manager();
+ }
+
+ ShaderManager* shader_manager() {
+ return group_->shader_manager();
+ }
+
+ ShaderTranslatorCache* shader_translator_cache() {
+ return group_->shader_translator_cache();
+ }
+
+ const TextureManager* texture_manager() const {
+ return group_->texture_manager();
+ }
+
+ TextureManager* texture_manager() {
+ return group_->texture_manager();
+ }
+
+ MailboxManager* mailbox_manager() {
+ return group_->mailbox_manager();
+ }
+
+ ImageManager* image_manager() { return image_manager_.get(); }
+
+ VertexArrayManager* vertex_array_manager() {
+ return vertex_array_manager_.get();
+ }
+
+ MemoryTracker* memory_tracker() {
+ return group_->memory_tracker();
+ }
+
+ bool EnsureGPUMemoryAvailable(size_t estimated_size) {
+ MemoryTracker* tracker = memory_tracker();
+ if (tracker) {
+ return tracker->EnsureGPUMemoryAvailable(estimated_size);
+ }
+ return true;
+ }
+
+ bool IsOffscreenBufferMultisampled() const {
+ return offscreen_target_samples_ > 1;
+ }
+
+ // Creates a Texture for the given texture.
+ TextureRef* CreateTexture(
+ GLuint client_id, GLuint service_id) {
+ return texture_manager()->CreateTexture(client_id, service_id);
+ }
+
+ // Gets the texture info for the given texture. Returns NULL if none exists.
+ TextureRef* GetTexture(GLuint client_id) const {
+ return texture_manager()->GetTexture(client_id);
+ }
+
+ // Deletes the texture info for the given texture.
+ void RemoveTexture(GLuint client_id) {
+ texture_manager()->RemoveTexture(client_id);
+ }
+
+ // Get the size (in pixels) of the currently bound frame buffer (either FBO
+ // or regular back buffer).
+ gfx::Size GetBoundReadFrameBufferSize();
+
+ // Get the format of the currently bound frame buffer (either FBO or regular
+ // back buffer)
+ GLenum GetBoundReadFrameBufferTextureType();
+ GLenum GetBoundReadFrameBufferInternalFormat();
+ GLenum GetBoundDrawFrameBufferInternalFormat();
+
+ // Wrapper for CompressedTexImage2D commands.
+ error::Error DoCompressedTexImage2D(
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei image_size,
+ const void* data);
+
+ // Wrapper for CompressedTexSubImage2D.
+ void DoCompressedTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void * data);
+
+ // Wrapper for CopyTexImage2D.
+ void DoCopyTexImage2D(
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border);
+
+ // Wrapper for SwapBuffers.
+ void DoSwapBuffers();
+
+ // Wrapper for CopyTexSubImage2D.
+ void DoCopyTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height);
+
+ // Validation for TexSubImage2D.
+ bool ValidateTexSubImage2D(
+ error::Error* error,
+ const char* function_name,
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void * data);
+
+ // Wrapper for TexSubImage2D.
+ error::Error DoTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void * data);
+
+ // Extra validation for async tex(Sub)Image2D.
+ bool ValidateAsyncTransfer(
+ const char* function_name,
+ TextureRef* texture_ref,
+ GLenum target,
+ GLint level,
+ const void * data);
+
+ // Wrapper for TexImageIOSurface2DCHROMIUM.
+ void DoTexImageIOSurface2DCHROMIUM(
+ GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint io_surface_id,
+ GLuint plane);
+
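+ // Wrapper for CopyTextureCHROMIUM.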
+ void DoCopyTextureCHROMIUM(
+ GLenum target,
+ GLuint source_id,
+ GLuint target_id,
+ GLint level,
+ GLenum internal_format,
+ GLenum dest_type);
+
+ // Wrapper for TexStorage2DEXT.
+ void DoTexStorage2DEXT(
+ GLenum target,
+ GLint levels,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height);
+
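+ // Wrappers for ProduceTextureCHROMIUM and ProduceTextureDirectCHROMIUM.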
+ void DoProduceTextureCHROMIUM(GLenum target, const GLbyte* key);
+ void DoProduceTextureDirectCHROMIUM(GLuint texture, GLenum target,
+ const GLbyte* key);
+ void ProduceTextureRef(std::string func_name, TextureRef* texture_ref,
+ GLenum target, const GLbyte* data);
+
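+ // Wrappers for ConsumeTextureCHROMIUM and CreateAndConsumeTextureCHROMIUM.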
+ void DoConsumeTextureCHROMIUM(GLenum target, const GLbyte* key);
+ void DoCreateAndConsumeTextureCHROMIUM(GLenum target, const GLbyte* key,
+ GLuint client_id);
+
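+ // Wrappers for BindTexImage2DCHROMIUM and ReleaseTexImage2DCHROMIUM.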
+ void DoBindTexImage2DCHROMIUM(
+ GLenum target,
+ GLint image_id);
+ void DoReleaseTexImage2DCHROMIUM(
+ GLenum target,
+ GLint image_id);
+
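+ // Wrapper for TraceEndCHROMIUM.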
+ void DoTraceEndCHROMIUM(void);
+
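+ // Wrapper for glDrawBuffersEXT.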
+ void DoDrawBuffersEXT(GLsizei count, const GLenum* bufs);
+
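+ // Wrapper for LoseContextCHROMIUM.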
+ void DoLoseContextCHROMIUM(GLenum current, GLenum other);
+
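+ // Wrappers for MatrixLoadfCHROMIUM and MatrixLoadIdentityCHROMIUM.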
+ void DoMatrixLoadfCHROMIUM(GLenum matrix_mode, const GLfloat* matrix);
+ void DoMatrixLoadIdentityCHROMIUM(GLenum matrix_mode);
+
+ // Creates a Program for the given program.
+ Program* CreateProgram(
+ GLuint client_id, GLuint service_id) {
+ return program_manager()->CreateProgram(client_id, service_id);
+ }
+
+ // Gets the program info for the given program. Returns NULL if none exists.
+ Program* GetProgram(GLuint client_id) {
+ return program_manager()->GetProgram(client_id);
+ }
+
+#if defined(NDEBUG)
+ void LogClientServiceMapping(
+ const char* /* function_name */,
+ GLuint /* client_id */,
+ GLuint /* service_id */) {
+ }
+ template<typename T>
+ void LogClientServiceForInfo(
+ T* /* info */, GLuint /* client_id */, const char* /* function_name */) {
+ }
+#else
+ void LogClientServiceMapping(
+ const char* function_name, GLuint client_id, GLuint service_id) {
+ if (service_logging_) {
+ VLOG(1) << "[" << logger_.GetLogPrefix() << "] " << function_name
+ << ": client_id = " << client_id
+ << ", service_id = " << service_id;
+ }
+ }
+ template<typename T>
+ void LogClientServiceForInfo(
+ T* info, GLuint client_id, const char* function_name) {
+ if (info) {
+ LogClientServiceMapping(function_name, client_id, info->service_id());
+ }
+ }
+#endif
+
+ // Gets the program info for the given program. If it's not a program,
+ // generates a GL error. Returns NULL if it is not a program.
+ Program* GetProgramInfoNotShader(
+ GLuint client_id, const char* function_name) {
+ Program* program = GetProgram(client_id);
+ if (!program) {
+ if (GetShader(client_id)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "shader passed for program");
+ } else {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "unknown program");
+ }
+ }
+ LogClientServiceForInfo(program, client_id, function_name);
+ return program;
+ }
+
+ // Creates a Shader for the given shader.
+ Shader* CreateShader(
+ GLuint client_id,
+ GLuint service_id,
+ GLenum shader_type) {
+ return shader_manager()->CreateShader(
+ client_id, service_id, shader_type);
+ }
+
+ // Gets the shader info for the given shader. Returns NULL if none exists.
+ Shader* GetShader(GLuint client_id) {
+ return shader_manager()->GetShader(client_id);
+ }
+
+ // Gets the shader info for the given shader. If it's not a shader, generates
+ // a GL error. Returns NULL if it is not a shader.
+ Shader* GetShaderInfoNotProgram(
+ GLuint client_id, const char* function_name) {
+ Shader* shader = GetShader(client_id);
+ if (!shader) {
+ if (GetProgram(client_id)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "program passed for shader");
+ } else {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, function_name, "unknown shader");
+ }
+ }
+ LogClientServiceForInfo(shader, client_id, function_name);
+ return shader;
+ }
+
+ // Creates a buffer info for the given buffer.
+ void CreateBuffer(GLuint client_id, GLuint service_id) {
+ return buffer_manager()->CreateBuffer(client_id, service_id);
+ }
+
+ // Gets the buffer info for the given buffer.
+ Buffer* GetBuffer(GLuint client_id) {
+ Buffer* buffer = buffer_manager()->GetBuffer(client_id);
+ return buffer;
+ }
+
+ // Removes any buffers in the VertexAttribInfos and BufferInfos. This is used
+ // on glDeleteBuffers so we can make sure the user does not try to render
+ // with deleted buffers.
+ void RemoveBuffer(GLuint client_id);
+
+ // Creates a framebuffer info for the given framebuffer.
+ void CreateFramebuffer(GLuint client_id, GLuint service_id) {
+ return framebuffer_manager()->CreateFramebuffer(client_id, service_id);
+ }
+
+ // Gets the framebuffer info for the given framebuffer.
+ Framebuffer* GetFramebuffer(GLuint client_id) {
+ return framebuffer_manager()->GetFramebuffer(client_id);
+ }
+
+ // Removes the framebuffer info for the given framebuffer.
+ void RemoveFramebuffer(GLuint client_id) {
+ framebuffer_manager()->RemoveFramebuffer(client_id);
+ }
+
+ // Creates a renderbuffer info for the given renderbuffer.
+ void CreateRenderbuffer(GLuint client_id, GLuint service_id) {
+ return renderbuffer_manager()->CreateRenderbuffer(
+ client_id, service_id);
+ }
+
+ // Gets the renderbuffer info for the given renderbuffer.
+ Renderbuffer* GetRenderbuffer(GLuint client_id) {
+ return renderbuffer_manager()->GetRenderbuffer(client_id);
+ }
+
+ // Removes the renderbuffer info for the given renderbuffer.
+ void RemoveRenderbuffer(GLuint client_id) {
+ renderbuffer_manager()->RemoveRenderbuffer(client_id);
+ }
+
+ // Gets the vertex attrib manager for the given vertex array.
+ VertexAttribManager* GetVertexAttribManager(GLuint client_id) {
+ VertexAttribManager* info =
+ vertex_array_manager()->GetVertexAttribManager(client_id);
+ return info;
+ }
+
+ // Removes the vertex attrib manager for the given vertex array.
+ void RemoveVertexAttribManager(GLuint client_id) {
+ vertex_array_manager()->RemoveVertexAttribManager(client_id);
+ }
+
+ // Creates a vertex attrib manager for the given vertex array.
+ scoped_refptr<VertexAttribManager> CreateVertexAttribManager(
+ GLuint client_id,
+ GLuint service_id,
+ bool client_visible) {
+ return vertex_array_manager()->CreateVertexAttribManager(
+ client_id, service_id, group_->max_vertex_attribs(), client_visible);
+ }
+
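+ // Wrappers for glBindAttribLocation and glBindUniformLocationCHROMIUM.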
+ void DoBindAttribLocation(GLuint client_id, GLuint index, const char* name);
+ void DoBindUniformLocationCHROMIUM(
+ GLuint client_id, GLint location, const char* name);
+
+ error::Error GetAttribLocationHelper(
+ GLuint client_id, uint32 location_shm_id, uint32 location_shm_offset,
+ const std::string& name_str);
+
+ error::Error GetUniformLocationHelper(
+ GLuint client_id, uint32 location_shm_id, uint32 location_shm_offset,
+ const std::string& name_str);
+
+ // Helper for glShaderSource.
+ error::Error ShaderSourceHelper(
+ GLuint client_id, const char* data, uint32 data_size);
+
+ // Clear any textures used by the current program.
+ bool ClearUnclearedTextures();
+
+ // Clears any uncleared attachments attached to the given frame buffer.
+ void ClearUnclearedAttachments(GLenum target, Framebuffer* framebuffer);
+
+ // overridden from GLES2Decoder
+ virtual bool ClearLevel(unsigned service_id,
+ unsigned bind_target,
+ unsigned target,
+ int level,
+ unsigned internal_format,
+ unsigned format,
+ unsigned type,
+ int width,
+ int height,
+ bool is_texture_immutable) OVERRIDE;
+
+ // Restore all GL state that affects clearing.
+ void RestoreClearState();
+
+ // Remembers the state of some capabilities.
+ // Returns: true if glEnable/glDisable should actually be called.
+ bool SetCapabilityState(GLenum cap, bool enabled);
+
+ // Check that the currently bound framebuffers are valid.
+ // Generates GL error if not.
+ bool CheckBoundFramebuffersValid(const char* func_name);
+
+ // Check that the currently bound read framebuffer has a color image
+ // attached. Generates GL error if not.
+ bool CheckBoundReadFramebufferColorAttachment(const char* func_name);
+
+ // Check if a framebuffer meets our requirements.
+ bool CheckFramebufferValid(
+ Framebuffer* framebuffer,
+ GLenum target,
+ const char* func_name);
+
+ // Checks if the current program exists and is valid. If not generates the
+ // appropriate GL error. Returns true if the current program is in a usable
+ // state.
+ bool CheckCurrentProgram(const char* function_name);
+
+ // Checks if the current program exists and is valid and that location is not
+ // -1. If the current program is not valid generates the appropriate GL
+ // error. Returns true if the current program is in a usable state and
+ // location is not -1.
+ bool CheckCurrentProgramForUniform(GLint location, const char* function_name);
+
+ // Gets the type of a uniform for a location in the current program. Sets GL
+ // errors if the current program is not valid. Returns true if the current
+ // program is valid and the location exists. Adjusts count so it
+ // does not overflow the uniform.
+ bool PrepForSetUniformByLocation(GLint fake_location,
+ const char* function_name,
+ Program::UniformApiType api_type,
+ GLint* real_location,
+ GLenum* type,
+ GLsizei* count);
+
+ // Gets the service id for any simulated backbuffer fbo.
+ GLuint GetBackbufferServiceId() const;
+
+ // Helper for glGetBooleanv, glGetFloatv and glGetIntegerv
+ bool GetHelper(GLenum pname, GLint* params, GLsizei* num_written);
+
+ // Helper for glGetVertexAttrib
+ void GetVertexAttribHelper(
+ const VertexAttrib* attrib, GLenum pname, GLint* param);
+
+ // Wrapper for glCreateProgram
+ bool CreateProgramHelper(GLuint client_id);
+
+ // Wrapper for glCreateShader
+ bool CreateShaderHelper(GLenum type, GLuint client_id);
+
+ // Wrapper for glActiveTexture
+ void DoActiveTexture(GLenum texture_unit);
+
+ // Wrapper for glAttachShader
+ void DoAttachShader(GLuint client_program_id, GLint client_shader_id);
+
+ // Wrapper for glBindBuffer since we need to track the current targets.
+ void DoBindBuffer(GLenum target, GLuint buffer);
+
+ // Wrapper for glBindFramebuffer since we need to track the current targets.
+ void DoBindFramebuffer(GLenum target, GLuint framebuffer);
+
+ // Wrapper for glBindRenderbuffer since we need to track the current targets.
+ void DoBindRenderbuffer(GLenum target, GLuint renderbuffer);
+
+ // Wrapper for glBindTexture since we need to track the current targets.
+ void DoBindTexture(GLenum target, GLuint texture);
+
+ // Wrapper for glBindVertexArrayOES
+ void DoBindVertexArrayOES(GLuint array);
+ void EmulateVertexArrayState();
+
+ // Wrapper for glBlitFramebufferCHROMIUM.
+ void DoBlitFramebufferCHROMIUM(
+ GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter);
+
+ // Wrapper for glBufferSubData.
+ void DoBufferSubData(
+ GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid * data);
+
+ // Wrapper for glCheckFramebufferStatus
+ GLenum DoCheckFramebufferStatus(GLenum target);
+
+ // Wrapper for glClear
+ error::Error DoClear(GLbitfield mask);
+
+ // Wrappers for various state.
+ void DoDepthRangef(GLclampf znear, GLclampf zfar);
+ void DoSampleCoverage(GLclampf value, GLboolean invert);
+
+ // Wrapper for glCompileShader.
+ void DoCompileShader(GLuint shader);
+
+ // Wrapper for glDetachShader
+ void DoDetachShader(GLuint client_program_id, GLint client_shader_id);
+
+ // Wrapper for glDisable
+ void DoDisable(GLenum cap);
+
+ // Wrapper for glDisableVertexAttribArray.
+ void DoDisableVertexAttribArray(GLuint index);
+
+ // Wrapper for glDiscardFramebufferEXT, since we need to track undefined
+ // attachments.
+ void DoDiscardFramebufferEXT(GLenum target,
+ GLsizei numAttachments,
+ const GLenum* attachments);
+
+ // Wrapper for glEnable
+ void DoEnable(GLenum cap);
+
+ // Wrapper for glEnableVertexAttribArray.
+ void DoEnableVertexAttribArray(GLuint index);
+
+ // Wrapper for glFinish.
+ void DoFinish();
+
+ // Wrapper for glFlush.
+ void DoFlush();
+
+ // Wrapper for glFramebufferRenderbuffer.
+ void DoFramebufferRenderbuffer(
+ GLenum target, GLenum attachment, GLenum renderbuffertarget,
+ GLuint renderbuffer);
+
+ // Wrapper for glFramebufferTexture2D.
+ void DoFramebufferTexture2D(
+ GLenum target, GLenum attachment, GLenum textarget, GLuint texture,
+ GLint level);
+
+ // Wrapper for glFramebufferTexture2DMultisampleEXT.
+ void DoFramebufferTexture2DMultisample(
+ GLenum target, GLenum attachment, GLenum textarget,
+ GLuint texture, GLint level, GLsizei samples);
+
+ // Common implementation for both DoFramebufferTexture2D wrappers.
+ void DoFramebufferTexture2DCommon(const char* name,
+ GLenum target, GLenum attachment, GLenum textarget,
+ GLuint texture, GLint level, GLsizei samples);
+
+ // Wrapper for glGenerateMipmap
+ void DoGenerateMipmap(GLenum target);
+
+ // Helper for DoGetBooleanv, Floatv, and Integerv to adjust pname
+ // to account for different pname values defined in different extension
+ // variants.
+ GLenum AdjustGetPname(GLenum pname);
+
+ // Wrapper for glGetBooleanv.
+ void DoGetBooleanv(GLenum pname, GLboolean* params);
+
+ // Wrapper for glGetFloatv.
+ void DoGetFloatv(GLenum pname, GLfloat* params);
+
+ // Wrapper for glGetFramebufferAttachmentParameteriv.
+ void DoGetFramebufferAttachmentParameteriv(
+ GLenum target, GLenum attachment, GLenum pname, GLint* params);
+
+ // Wrapper for glGetIntegerv.
+ void DoGetIntegerv(GLenum pname, GLint* params);
+
+ // Gets the max value in a range in a buffer.
+ GLuint DoGetMaxValueInBufferCHROMIUM(
+ GLuint buffer_id, GLsizei count, GLenum type, GLuint offset);
+
+ // Wrapper for glGetBufferParameteriv.
+ void DoGetBufferParameteriv(
+ GLenum target, GLenum pname, GLint* params);
+
+ // Wrapper for glGetProgramiv.
+ void DoGetProgramiv(
+ GLuint program_id, GLenum pname, GLint* params);
+
+ // Wrapper for glGetRenderbufferParameteriv.
+ void DoGetRenderbufferParameteriv(
+ GLenum target, GLenum pname, GLint* params);
+
+ // Wrapper for glGetShaderiv
+ void DoGetShaderiv(GLuint shader, GLenum pname, GLint* params);
+
+ // Wrappers for glGetTexParameter.
+ void DoGetTexParameterfv(GLenum target, GLenum pname, GLfloat* params);
+ void DoGetTexParameteriv(GLenum target, GLenum pname, GLint* params);
+ void InitTextureMaxAnisotropyIfNeeded(GLenum target, GLenum pname);
+
+ // Wrappers for glGetVertexAttrib.
+ void DoGetVertexAttribfv(GLuint index, GLenum pname, GLfloat *params);
+ void DoGetVertexAttribiv(GLuint index, GLenum pname, GLint *params);
+
+ // Wrappers for glIsXXX functions.
+ bool DoIsEnabled(GLenum cap);
+ bool DoIsBuffer(GLuint client_id);
+ bool DoIsFramebuffer(GLuint client_id);
+ bool DoIsProgram(GLuint client_id);
+ bool DoIsRenderbuffer(GLuint client_id);
+ bool DoIsShader(GLuint client_id);
+ bool DoIsTexture(GLuint client_id);
+ bool DoIsVertexArrayOES(GLuint client_id);
+
+ // Wrapper for glLinkProgram
+ void DoLinkProgram(GLuint program);
+
+ // Wrapper for glRenderbufferStorage.
+ void DoRenderbufferStorage(
+ GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+
+ // Handler for glRenderbufferStorageMultisampleCHROMIUM.
+ void DoRenderbufferStorageMultisampleCHROMIUM(
+ GLenum target, GLsizei samples, GLenum internalformat,
+ GLsizei width, GLsizei height);
+
+ // Handler for glRenderbufferStorageMultisampleEXT
+ // (multisampled_render_to_texture).
+ void DoRenderbufferStorageMultisampleEXT(
+ GLenum target, GLsizei samples, GLenum internalformat,
+ GLsizei width, GLsizei height);
+
+ // Common validation for multisample extensions.
+ bool ValidateRenderbufferStorageMultisample(GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height);
+
+ // Verifies that the currently bound multisample renderbuffer is valid.
+ // Very slow! Only done on platforms with driver bugs that return invalid
+ // buffers under memory pressure.
+ bool VerifyMultisampleRenderbufferIntegrity(
+ GLuint renderbuffer, GLenum format);
+
+ // Wrapper for glReleaseShaderCompiler.
+ void DoReleaseShaderCompiler() { }
+
+ // Wrappers for glTexParameter functions.
+ void DoTexParameterf(GLenum target, GLenum pname, GLfloat param);
+ void DoTexParameteri(GLenum target, GLenum pname, GLint param);
+ void DoTexParameterfv(GLenum target, GLenum pname, const GLfloat* params);
+ void DoTexParameteriv(GLenum target, GLenum pname, const GLint* params);
+
+ // Wrappers for glUniform1i and glUniform1iv because, according to the GLES2
+ // spec, only these 2 functions can be used to set sampler uniforms.
+ void DoUniform1i(GLint fake_location, GLint v0);
+ void DoUniform1iv(GLint fake_location, GLsizei count, const GLint* value);
+ void DoUniform2iv(GLint fake_location, GLsizei count, const GLint* value);
+ void DoUniform3iv(GLint fake_location, GLsizei count, const GLint* value);
+ void DoUniform4iv(GLint fake_location, GLsizei count, const GLint* value);
+
+ // Wrappers for glUniformfv because some drivers don't correctly accept
+ // bool uniforms.
+ void DoUniform1fv(GLint fake_location, GLsizei count, const GLfloat* value);
+ void DoUniform2fv(GLint fake_location, GLsizei count, const GLfloat* value);
+ void DoUniform3fv(GLint fake_location, GLsizei count, const GLfloat* value);
+ void DoUniform4fv(GLint fake_location, GLsizei count, const GLfloat* value);
+
+ void DoUniformMatrix2fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value);
+ void DoUniformMatrix3fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value);
+ void DoUniformMatrix4fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value);
+
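+ // Helper shared by the glVertexAttrib?? wrappers below.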
+ bool SetVertexAttribValue(
+ const char* function_name, GLuint index, const GLfloat* value);
+
+ // Wrappers for glVertexAttrib??
+ void DoVertexAttrib1f(GLuint index, GLfloat v0);
+ void DoVertexAttrib2f(GLuint index, GLfloat v0, GLfloat v1);
+ void DoVertexAttrib3f(GLuint index, GLfloat v0, GLfloat v1, GLfloat v2);
+ void DoVertexAttrib4f(
+ GLuint index, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+ void DoVertexAttrib1fv(GLuint index, const GLfloat *v);
+ void DoVertexAttrib2fv(GLuint index, const GLfloat *v);
+ void DoVertexAttrib3fv(GLuint index, const GLfloat *v);
+ void DoVertexAttrib4fv(GLuint index, const GLfloat *v);
+
+ // Wrapper for glViewport
+ void DoViewport(GLint x, GLint y, GLsizei width, GLsizei height);
+
+ // Wrapper for glUseProgram
+ void DoUseProgram(GLuint program);
+
+ // Wrapper for glValidateProgram.
+ void DoValidateProgram(GLuint program_client_id);
+
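+ // Wrappers for the EXT_debug_marker entry points.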
+ void DoInsertEventMarkerEXT(GLsizei length, const GLchar* marker);
+ void DoPushGroupMarkerEXT(GLsizei length, const GLchar* group);
+ void DoPopGroupMarkerEXT(void);
+
+ // Gets the number of values that will be returned by glGetXXX. Returns
+ // false if pname is unknown.
+ bool GetNumValuesReturnedForGLGet(GLenum pname, GLsizei* num_values);
+
+ // Checks if the current program and vertex attributes are valid for drawing.
+ bool IsDrawValid(
+ const char* function_name, GLuint max_vertex_accessed, bool instanced,
+ GLsizei primcount);
+
+ // Returns true if successful; |simulated| will be true if attrib0 was
+ // simulated.
+ bool SimulateAttrib0(
+ const char* function_name, GLuint max_vertex_accessed, bool* simulated);
+ void RestoreStateForAttrib(GLuint attrib, bool restore_array_binding);
+
+ // If an image is bound to texture, this will call Will/DidUseTexImage
+ // if needed.
+ void DoWillUseTexImageIfNeeded(Texture* texture, GLenum textarget);
+ void DoDidUseTexImageIfNeeded(Texture* texture, GLenum textarget);
+
+ // Returns false if textures were replaced.
+ bool PrepareTexturesForRender();
+ void RestoreStateForTextures();
+
+ // Returns true if GL_FIXED attribs were simulated.
+ bool SimulateFixedAttribs(
+ const char* function_name,
+ GLuint max_vertex_accessed, bool* simulated, GLsizei primcount);
+ void RestoreStateForSimulatedFixedAttribs();
+
+ // Handle DrawArrays and DrawElements for both instanced and non-instanced
+ // cases (primcount is always 1 for non-instanced).
+ error::Error DoDrawArrays(
+ const char* function_name,
+ bool instanced, GLenum mode, GLint first, GLsizei count,
+ GLsizei primcount);
+ error::Error DoDrawElements(
+ const char* function_name,
+ bool instanced, GLenum mode, GLsizei count, GLenum type,
+ int32 offset, GLsizei primcount);
+
+ GLenum GetBindTargetForSamplerType(GLenum type) {
+ DCHECK(type == GL_SAMPLER_2D || type == GL_SAMPLER_CUBE ||
+ type == GL_SAMPLER_EXTERNAL_OES || type == GL_SAMPLER_2D_RECT_ARB);
+ switch (type) {
+ case GL_SAMPLER_2D:
+ return GL_TEXTURE_2D;
+ case GL_SAMPLER_CUBE:
+ return GL_TEXTURE_CUBE_MAP;
+ case GL_SAMPLER_EXTERNAL_OES:
+ return GL_TEXTURE_EXTERNAL_OES;
+ case GL_SAMPLER_2D_RECT_ARB:
+ return GL_TEXTURE_RECTANGLE_ARB;
+ }
+
+ NOTREACHED();
+ return 0;
+ }
+
+ // Gets the framebuffer info for a particular target.
+ Framebuffer* GetFramebufferInfoForTarget(GLenum target) {
+ Framebuffer* framebuffer = NULL;
+ switch (target) {
+ case GL_FRAMEBUFFER:
+ case GL_DRAW_FRAMEBUFFER_EXT:
+ framebuffer = framebuffer_state_.bound_draw_framebuffer.get();
+ break;
+ case GL_READ_FRAMEBUFFER_EXT:
+ framebuffer = framebuffer_state_.bound_read_framebuffer.get();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ return framebuffer;
+ }
+
+ Renderbuffer* GetRenderbufferInfoForTarget(
+ GLenum target) {
+ Renderbuffer* renderbuffer = NULL;
+ switch (target) {
+ case GL_RENDERBUFFER:
+ renderbuffer = state_.bound_renderbuffer.get();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ return renderbuffer;
+ }
+
+ // Validates the program and location for a glGetUniform call and returns
+ // a SizeResult setup to receive the result. Returns true if glGetUniform
+ // should be called.
+ bool GetUniformSetup(
+ GLuint program, GLint fake_location,
+ uint32 shm_id, uint32 shm_offset,
+ error::Error* error, GLint* real_location, GLuint* service_id,
+ void** result, GLenum* result_type);
+
+ virtual bool WasContextLost() OVERRIDE;
+ virtual bool WasContextLostByRobustnessExtension() OVERRIDE;
+ virtual void LoseContext(uint32 reset_status) OVERRIDE;
+
+#if defined(OS_MACOSX)
+ void ReleaseIOSurfaceForTexture(GLuint texture_id);
+#endif
+
+ bool ValidateCompressedTexDimensions(
+ const char* function_name,
+ GLint level, GLsizei width, GLsizei height, GLenum format);
+ bool ValidateCompressedTexFuncData(
+ const char* function_name,
+ GLsizei width, GLsizei height, GLenum format, size_t size);
+ bool ValidateCompressedTexSubDimensions(
+ const char* function_name,
+ GLenum target, GLint level, GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height, GLenum format,
+ Texture* texture);
+
+ void RenderWarning(const char* filename, int line, const std::string& msg);
+ void PerformanceWarning(
+ const char* filename, int line, const std::string& msg);
+
+ const FeatureInfo::FeatureFlags& features() const {
+ return feature_info_->feature_flags();
+ }
+
+ const FeatureInfo::Workarounds& workarounds() const {
+ return feature_info_->workarounds();
+ }
+
+ bool ShouldDeferDraws() {
+ return !offscreen_target_frame_buffer_.get() &&
+ framebuffer_state_.bound_draw_framebuffer.get() == NULL &&
+ surface_->DeferDraws();
+ }
+
+ bool ShouldDeferReads() {
+ return !offscreen_target_frame_buffer_.get() &&
+ framebuffer_state_.bound_read_framebuffer.get() == NULL &&
+ surface_->DeferDraws();
+ }
+
+ error::Error WillAccessBoundFramebufferForDraw() {
+ if (ShouldDeferDraws())
+ return error::kDeferCommandUntilLater;
+ if (!offscreen_target_frame_buffer_.get() &&
+ !framebuffer_state_.bound_draw_framebuffer.get() &&
+ !surface_->SetBackbufferAllocation(true))
+ return error::kLostContext;
+ return error::kNoError;
+ }
+
+ error::Error WillAccessBoundFramebufferForRead() {
+ if (ShouldDeferReads())
+ return error::kDeferCommandUntilLater;
+ if (!offscreen_target_frame_buffer_.get() &&
+ !framebuffer_state_.bound_read_framebuffer.get() &&
+ !surface_->SetBackbufferAllocation(true))
+ return error::kLostContext;
+ return error::kNoError;
+ }
+
+ // Set remaining commands to process to 0 to force DoCommands to return
+ // and allow context preemption and GPU watchdog checks in GpuScheduler().
+ void ExitCommandProcessingEarly() { commands_to_process_ = 0; }
+
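+ // Helpers for processing pending asynchronous ReadPixels requests.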
+ void ProcessPendingReadPixels();
+ void FinishReadPixels(const cmds::ReadPixels& c, GLuint buffer);
+
+ // Generate a member function prototype for each command in an automated and
+ // typesafe way.
+#define GLES2_CMD_OP(name) \
+ Error Handle##name(uint32 immediate_data_size, const void* data);
+
+ GLES2_COMMAND_LIST(GLES2_CMD_OP)
+
+ #undef GLES2_CMD_OP
+
+ // The GL context this decoder renders to on behalf of the client.
+ scoped_refptr<gfx::GLSurface> surface_;
+ scoped_refptr<gfx::GLContext> context_;
+
+ // The ContextGroup this decoder uses to track resources.
+ scoped_refptr<ContextGroup> group_;
+
+ DebugMarkerManager debug_marker_manager_;
+ Logger logger_;
+
+ // All the state for this context.
+ ContextState state_;
+
+ // Current width and height of the offscreen frame buffer.
+ gfx::Size offscreen_size_;
+
+ // Util to help with GL.
+ GLES2Util util_;
+
+ // Unpack flip y as last set by glPixelStorei.
+ bool unpack_flip_y_;
+
+ // Unpack (un)premultiply alpha as last set by glPixelStorei.
+ bool unpack_premultiply_alpha_;
+ bool unpack_unpremultiply_alpha_;
+
+ // The buffer we bind to attrib 0 since OpenGL requires it (ES does not).
+ GLuint attrib_0_buffer_id_;
+
+ // The value currently in attrib_0.
+ Vec4 attrib_0_value_;
+
+ // Whether or not the attrib_0 buffer holds the attrib_0_value.
+ bool attrib_0_buffer_matches_value_;
+
+ // The size of attrib 0.
+ GLsizei attrib_0_size_;
+
+ // The buffer used to simulate GL_FIXED attribs.
+ GLuint fixed_attrib_buffer_id_;
+
+ // The size of the fixed attrib buffer.
+ GLsizei fixed_attrib_buffer_size_;
+
+ // The offscreen frame buffer that the client renders to. With EGL, the
+ // depth and stencil buffers are separate. With regular GL there is a single
+ // packed depth stencil buffer in offscreen_target_depth_render_buffer_.
+ // offscreen_target_stencil_render_buffer_ is unused.
+ scoped_ptr<BackFramebuffer> offscreen_target_frame_buffer_;
+ scoped_ptr<BackTexture> offscreen_target_color_texture_;
+ scoped_ptr<BackRenderbuffer> offscreen_target_color_render_buffer_;
+ scoped_ptr<BackRenderbuffer> offscreen_target_depth_render_buffer_;
+ scoped_ptr<BackRenderbuffer> offscreen_target_stencil_render_buffer_;
+ GLenum offscreen_target_color_format_;
+ GLenum offscreen_target_depth_format_;
+ GLenum offscreen_target_stencil_format_;
+ GLsizei offscreen_target_samples_;
+ GLboolean offscreen_target_buffer_preserved_;
+
+ // The copy that is saved when SwapBuffers is called.
+ scoped_ptr<BackFramebuffer> offscreen_saved_frame_buffer_;
+ scoped_ptr<BackTexture> offscreen_saved_color_texture_;
+ scoped_refptr<TextureRef>
+ offscreen_saved_color_texture_info_;
+
+ // The copy that is used as the destination for multi-sample resolves.
+ scoped_ptr<BackFramebuffer> offscreen_resolved_frame_buffer_;
+ scoped_ptr<BackTexture> offscreen_resolved_color_texture_;
+ GLenum offscreen_saved_color_format_;
+
+ scoped_ptr<QueryManager> query_manager_;
+
+ scoped_ptr<VertexArrayManager> vertex_array_manager_;
+
+ scoped_ptr<ImageManager> image_manager_;
+
+ base::Callback<void(gfx::Size, float)> resize_callback_;
+
+ WaitSyncPointCallback wait_sync_point_callback_;
+
+ ShaderCacheCallback shader_cache_callback_;
+
+ scoped_ptr<AsyncPixelTransferManager> async_pixel_transfer_manager_;
+
+ // The format of the back buffer.
+ GLenum back_buffer_color_format_;
+ bool back_buffer_has_depth_;
+ bool back_buffer_has_stencil_;
+
+ bool surfaceless_;
+
+ // Backbuffer attachments that are currently undefined.
+ uint32 backbuffer_needs_clear_bits_;
+
+ // The current decoder error. Used to communicate errors from command
+ // processing functions that do not return an error value. Should be set only
+ // if not returning an error.
+ error::Error current_decoder_error_;
+
+ bool use_shader_translator_;
+ scoped_refptr<ShaderTranslator> vertex_translator_;
+ scoped_refptr<ShaderTranslator> fragment_translator_;
+
+ DisallowedFeatures disallowed_features_;
+
+ // Cached from ContextGroup
+ const Validators* validators_;
+ scoped_refptr<FeatureInfo> feature_info_;
+
+ int frame_number_;
+
+ // Number of commands remaining to be processed in DoCommands().
+ int commands_to_process_;
+
+ bool has_robustness_extension_;
+ GLenum reset_status_;
+ bool reset_by_robustness_extension_;
+ bool supports_post_sub_buffer_;
+
+ // These flags are used to override the state of the shared feature_info_
+ // member. Because the same FeatureInfo instance may be shared among many
+ // contexts, the assumptions on the availability of extensions in WebGL
+ // contexts may be broken. These flags override the shared state to preserve
+ // WebGL semantics.
+ bool force_webgl_glsl_validation_;
+ bool derivatives_explicitly_enabled_;
+ bool frag_depth_explicitly_enabled_;
+ bool draw_buffers_explicitly_enabled_;
+ bool shader_texture_lod_explicitly_enabled_;
+
+ bool compile_shader_always_succeeds_;
+
+ // An optional behavior: lose the context and group when out of memory.
+ bool lose_context_when_out_of_memory_;
+
+ // Log extra info.
+ bool service_logging_;
+
+#if defined(OS_MACOSX)
+ typedef std::map<GLuint, IOSurfaceRef> TextureToIOSurfaceMap;
+ TextureToIOSurfaceMap texture_to_io_surface_map_;
+#endif
+
+ scoped_ptr<CopyTextureCHROMIUMResourceManager> copy_texture_CHROMIUM_;
+
+ // Cached values of the currently assigned viewport dimensions.
+ GLsizei viewport_max_width_;
+ GLsizei viewport_max_height_;
+
+ // Command buffer stats.
+ base::TimeDelta total_processing_commands_time_;
+
+ // States related to each manager.
+ DecoderTextureState texture_state_;
+ DecoderFramebufferState framebuffer_state_;
+
+ scoped_ptr<GPUTracer> gpu_tracer_;
+ scoped_ptr<GPUStateTracer> gpu_state_tracer_;
+ const unsigned char* cb_command_trace_category_;
+ int gpu_trace_level_;
+ bool gpu_trace_commands_;
+ bool gpu_debug_commands_;
+
+ std::queue<linked_ptr<FenceCallback> > pending_readpixel_fences_;
+
+ // Used to validate multisample renderbuffers if needed
+ GLuint validation_texture_;
+ GLuint validation_fbo_multisample_;
+ GLuint validation_fbo_;
+
+ typedef gpu::gles2::GLES2Decoder::Error (GLES2DecoderImpl::*CmdHandler)(
+ uint32 immediate_data_size,
+ const void* data);
+
+ // A struct to hold info about each command.
+ struct CommandInfo {
+ CmdHandler cmd_handler;
+ uint8 arg_flags; // How to handle the arguments for this command
+ uint8 cmd_flags; // How to handle this command
+ uint16 arg_count; // How many arguments are expected for this command.
+ };
+
+ // A table of CommandInfo for all the commands.
+ static const CommandInfo command_info[kNumCommands - kStartPoint];
+
+ DISALLOW_COPY_AND_ASSIGN(GLES2DecoderImpl);
+};
+
+const GLES2DecoderImpl::CommandInfo GLES2DecoderImpl::command_info[] = {
+#define GLES2_CMD_OP(name) \
+ { \
+ &GLES2DecoderImpl::Handle##name, cmds::name::kArgFlags, \
+ cmds::name::cmd_flags, \
+ sizeof(cmds::name) / sizeof(CommandBufferEntry) - 1, \
+ } \
+ , /* NOLINT */
+ GLES2_COMMAND_LIST(GLES2_CMD_OP)
+#undef GLES2_CMD_OP
+};
+
+ScopedGLErrorSuppressor::ScopedGLErrorSuppressor(
+ const char* function_name, ErrorState* error_state)
+ : function_name_(function_name),
+ error_state_(error_state) {
+ ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state_, function_name_);
+}
+
+ScopedGLErrorSuppressor::~ScopedGLErrorSuppressor() {
+ ERRORSTATE_CLEAR_REAL_GL_ERRORS(error_state_, function_name_);
+}
+
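+ // Re-binds |target| on the current texture unit to the texture tracked for
+ // unit 0 in |state|, then restores the active texture unit.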
+static void RestoreCurrentTextureBindings(ContextState* state, GLenum target) {
+ TextureUnit& info = state->texture_units[0];
+ GLuint last_id;
+ scoped_refptr<TextureRef> texture_ref;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ texture_ref = info.bound_texture_2d;
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ texture_ref = info.bound_texture_cube_map;
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ texture_ref = info.bound_texture_external_oes;
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ texture_ref = info.bound_texture_rectangle_arb;
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ if (texture_ref.get()) {
+ last_id = texture_ref->service_id();
+ } else {
+ last_id = 0;
+ }
+
+ glBindTexture(target, last_id);
+ glActiveTexture(GL_TEXTURE0 + state->active_texture_unit);
+}
+
+ScopedTextureBinder::ScopedTextureBinder(ContextState* state,
+ GLuint id,
+ GLenum target)
+ : state_(state),
+ target_(target) {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedTextureBinder::ctor", state_->GetErrorState());
+
+ // TODO(apatrick): Check if there are any other states that need to be reset
+ // before binding a new texture.
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(target, id);
+}
+
+ScopedTextureBinder::~ScopedTextureBinder() {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedTextureBinder::dtor", state_->GetErrorState());
+ RestoreCurrentTextureBindings(state_, target_);
+}
+
+ScopedRenderBufferBinder::ScopedRenderBufferBinder(ContextState* state,
+ GLuint id)
+ : state_(state) {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedRenderBufferBinder::ctor", state_->GetErrorState());
+ glBindRenderbufferEXT(GL_RENDERBUFFER, id);
+}
+
+ScopedRenderBufferBinder::~ScopedRenderBufferBinder() {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedRenderBufferBinder::dtor", state_->GetErrorState());
+ state_->RestoreRenderbufferBindings();
+}
+
+ScopedFrameBufferBinder::ScopedFrameBufferBinder(GLES2DecoderImpl* decoder,
+ GLuint id)
+ : decoder_(decoder) {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedFrameBufferBinder::ctor", decoder_->GetErrorState());
+ glBindFramebufferEXT(GL_FRAMEBUFFER, id);
+ decoder->OnFboChanged();
+}
+
+ScopedFrameBufferBinder::~ScopedFrameBufferBinder() {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedFrameBufferBinder::dtor", decoder_->GetErrorState());
+ decoder_->RestoreCurrentFramebufferBindings();
+}
+
+ScopedResolvedFrameBufferBinder::ScopedResolvedFrameBufferBinder(
+ GLES2DecoderImpl* decoder, bool enforce_internal_framebuffer, bool internal)
+ : decoder_(decoder) {
+ resolve_and_bind_ = (
+ decoder_->offscreen_target_frame_buffer_.get() &&
+ decoder_->IsOffscreenBufferMultisampled() &&
+ (!decoder_->framebuffer_state_.bound_read_framebuffer.get() ||
+ enforce_internal_framebuffer));
+ if (!resolve_and_bind_)
+ return;
+
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedResolvedFrameBufferBinder::ctor", decoder_->GetErrorState());
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER_EXT,
+ decoder_->offscreen_target_frame_buffer_->id());
+ GLuint targetid;
+ if (internal) {
+ if (!decoder_->offscreen_resolved_frame_buffer_.get()) {
+ decoder_->offscreen_resolved_frame_buffer_.reset(
+ new BackFramebuffer(decoder_));
+ decoder_->offscreen_resolved_frame_buffer_->Create();
+ decoder_->offscreen_resolved_color_texture_.reset(
+ new BackTexture(decoder->memory_tracker(), &decoder->state_));
+ decoder_->offscreen_resolved_color_texture_->Create();
+
+ DCHECK(decoder_->offscreen_saved_color_format_);
+ decoder_->offscreen_resolved_color_texture_->AllocateStorage(
+ decoder_->offscreen_size_, decoder_->offscreen_saved_color_format_,
+ false);
+ decoder_->offscreen_resolved_frame_buffer_->AttachRenderTexture(
+ decoder_->offscreen_resolved_color_texture_.get());
+ if (decoder_->offscreen_resolved_frame_buffer_->CheckStatus() !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOG(ERROR) << "ScopedResolvedFrameBufferBinder failed "
+ << "because offscreen resolved FBO was incomplete.";
+ return;
+ }
+ }
+ targetid = decoder_->offscreen_resolved_frame_buffer_->id();
+ } else {
+ targetid = decoder_->offscreen_saved_frame_buffer_->id();
+ }
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, targetid);
+ const int width = decoder_->offscreen_size_.width();
+ const int height = decoder_->offscreen_size_.height();
+ decoder->state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ decoder->BlitFramebufferHelper(0,
+ 0,
+ width,
+ height,
+ 0,
+ 0,
+ width,
+ height,
+ GL_COLOR_BUFFER_BIT,
+ GL_NEAREST);
+ glBindFramebufferEXT(GL_FRAMEBUFFER, targetid);
+}
+
+ScopedResolvedFrameBufferBinder::~ScopedResolvedFrameBufferBinder() {
+ if (!resolve_and_bind_)
+ return;
+
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedResolvedFrameBufferBinder::dtor", decoder_->GetErrorState());
+ decoder_->RestoreCurrentFramebufferBindings();
+ if (decoder_->state_.enable_flags.scissor_test) {
+ decoder_->state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, true);
+ }
+}
+
+BackTexture::BackTexture(
+ MemoryTracker* memory_tracker,
+ ContextState* state)
+ : memory_tracker_(memory_tracker, MemoryTracker::kUnmanaged),
+ state_(state),
+ bytes_allocated_(0),
+ id_(0) {
+}
+
+BackTexture::~BackTexture() {
+ // This does not destroy the render texture because that would require that
+ // the associated GL context be current. Just check that it was explicitly
+ // destroyed.
+ DCHECK_EQ(id_, 0u);
+}
+
+void BackTexture::Create() {
+ ScopedGLErrorSuppressor suppressor("BackTexture::Create",
+ state_->GetErrorState());
+ Destroy();
+ glGenTextures(1, &id_);
+ ScopedTextureBinder binder(state_, id_, GL_TEXTURE_2D);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ // TODO(apatrick): Attempt to diagnose crbug.com/97775. If SwapBuffers is
+ // never called on an offscreen context, no data will ever be uploaded to the
+ // saved offscreen color texture (it is deferred until SwapBuffers
+ // is called). My idea is that some nvidia drivers might have a bug where
+ // deleting a texture that has never been populated might cause a
+ // crash.
+ glTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 16, 16, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
+
+ bytes_allocated_ = 16u * 16u * 4u;
+ memory_tracker_.TrackMemAlloc(bytes_allocated_);
+}
+
+bool BackTexture::AllocateStorage(
+ const gfx::Size& size, GLenum format, bool zero) {
+ DCHECK_NE(id_, 0u);
+ ScopedGLErrorSuppressor suppressor("BackTexture::AllocateStorage",
+ state_->GetErrorState());
+ ScopedTextureBinder binder(state_, id_, GL_TEXTURE_2D);
+ uint32 image_size = 0;
+ GLES2Util::ComputeImageDataSizes(
+ size.width(), size.height(), format, GL_UNSIGNED_BYTE, 8, &image_size,
+ NULL, NULL);
+
+ if (!memory_tracker_.EnsureGPUMemoryAvailable(image_size)) {
+ return false;
+ }
+
+ scoped_ptr<char[]> zero_data;
+ if (zero) {
+ zero_data.reset(new char[image_size]);
+ memset(zero_data.get(), 0, image_size);
+ }
+
+ glTexImage2D(GL_TEXTURE_2D,
+ 0, // mip level
+ format,
+ size.width(),
+ size.height(),
+ 0, // border
+ format,
+ GL_UNSIGNED_BYTE,
+ zero_data.get());
+
+ size_ = size;
+
+ bool success = glGetError() == GL_NO_ERROR;
+ if (success) {
+ memory_tracker_.TrackMemFree(bytes_allocated_);
+ bytes_allocated_ = image_size;
+ memory_tracker_.TrackMemAlloc(bytes_allocated_);
+ }
+ return success;
+}
+
+void BackTexture::Copy(const gfx::Size& size, GLenum format) {
+ DCHECK_NE(id_, 0u);
+ ScopedGLErrorSuppressor suppressor("BackTexture::Copy",
+ state_->GetErrorState());
+ ScopedTextureBinder binder(state_, id_, GL_TEXTURE_2D);
+ glCopyTexImage2D(GL_TEXTURE_2D,
+ 0, // level
+ format,
+ 0, 0,
+ size.width(),
+ size.height(),
+ 0); // border
+}
+
+void BackTexture::Destroy() {
+ if (id_ != 0) {
+ ScopedGLErrorSuppressor suppressor("BackTexture::Destroy",
+ state_->GetErrorState());
+ glDeleteTextures(1, &id_);
+ id_ = 0;
+ }
+ memory_tracker_.TrackMemFree(bytes_allocated_);
+ bytes_allocated_ = 0;
+}
+
+void BackTexture::Invalidate() {
+ id_ = 0;
+}
+
+BackRenderbuffer::BackRenderbuffer(
+ RenderbufferManager* renderbuffer_manager,
+ MemoryTracker* memory_tracker,
+ ContextState* state)
+ : renderbuffer_manager_(renderbuffer_manager),
+ memory_tracker_(memory_tracker, MemoryTracker::kUnmanaged),
+ state_(state),
+ bytes_allocated_(0),
+ id_(0) {
+}
+
+BackRenderbuffer::~BackRenderbuffer() {
+ // This does not destroy the render buffer because that would require that
+ // the associated GL context be current. Just check that it was explicitly
+ // destroyed.
+ DCHECK_EQ(id_, 0u);
+}
+
+void BackRenderbuffer::Create() {
+ ScopedGLErrorSuppressor suppressor("BackRenderbuffer::Create",
+ state_->GetErrorState());
+ Destroy();
+ glGenRenderbuffersEXT(1, &id_);
+}
+
+bool BackRenderbuffer::AllocateStorage(const FeatureInfo* feature_info,
+ const gfx::Size& size,
+ GLenum format,
+ GLsizei samples) {
+ ScopedGLErrorSuppressor suppressor(
+ "BackRenderbuffer::AllocateStorage", state_->GetErrorState());
+ ScopedRenderBufferBinder binder(state_, id_);
+
+ uint32 estimated_size = 0;
+ if (!renderbuffer_manager_->ComputeEstimatedRenderbufferSize(
+ size.width(), size.height(), samples, format, &estimated_size)) {
+ return false;
+ }
+
+ if (!memory_tracker_.EnsureGPUMemoryAvailable(estimated_size)) {
+ return false;
+ }
+
+ if (samples <= 1) {
+ glRenderbufferStorageEXT(GL_RENDERBUFFER,
+ format,
+ size.width(),
+ size.height());
+ } else {
+ GLES2DecoderImpl::RenderbufferStorageMultisampleHelper(feature_info,
+ GL_RENDERBUFFER,
+ samples,
+ format,
+ size.width(),
+ size.height());
+ }
+ bool success = glGetError() == GL_NO_ERROR;
+ if (success) {
+ // Mark the previously allocated bytes as free.
+ memory_tracker_.TrackMemFree(bytes_allocated_);
+ bytes_allocated_ = estimated_size;
+ // Track the newly allocated bytes.
+ memory_tracker_.TrackMemAlloc(bytes_allocated_);
+ }
+ return success;
+}
+
+void BackRenderbuffer::Destroy() {
+ if (id_ != 0) {
+ ScopedGLErrorSuppressor suppressor("BackRenderbuffer::Destroy",
+ state_->GetErrorState());
+ glDeleteRenderbuffersEXT(1, &id_);
+ id_ = 0;
+ }
+ memory_tracker_.TrackMemFree(bytes_allocated_);
+ bytes_allocated_ = 0;
+}
+
+void BackRenderbuffer::Invalidate() {
+ id_ = 0;
+}
+
+BackFramebuffer::BackFramebuffer(GLES2DecoderImpl* decoder)
+ : decoder_(decoder),
+ id_(0) {
+}
+
+BackFramebuffer::~BackFramebuffer() {
+ // This does not destroy the frame buffer because that would require that
+ // the associated GL context be current. Just check that it was explicitly
+ // destroyed.
+ DCHECK_EQ(id_, 0u);
+}
+
+void BackFramebuffer::Create() {
+ ScopedGLErrorSuppressor suppressor("BackFramebuffer::Create",
+ decoder_->GetErrorState());
+ Destroy();
+ glGenFramebuffersEXT(1, &id_);
+}
+
+void BackFramebuffer::AttachRenderTexture(BackTexture* texture) {
+ DCHECK_NE(id_, 0u);
+ ScopedGLErrorSuppressor suppressor(
+ "BackFramebuffer::AttachRenderTexture", decoder_->GetErrorState());
+ ScopedFrameBufferBinder binder(decoder_, id_);
+ GLuint attach_id = texture ? texture->id() : 0;
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ attach_id,
+ 0);
+}
+
+void BackFramebuffer::AttachRenderBuffer(GLenum target,
+ BackRenderbuffer* render_buffer) {
+ DCHECK_NE(id_, 0u);
+ ScopedGLErrorSuppressor suppressor(
+ "BackFramebuffer::AttachRenderBuffer", decoder_->GetErrorState());
+ ScopedFrameBufferBinder binder(decoder_, id_);
+ GLuint attach_id = render_buffer ? render_buffer->id() : 0;
+ glFramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ target,
+ GL_RENDERBUFFER,
+ attach_id);
+}
+
+void BackFramebuffer::Destroy() {
+ if (id_ != 0) {
+ ScopedGLErrorSuppressor suppressor("BackFramebuffer::Destroy",
+ decoder_->GetErrorState());
+ glDeleteFramebuffersEXT(1, &id_);
+ id_ = 0;
+ }
+}
+
+void BackFramebuffer::Invalidate() {
+ id_ = 0;
+}
+
+GLenum BackFramebuffer::CheckStatus() {
+ DCHECK_NE(id_, 0u);
+ ScopedGLErrorSuppressor suppressor("BackFramebuffer::CheckStatus",
+ decoder_->GetErrorState());
+ ScopedFrameBufferBinder binder(decoder_, id_);
+ return glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
+}
+
+GLES2Decoder* GLES2Decoder::Create(ContextGroup* group) {
+ return new GLES2DecoderImpl(group);
+}
+
+GLES2DecoderImpl::GLES2DecoderImpl(ContextGroup* group)
+ : GLES2Decoder(),
+ group_(group),
+ logger_(&debug_marker_manager_),
+ state_(group_->feature_info(), this, &logger_),
+ unpack_flip_y_(false),
+ unpack_premultiply_alpha_(false),
+ unpack_unpremultiply_alpha_(false),
+ attrib_0_buffer_id_(0),
+ attrib_0_buffer_matches_value_(true),
+ attrib_0_size_(0),
+ fixed_attrib_buffer_id_(0),
+ fixed_attrib_buffer_size_(0),
+ offscreen_target_color_format_(0),
+ offscreen_target_depth_format_(0),
+ offscreen_target_stencil_format_(0),
+ offscreen_target_samples_(0),
+ offscreen_target_buffer_preserved_(true),
+ offscreen_saved_color_format_(0),
+ back_buffer_color_format_(0),
+ back_buffer_has_depth_(false),
+ back_buffer_has_stencil_(false),
+ surfaceless_(false),
+ backbuffer_needs_clear_bits_(0),
+ current_decoder_error_(error::kNoError),
+ use_shader_translator_(true),
+ validators_(group_->feature_info()->validators()),
+ feature_info_(group_->feature_info()),
+ frame_number_(0),
+ has_robustness_extension_(false),
+ reset_status_(GL_NO_ERROR),
+ reset_by_robustness_extension_(false),
+ supports_post_sub_buffer_(false),
+ force_webgl_glsl_validation_(false),
+ derivatives_explicitly_enabled_(false),
+ frag_depth_explicitly_enabled_(false),
+ draw_buffers_explicitly_enabled_(false),
+ shader_texture_lod_explicitly_enabled_(false),
+ compile_shader_always_succeeds_(false),
+ lose_context_when_out_of_memory_(false),
+ service_logging_(CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUServiceLoggingGPU)),
+ viewport_max_width_(0),
+ viewport_max_height_(0),
+ texture_state_(group_->feature_info()
+ ->workarounds()
+ .texsubimage2d_faster_than_teximage2d),
+ cb_command_trace_category_(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("cb_command"))),
+ gpu_trace_level_(2),
+ gpu_trace_commands_(false),
+ gpu_debug_commands_(false),
+ validation_texture_(0),
+ validation_fbo_multisample_(0),
+ validation_fbo_(0) {
+ DCHECK(group);
+
+ attrib_0_value_.v[0] = 0.0f;
+ attrib_0_value_.v[1] = 0.0f;
+ attrib_0_value_.v[2] = 0.0f;
+ attrib_0_value_.v[3] = 1.0f;
+
+ // The shader translator is used for WebGL even when running on EGL
+ // because additional restrictions are needed (like only enabling
+ // GL_OES_standard_derivatives on demand). It is used for the unit
+ // tests because GLES2DecoderWithShaderTest.GetShaderInfoLogValidArgs passes
+ // the empty string to CompileShader and this is not a valid shader.
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationMockGL ||
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableGLSLTranslator)) {
+ use_shader_translator_ = false;
+ }
+}
+
+GLES2DecoderImpl::~GLES2DecoderImpl() {
+}
+
+bool GLES2DecoderImpl::Initialize(
+ const scoped_refptr<gfx::GLSurface>& surface,
+ const scoped_refptr<gfx::GLContext>& context,
+ bool offscreen,
+ const gfx::Size& size,
+ const DisallowedFeatures& disallowed_features,
+ const std::vector<int32>& attribs) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::Initialize");
+ DCHECK(context->IsCurrent(surface.get()));
+ DCHECK(!context_.get());
+
+ surfaceless_ = surface->IsSurfaceless();
+
+ set_initialized();
+ gpu_tracer_.reset(new GPUTracer(this));
+ gpu_state_tracer_ = GPUStateTracer::Create(&state_);
+
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUDebugging)) {
+ set_debug(true);
+ }
+
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUCommandLogging)) {
+ set_log_commands(true);
+ }
+
+ compile_shader_always_succeeds_ = CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kCompileShaderAlwaysSucceeds);
+
+
+ // Take ownership of the context and surface. The surface can be replaced with
+ // SetSurface.
+ context_ = context;
+ surface_ = surface;
+
+ ContextCreationAttribHelper attrib_parser;
+ if (!attrib_parser.Parse(attribs))
+ return false;
+
+ // Save the loseContextWhenOutOfMemory context creation attribute.
+ lose_context_when_out_of_memory_ =
+ attrib_parser.lose_context_when_out_of_memory;
+
+ // If the failIfMajorPerformanceCaveat context creation attribute was true
+ // and we are using a software renderer, fail.
+ if (attrib_parser.fail_if_major_perf_caveat &&
+ feature_info_->feature_flags().is_swiftshader) {
+ group_ = NULL; // Must not destroy ContextGroup if it is not initialized.
+ Destroy(true);
+ return false;
+ }
+
+ if (!group_->Initialize(this, disallowed_features)) {
+ LOG(ERROR) << "GpuScheduler::InitializeCommon failed because group "
+ << "failed to initialize.";
+ group_ = NULL; // Must not destroy ContextGroup if it is not initialized.
+ Destroy(true);
+ return false;
+ }
+ CHECK_GL_ERROR();
+
+ disallowed_features_ = disallowed_features;
+
+ state_.attrib_values.resize(group_->max_vertex_attribs());
+ vertex_array_manager_.reset(new VertexArrayManager());
+
+ GLuint default_vertex_attrib_service_id = 0;
+ if (features().native_vertex_array_object) {
+ glGenVertexArraysOES(1, &default_vertex_attrib_service_id);
+ glBindVertexArrayOES(default_vertex_attrib_service_id);
+ }
+
+ state_.default_vertex_attrib_manager =
+ CreateVertexAttribManager(0, default_vertex_attrib_service_id, false);
+
+ state_.default_vertex_attrib_manager->Initialize(
+ group_->max_vertex_attribs(),
+ feature_info_->workarounds().init_vertex_attributes);
+
+ // vertex_attrib_manager is set to default_vertex_attrib_manager by this call
+ DoBindVertexArrayOES(0);
+
+ query_manager_.reset(new QueryManager(this, feature_info_.get()));
+
+ image_manager_.reset(new ImageManager);
+
+ util_.set_num_compressed_texture_formats(
+ validators_->compressed_texture_format.GetValues().size());
+
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
+ // We have to enable vertex array 0 on OpenGL or it won't render. Note that
+ // OpenGL ES 2.0 does not have this issue.
+ glEnableVertexAttribArray(0);
+ }
+ glGenBuffersARB(1, &attrib_0_buffer_id_);
+ glBindBuffer(GL_ARRAY_BUFFER, attrib_0_buffer_id_);
+ glVertexAttribPointer(0, 1, GL_FLOAT, GL_FALSE, 0, NULL);
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ glGenBuffersARB(1, &fixed_attrib_buffer_id_);
+
+ state_.texture_units.resize(group_->max_texture_units());
+ for (uint32 tt = 0; tt < state_.texture_units.size(); ++tt) {
+ glActiveTexture(GL_TEXTURE0 + tt);
+ // We want the last bind to be 2D.
+ TextureRef* ref;
+ if (features().oes_egl_image_external) {
+ ref = texture_manager()->GetDefaultTextureInfo(
+ GL_TEXTURE_EXTERNAL_OES);
+ state_.texture_units[tt].bound_texture_external_oes = ref;
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, ref ? ref->service_id() : 0);
+ }
+ if (features().arb_texture_rectangle) {
+ ref = texture_manager()->GetDefaultTextureInfo(
+ GL_TEXTURE_RECTANGLE_ARB);
+ state_.texture_units[tt].bound_texture_rectangle_arb = ref;
+ glBindTexture(GL_TEXTURE_RECTANGLE_ARB, ref ? ref->service_id() : 0);
+ }
+ ref = texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_CUBE_MAP);
+ state_.texture_units[tt].bound_texture_cube_map = ref;
+ glBindTexture(GL_TEXTURE_CUBE_MAP, ref ? ref->service_id() : 0);
+ ref = texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_2D);
+ state_.texture_units[tt].bound_texture_2d = ref;
+ glBindTexture(GL_TEXTURE_2D, ref ? ref->service_id() : 0);
+ }
+ glActiveTexture(GL_TEXTURE0);
+ CHECK_GL_ERROR();
+
+ if (offscreen) {
+ if (attrib_parser.samples > 0 && attrib_parser.sample_buffers > 0 &&
+ features().chromium_framebuffer_multisample) {
+ // Per ext_framebuffer_multisample spec, need max bound on sample count.
+ // max_sample_count must be initialized to a sane value. If
+ // glGetIntegerv() throws a GL error, it leaves its argument unchanged.
+ GLint max_sample_count = 1;
+ glGetIntegerv(GL_MAX_SAMPLES_EXT, &max_sample_count);
+ offscreen_target_samples_ = std::min(attrib_parser.samples,
+ max_sample_count);
+ } else {
+ offscreen_target_samples_ = 1;
+ }
+ offscreen_target_buffer_preserved_ = attrib_parser.buffer_preserved;
+
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ const bool rgb8_supported =
+ context_->HasExtension("GL_OES_rgb8_rgba8");
+ // The only available default render buffer formats in GLES2 have very
+ // little precision. Don't enable multisampling unless 8-bit render
+ // buffer formats are available--instead fall back to 8-bit textures.
+ if (rgb8_supported && offscreen_target_samples_ > 1) {
+ offscreen_target_color_format_ = attrib_parser.alpha_size > 0 ?
+ GL_RGBA8 : GL_RGB8;
+ } else {
+ offscreen_target_samples_ = 1;
+ offscreen_target_color_format_ = attrib_parser.alpha_size > 0 ?
+ GL_RGBA : GL_RGB;
+ }
+
+ // ANGLE only supports packed depth/stencil formats, so use it if it is
+ // available.
+ const bool depth24_stencil8_supported =
+ feature_info_->feature_flags().packed_depth24_stencil8;
+ VLOG(1) << "GL_OES_packed_depth_stencil "
+ << (depth24_stencil8_supported ? "" : "not ") << "supported.";
+ if ((attrib_parser.depth_size > 0 || attrib_parser.stencil_size > 0) &&
+ depth24_stencil8_supported) {
+ offscreen_target_depth_format_ = GL_DEPTH24_STENCIL8;
+ offscreen_target_stencil_format_ = 0;
+ } else {
+ // It may be the case that this depth/stencil combination is not
+ // supported, but this will be checked later by CheckFramebufferStatus.
+ offscreen_target_depth_format_ = attrib_parser.depth_size > 0 ?
+ GL_DEPTH_COMPONENT16 : 0;
+ offscreen_target_stencil_format_ = attrib_parser.stencil_size > 0 ?
+ GL_STENCIL_INDEX8 : 0;
+ }
+ } else {
+ offscreen_target_color_format_ = attrib_parser.alpha_size > 0 ?
+ GL_RGBA : GL_RGB;
+
+ // If depth is requested at all, use the packed depth stencil format if
+ // it's available, as some desktop GL drivers don't support any non-packed
+ // formats for depth attachments.
+ const bool depth24_stencil8_supported =
+ feature_info_->feature_flags().packed_depth24_stencil8;
+ VLOG(1) << "GL_EXT_packed_depth_stencil "
+ << (depth24_stencil8_supported ? "" : "not ") << "supported.";
+
+ if ((attrib_parser.depth_size > 0 || attrib_parser.stencil_size > 0) &&
+ depth24_stencil8_supported) {
+ offscreen_target_depth_format_ = GL_DEPTH24_STENCIL8;
+ offscreen_target_stencil_format_ = 0;
+ } else {
+ offscreen_target_depth_format_ = attrib_parser.depth_size > 0 ?
+ GL_DEPTH_COMPONENT : 0;
+ offscreen_target_stencil_format_ = attrib_parser.stencil_size > 0 ?
+ GL_STENCIL_INDEX : 0;
+ }
+ }
+
+ offscreen_saved_color_format_ = attrib_parser.alpha_size > 0 ?
+ GL_RGBA : GL_RGB;
+
+ // Create the target frame buffer. This is the one that the client renders
+ // directly to.
+ offscreen_target_frame_buffer_.reset(new BackFramebuffer(this));
+ offscreen_target_frame_buffer_->Create();
+ // Due to GLES2 format limitations, either the color texture (for
+ // non-multisampling) or the color render buffer (for multisampling) will be
+ // attached to the offscreen frame buffer. The render buffer has more
+ // limited formats available to it, but the texture can't do multisampling.
+ if (IsOffscreenBufferMultisampled()) {
+ offscreen_target_color_render_buffer_.reset(new BackRenderbuffer(
+ renderbuffer_manager(), memory_tracker(), &state_));
+ offscreen_target_color_render_buffer_->Create();
+ } else {
+ offscreen_target_color_texture_.reset(new BackTexture(
+ memory_tracker(), &state_));
+ offscreen_target_color_texture_->Create();
+ }
+ offscreen_target_depth_render_buffer_.reset(new BackRenderbuffer(
+ renderbuffer_manager(), memory_tracker(), &state_));
+ offscreen_target_depth_render_buffer_->Create();
+ offscreen_target_stencil_render_buffer_.reset(new BackRenderbuffer(
+ renderbuffer_manager(), memory_tracker(), &state_));
+ offscreen_target_stencil_render_buffer_->Create();
+
+ // Create the saved offscreen texture. The target frame buffer is copied
+ // here when SwapBuffers is called.
+ offscreen_saved_frame_buffer_.reset(new BackFramebuffer(this));
+ offscreen_saved_frame_buffer_->Create();
+ offscreen_saved_color_texture_.reset(new BackTexture(
+ memory_tracker(), &state_));
+ offscreen_saved_color_texture_->Create();
+
+    // Allocate the render buffers at their initial size and check that the
+    // status of the frame buffers is okay.
+ if (!ResizeOffscreenFrameBuffer(size)) {
+ LOG(ERROR) << "Could not allocate offscreen buffer storage.";
+ Destroy(true);
+ return false;
+ }
+
+ // Allocate the offscreen saved color texture.
+ DCHECK(offscreen_saved_color_format_);
+ offscreen_saved_color_texture_->AllocateStorage(
+ gfx::Size(1, 1), offscreen_saved_color_format_, true);
+
+ offscreen_saved_frame_buffer_->AttachRenderTexture(
+ offscreen_saved_color_texture_.get());
+ if (offscreen_saved_frame_buffer_->CheckStatus() !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOG(ERROR) << "Offscreen saved FBO was incomplete.";
+ Destroy(true);
+ return false;
+ }
+
+ // Bind to the new default frame buffer (the offscreen target frame buffer).
+ // This should now be associated with ID zero.
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0);
+ } else {
+ glBindFramebufferEXT(GL_FRAMEBUFFER, GetBackbufferServiceId());
+    // These flags do NOT indicate whether the back buffer actually has these
+    // properties. They indicate whether the command buffer should enforce them
+    // regardless of what the real back buffer provides, assuming the real back
+    // buffer gives us at least what we ask for. In other words, if we ask for
+    // RGB and get RGBA, we make it appear to be RGB. If, on the other hand, we
+    // ask for RGBA and get RGB, we can't do anything about that.
+
+ if (!surfaceless_) {
+ GLint v = 0;
+ glGetIntegerv(GL_ALPHA_BITS, &v);
+      // If the user requested RGBA and we have RGBA, then report RGBA. If the
+      // user requested RGB, then report RGB. If the user did not specify a
+      // preference, then use whatever we were given. Same for DEPTH and
+      // STENCIL.
+ back_buffer_color_format_ =
+ (attrib_parser.alpha_size != 0 && v > 0) ? GL_RGBA : GL_RGB;
+ glGetIntegerv(GL_DEPTH_BITS, &v);
+ back_buffer_has_depth_ = attrib_parser.depth_size != 0 && v > 0;
+ glGetIntegerv(GL_STENCIL_BITS, &v);
+ back_buffer_has_stencil_ = attrib_parser.stencil_size != 0 && v > 0;
+ }
+ }
+
+ // OpenGL ES 2.0 implicitly enables the desktop GL capability
+ // VERTEX_PROGRAM_POINT_SIZE and doesn't expose this enum. This fact
+ // isn't well documented; it was discovered in the Khronos OpenGL ES
+ // mailing list archives. It also implicitly enables the desktop GL
+ // capability GL_POINT_SPRITE to provide access to the gl_PointCoord
+ // variable in fragment shaders.
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
+ glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
+ glEnable(GL_POINT_SPRITE);
+ }
+
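+  // The robustness extensions expose a graphics reset status query, which
+  // helps detect lost contexts.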
+ has_robustness_extension_ =
+ context->HasExtension("GL_ARB_robustness") ||
+ context->HasExtension("GL_EXT_robustness");
+
+ if (!InitializeShaderTranslator()) {
+ return false;
+ }
+
+ state_.viewport_width = size.width();
+ state_.viewport_height = size.height();
+
+ GLint viewport_params[4] = { 0 };
+ glGetIntegerv(GL_MAX_VIEWPORT_DIMS, viewport_params);
+ viewport_max_width_ = viewport_params[0];
+ viewport_max_height_ = viewport_params[1];
+
+ state_.scissor_width = state_.viewport_width;
+ state_.scissor_height = state_.viewport_height;
+
+ // Set all the default state because some GL drivers get it wrong.
+ state_.InitCapabilities(NULL);
+ state_.InitState(NULL);
+ glActiveTexture(GL_TEXTURE0 + state_.active_texture_unit);
+
+ DoBindBuffer(GL_ARRAY_BUFFER, 0);
+ DoBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0);
+ DoBindRenderbuffer(GL_RENDERBUFFER, 0);
+
+ bool call_gl_clear = !surfaceless_;
+#if defined(OS_ANDROID)
+  // Temporary workaround for Android WebView: this clear ignores the clip and
+  // corrupts the external UI of the app. Not calling glClear is OK because the
+  // system already clears the buffer before each draw. A proper fix might be
+  // to set the scissor clip correctly before initialization. See
+  // crbug.com/259023 for details.
+ call_gl_clear = surface_->GetHandle();
+#endif
+ if (call_gl_clear) {
+ // Clear the backbuffer.
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+ }
+
+ supports_post_sub_buffer_ = surface->SupportsPostSubBuffer();
+ if (feature_info_->workarounds()
+ .disable_post_sub_buffers_for_onscreen_surfaces &&
+ !surface->IsOffscreen())
+ supports_post_sub_buffer_ = false;
+
+ if (feature_info_->workarounds().reverse_point_sprite_coord_origin) {
+ glPointParameteri(GL_POINT_SPRITE_COORD_ORIGIN, GL_LOWER_LEFT);
+ }
+
+ if (feature_info_->workarounds().unbind_fbo_on_context_switch) {
+ context_->SetUnbindFboOnMakeCurrent();
+ }
+
+ // Only compositor contexts are known to use only the subset of GL
+ // that can be safely migrated between the iGPU and the dGPU. Mark
+ // those contexts as safe to forcibly transition between the GPUs.
+ // http://crbug.com/180876, http://crbug.com/227228
+ if (!offscreen)
+ context_->SetSafeToForceGpuSwitch();
+
+ async_pixel_transfer_manager_.reset(
+ AsyncPixelTransferManager::Create(context.get()));
+ async_pixel_transfer_manager_->Initialize(texture_manager());
+
+ framebuffer_manager()->AddObserver(this);
+
+ return true;
+}
+
+Capabilities GLES2DecoderImpl::GetCapabilities() {
+ DCHECK(initialized());
+
+ Capabilities caps;
+
+ caps.egl_image_external =
+ feature_info_->feature_flags().oes_egl_image_external;
+ caps.texture_format_bgra8888 =
+ feature_info_->feature_flags().ext_texture_format_bgra8888;
+ caps.texture_format_etc1 =
+ feature_info_->feature_flags().oes_compressed_etc1_rgb8_texture;
+ caps.texture_format_etc1_npot =
+ caps.texture_format_etc1 && !workarounds().etc1_power_of_two_only;
+ caps.texture_rectangle = feature_info_->feature_flags().arb_texture_rectangle;
+ caps.texture_usage = feature_info_->feature_flags().angle_texture_usage;
+ caps.texture_storage = feature_info_->feature_flags().ext_texture_storage;
+ caps.discard_framebuffer =
+ feature_info_->feature_flags().ext_discard_framebuffer;
+ caps.sync_query = feature_info_->feature_flags().chromium_sync_query;
+
+#if defined(OS_MACOSX)
+  // This is unconditionally true on Mac; there is no need to test for it at
+  // runtime.
+ caps.iosurface = true;
+#endif
+
+ caps.post_sub_buffer = supports_post_sub_buffer_;
+ caps.image = true;
+
+ return caps;
+}
+
+void GLES2DecoderImpl::UpdateCapabilities() {
+ util_.set_num_compressed_texture_formats(
+ validators_->compressed_texture_format.GetValues().size());
+ util_.set_num_shader_binary_formats(
+ validators_->shader_binary_format.GetValues().size());
+}
+
+bool GLES2DecoderImpl::InitializeShaderTranslator() {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::InitializeShaderTranslator");
+
+ if (!use_shader_translator_) {
+ return true;
+ }
+ ShBuiltInResources resources;
+ ShInitBuiltInResources(&resources);
+ resources.MaxVertexAttribs = group_->max_vertex_attribs();
+ resources.MaxVertexUniformVectors =
+ group_->max_vertex_uniform_vectors();
+ resources.MaxVaryingVectors = group_->max_varying_vectors();
+ resources.MaxVertexTextureImageUnits =
+ group_->max_vertex_texture_image_units();
+ resources.MaxCombinedTextureImageUnits = group_->max_texture_units();
+ resources.MaxTextureImageUnits = group_->max_texture_image_units();
+ resources.MaxFragmentUniformVectors =
+ group_->max_fragment_uniform_vectors();
+ resources.MaxDrawBuffers = group_->max_draw_buffers();
+ resources.MaxExpressionComplexity = 256;
+ resources.MaxCallStackDepth = 256;
+
+ GLint range[2] = { 0, 0 };
+ GLint precision = 0;
+ GetShaderPrecisionFormatImpl(GL_FRAGMENT_SHADER, GL_HIGH_FLOAT,
+ range, &precision);
+ resources.FragmentPrecisionHigh =
+ PrecisionMeetsSpecForHighpFloat(range[0], range[1], precision);
+
+ if (force_webgl_glsl_validation_) {
+ resources.OES_standard_derivatives = derivatives_explicitly_enabled_;
+ resources.EXT_frag_depth = frag_depth_explicitly_enabled_;
+ resources.EXT_draw_buffers = draw_buffers_explicitly_enabled_;
+ if (!draw_buffers_explicitly_enabled_)
+ resources.MaxDrawBuffers = 1;
+ resources.EXT_shader_texture_lod = shader_texture_lod_explicitly_enabled_;
+ } else {
+ resources.OES_standard_derivatives =
+ features().oes_standard_derivatives ? 1 : 0;
+ resources.ARB_texture_rectangle =
+ features().arb_texture_rectangle ? 1 : 0;
+ resources.OES_EGL_image_external =
+ features().oes_egl_image_external ? 1 : 0;
+ resources.EXT_draw_buffers =
+ features().ext_draw_buffers ? 1 : 0;
+ resources.EXT_frag_depth =
+ features().ext_frag_depth ? 1 : 0;
+ resources.EXT_shader_texture_lod =
+ features().ext_shader_texture_lod ? 1 : 0;
+ }
+
+ ShShaderSpec shader_spec = force_webgl_glsl_validation_ ? SH_WEBGL_SPEC
+ : SH_GLES2_SPEC;
+ if (shader_spec == SH_WEBGL_SPEC && features().enable_shader_name_hashing)
+ resources.HashFunction = &CityHash64;
+ else
+ resources.HashFunction = NULL;
+ ShaderTranslatorInterface::GlslImplementationType implementation_type =
+ gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2 ?
+ ShaderTranslatorInterface::kGlslES : ShaderTranslatorInterface::kGlsl;
+ int driver_bug_workarounds = 0;
+ if (workarounds().needs_glsl_built_in_function_emulation)
+ driver_bug_workarounds |= SH_EMULATE_BUILT_IN_FUNCTIONS;
+ if (workarounds().init_gl_position_in_vertex_shader)
+ driver_bug_workarounds |= SH_INIT_GL_POSITION;
+ if (workarounds().unfold_short_circuit_as_ternary_operation)
+ driver_bug_workarounds |= SH_UNFOLD_SHORT_CIRCUIT;
+ if (workarounds().init_varyings_without_static_use)
+ driver_bug_workarounds |= SH_INIT_VARYINGS_WITHOUT_STATIC_USE;
+ if (workarounds().unroll_for_loop_with_sampler_array_index)
+ driver_bug_workarounds |= SH_UNROLL_FOR_LOOP_WITH_SAMPLER_ARRAY_INDEX;
+ if (workarounds().scalarize_vec_and_mat_constructor_args)
+ driver_bug_workarounds |= SH_SCALARIZE_VEC_AND_MAT_CONSTRUCTOR_ARGS;
+ if (workarounds().regenerate_struct_names)
+ driver_bug_workarounds |= SH_REGENERATE_STRUCT_NAMES;
+
+ vertex_translator_ = shader_translator_cache()->GetTranslator(
+ GL_VERTEX_SHADER,
+ shader_spec,
+ &resources,
+ implementation_type,
+ static_cast<ShCompileOptions>(driver_bug_workarounds));
+ if (!vertex_translator_.get()) {
+ LOG(ERROR) << "Could not initialize vertex shader translator.";
+ Destroy(true);
+ return false;
+ }
+
+ fragment_translator_ = shader_translator_cache()->GetTranslator(
+ GL_FRAGMENT_SHADER,
+ shader_spec,
+ &resources,
+ implementation_type,
+ static_cast<ShCompileOptions>(driver_bug_workarounds));
+ if (!fragment_translator_.get()) {
+ LOG(ERROR) << "Could not initialize fragment shader translator.";
+ Destroy(true);
+ return false;
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::GenBuffersHelper(GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetBuffer(client_ids[ii])) {
+ return false;
+ }
+ }
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+ glGenBuffersARB(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateBuffer(client_ids[ii], service_ids[ii]);
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::GenFramebuffersHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetFramebuffer(client_ids[ii])) {
+ return false;
+ }
+ }
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+ glGenFramebuffersEXT(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateFramebuffer(client_ids[ii], service_ids[ii]);
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::GenRenderbuffersHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetRenderbuffer(client_ids[ii])) {
+ return false;
+ }
+ }
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+ glGenRenderbuffersEXT(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateRenderbuffer(client_ids[ii], service_ids[ii]);
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::GenTexturesHelper(GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetTexture(client_ids[ii])) {
+ return false;
+ }
+ }
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+ glGenTextures(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateTexture(client_ids[ii], service_ids[ii]);
+ }
+ return true;
+}
+
+void GLES2DecoderImpl::DeleteBuffersHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ Buffer* buffer = GetBuffer(client_ids[ii]);
+ if (buffer && !buffer->IsDeleted()) {
+ state_.vertex_attrib_manager->Unbind(buffer);
+ if (state_.bound_array_buffer.get() == buffer) {
+ state_.bound_array_buffer = NULL;
+ }
+ RemoveBuffer(client_ids[ii]);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DeleteFramebuffersHelper(
+ GLsizei n, const GLuint* client_ids) {
+ bool supports_separate_framebuffer_binds =
+ features().chromium_framebuffer_multisample;
+
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ Framebuffer* framebuffer =
+ GetFramebuffer(client_ids[ii]);
+ if (framebuffer && !framebuffer->IsDeleted()) {
+ if (framebuffer == framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.bound_draw_framebuffer = NULL;
+ framebuffer_state_.clear_state_dirty = true;
+ GLenum target = supports_separate_framebuffer_binds ?
+ GL_DRAW_FRAMEBUFFER_EXT : GL_FRAMEBUFFER;
+ glBindFramebufferEXT(target, GetBackbufferServiceId());
+ }
+ if (framebuffer == framebuffer_state_.bound_read_framebuffer.get()) {
+ framebuffer_state_.bound_read_framebuffer = NULL;
+ GLenum target = supports_separate_framebuffer_binds ?
+ GL_READ_FRAMEBUFFER_EXT : GL_FRAMEBUFFER;
+ glBindFramebufferEXT(target, GetBackbufferServiceId());
+ }
+ OnFboChanged();
+ RemoveFramebuffer(client_ids[ii]);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DeleteRenderbuffersHelper(
+ GLsizei n, const GLuint* client_ids) {
+ bool supports_separate_framebuffer_binds =
+ features().chromium_framebuffer_multisample;
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ Renderbuffer* renderbuffer =
+ GetRenderbuffer(client_ids[ii]);
+ if (renderbuffer && !renderbuffer->IsDeleted()) {
+ if (state_.bound_renderbuffer.get() == renderbuffer) {
+ state_.bound_renderbuffer = NULL;
+ }
+ // Unbind from current framebuffers.
+ if (supports_separate_framebuffer_binds) {
+ if (framebuffer_state_.bound_read_framebuffer.get()) {
+ framebuffer_state_.bound_read_framebuffer
+ ->UnbindRenderbuffer(GL_READ_FRAMEBUFFER_EXT, renderbuffer);
+ }
+ if (framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.bound_draw_framebuffer
+ ->UnbindRenderbuffer(GL_DRAW_FRAMEBUFFER_EXT, renderbuffer);
+ }
+ } else {
+ if (framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.bound_draw_framebuffer
+ ->UnbindRenderbuffer(GL_FRAMEBUFFER, renderbuffer);
+ }
+ }
+ framebuffer_state_.clear_state_dirty = true;
+ RemoveRenderbuffer(client_ids[ii]);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DeleteTexturesHelper(
+ GLsizei n, const GLuint* client_ids) {
+ bool supports_separate_framebuffer_binds =
+ features().chromium_framebuffer_multisample;
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ TextureRef* texture_ref = GetTexture(client_ids[ii]);
+ if (texture_ref) {
+ Texture* texture = texture_ref->texture();
+ if (texture->IsAttachedToFramebuffer()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+      // Unbind texture_ref from all texture units.
+ for (size_t jj = 0; jj < state_.texture_units.size(); ++jj) {
+ state_.texture_units[jj].Unbind(texture_ref);
+ }
+ // Unbind from current framebuffers.
+ if (supports_separate_framebuffer_binds) {
+ if (framebuffer_state_.bound_read_framebuffer.get()) {
+ framebuffer_state_.bound_read_framebuffer
+ ->UnbindTexture(GL_READ_FRAMEBUFFER_EXT, texture_ref);
+ }
+ if (framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.bound_draw_framebuffer
+ ->UnbindTexture(GL_DRAW_FRAMEBUFFER_EXT, texture_ref);
+ }
+ } else {
+ if (framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.bound_draw_framebuffer
+ ->UnbindTexture(GL_FRAMEBUFFER, texture_ref);
+ }
+ }
+#if defined(OS_MACOSX)
+ GLuint service_id = texture->service_id();
+ if (texture->target() == GL_TEXTURE_RECTANGLE_ARB) {
+ ReleaseIOSurfaceForTexture(service_id);
+ }
+#endif
+ RemoveTexture(client_ids[ii]);
+ }
+ }
+}
+
+// } // anonymous namespace
+
+bool GLES2DecoderImpl::MakeCurrent() {
+ if (!context_.get())
+ return false;
+
+ if (!context_->MakeCurrent(surface_.get()) || WasContextLost()) {
+ LOG(ERROR) << " GLES2DecoderImpl: Context lost during MakeCurrent.";
+
+    // Some D3D drivers cannot recover from a lost device while inside the GPU
+    // process sandbox. Allow a new GPU process to launch.
+ if (workarounds().exit_on_context_lost) {
+ LOG(ERROR) << "Exiting GPU process because some drivers cannot reset"
+ << " a D3D device in the Chrome GPU process sandbox.";
+#if defined(OS_WIN)
+ base::win::SetShouldCrashOnProcessDetach(false);
+#endif
+ exit(0);
+ }
+
+ return false;
+ }
+
+ ProcessFinishedAsyncTransfers();
+
+ // Rebind the FBO if it was unbound by the context.
+ if (workarounds().unbind_fbo_on_context_switch)
+ RestoreFramebufferBindings();
+
+ framebuffer_state_.clear_state_dirty = true;
+
+ return true;
+}
+
+void GLES2DecoderImpl::ProcessFinishedAsyncTransfers() {
+ ProcessPendingReadPixels();
+ if (engine() && query_manager_.get())
+ query_manager_->ProcessPendingTransferQueries();
+
+ // TODO(epenner): Is there a better place to do this?
+ // This needs to occur before we execute any batch of commands
+  // from the client, as the client may have received an async
+ // completion while issuing those commands.
+ // "DidFlushStart" would be ideal if we had such a callback.
+ async_pixel_transfer_manager_->BindCompletedAsyncTransfers();
+}
+
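+// Rebinds |framebuffer| to |target|; a null framebuffer (client id 0) maps to
+// the real or emulated backbuffer FBO.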
+static void RebindCurrentFramebuffer(
+ GLenum target,
+ Framebuffer* framebuffer,
+ GLuint back_buffer_service_id) {
+ GLuint framebuffer_id = framebuffer ? framebuffer->service_id() : 0;
+
+ if (framebuffer_id == 0) {
+ framebuffer_id = back_buffer_service_id;
+ }
+
+ glBindFramebufferEXT(target, framebuffer_id);
+}
+
+void GLES2DecoderImpl::RestoreCurrentFramebufferBindings() {
+ framebuffer_state_.clear_state_dirty = true;
+
+ if (!features().chromium_framebuffer_multisample) {
+ RebindCurrentFramebuffer(
+ GL_FRAMEBUFFER,
+ framebuffer_state_.bound_draw_framebuffer.get(),
+ GetBackbufferServiceId());
+ } else {
+ RebindCurrentFramebuffer(
+ GL_READ_FRAMEBUFFER_EXT,
+ framebuffer_state_.bound_read_framebuffer.get(),
+ GetBackbufferServiceId());
+ RebindCurrentFramebuffer(
+ GL_DRAW_FRAMEBUFFER_EXT,
+ framebuffer_state_.bound_draw_framebuffer.get(),
+ GetBackbufferServiceId());
+ }
+ OnFboChanged();
+}
+
+bool GLES2DecoderImpl::CheckFramebufferValid(
+ Framebuffer* framebuffer,
+ GLenum target, const char* func_name) {
+ if (!framebuffer) {
+ if (surfaceless_)
+ return false;
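+    // Clears deferred by glDiscardFramebufferEXT on the default framebuffer
+    // are applied lazily here, the next time the backbuffer is used.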
+ if (backbuffer_needs_clear_bits_) {
+ glClearColor(0, 0, 0, (GLES2Util::GetChannelsForFormat(
+ offscreen_target_color_format_) & 0x0008) != 0 ? 0 : 1);
+ state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glClearStencil(0);
+ state_.SetDeviceStencilMaskSeparate(GL_FRONT, kDefaultStencilMask);
+ state_.SetDeviceStencilMaskSeparate(GL_BACK, kDefaultStencilMask);
+ glClearDepth(1.0f);
+ state_.SetDeviceDepthMask(GL_TRUE);
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ bool reset_draw_buffer = false;
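+      // A color clear has no effect while the draw buffer is GL_NONE, so
+      // temporarily point it at the backbuffer (or the emulated backbuffer's
+      // color attachment).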
+      if ((backbuffer_needs_clear_bits_ & GL_COLOR_BUFFER_BIT) != 0 &&
+ group_->draw_buffer() == GL_NONE) {
+ reset_draw_buffer = true;
+ GLenum buf = GL_BACK;
+ if (GetBackbufferServiceId() != 0) // emulated backbuffer
+ buf = GL_COLOR_ATTACHMENT0;
+ glDrawBuffersARB(1, &buf);
+ }
+ glClear(backbuffer_needs_clear_bits_);
+ if (reset_draw_buffer) {
+ GLenum buf = GL_NONE;
+ glDrawBuffersARB(1, &buf);
+ }
+ backbuffer_needs_clear_bits_ = 0;
+ RestoreClearState();
+ }
+ return true;
+ }
+
+ if (framebuffer_manager()->IsComplete(framebuffer)) {
+ return true;
+ }
+
+ GLenum completeness = framebuffer->IsPossiblyComplete();
+ if (completeness != GL_FRAMEBUFFER_COMPLETE) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_FRAMEBUFFER_OPERATION, func_name, "framebuffer incomplete");
+ return false;
+ }
+
+ // Are all the attachments cleared?
+ if (renderbuffer_manager()->HaveUnclearedRenderbuffers() ||
+ texture_manager()->HaveUnclearedMips()) {
+ if (!framebuffer->IsCleared()) {
+ // Can we clear them?
+ if (framebuffer->GetStatus(texture_manager(), target) !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_FRAMEBUFFER_OPERATION, func_name,
+ "framebuffer incomplete (clear)");
+ return false;
+ }
+ ClearUnclearedAttachments(target, framebuffer);
+ }
+ }
+
+ if (!framebuffer_manager()->IsComplete(framebuffer)) {
+ if (framebuffer->GetStatus(texture_manager(), target) !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_FRAMEBUFFER_OPERATION, func_name,
+ "framebuffer incomplete (check)");
+ return false;
+ }
+ framebuffer_manager()->MarkAsComplete(framebuffer);
+ }
+
+ // NOTE: At this point we don't know if the framebuffer is complete but
+ // we DO know that everything that needs to be cleared has been cleared.
+ return true;
+}
+
+bool GLES2DecoderImpl::CheckBoundFramebuffersValid(const char* func_name) {
+ if (!features().chromium_framebuffer_multisample) {
+ bool valid = CheckFramebufferValid(
+ framebuffer_state_.bound_draw_framebuffer.get(), GL_FRAMEBUFFER_EXT,
+ func_name);
+
+ if (valid)
+ OnUseFramebuffer();
+
+ return valid;
+ }
+ return CheckFramebufferValid(framebuffer_state_.bound_draw_framebuffer.get(),
+ GL_DRAW_FRAMEBUFFER_EXT,
+ func_name) &&
+ CheckFramebufferValid(framebuffer_state_.bound_read_framebuffer.get(),
+ GL_READ_FRAMEBUFFER_EXT,
+ func_name);
+}
+
+bool GLES2DecoderImpl::CheckBoundReadFramebufferColorAttachment(
+ const char* func_name) {
+ Framebuffer* framebuffer = features().chromium_framebuffer_multisample ?
+ framebuffer_state_.bound_read_framebuffer.get() :
+ framebuffer_state_.bound_draw_framebuffer.get();
+ if (!framebuffer)
+ return true;
+ if (framebuffer->GetAttachment(GL_COLOR_ATTACHMENT0) == NULL) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, func_name, "no color image attached");
+ return false;
+ }
+ return true;
+}
+
+gfx::Size GLES2DecoderImpl::GetBoundReadFrameBufferSize() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_READ_FRAMEBUFFER_EXT);
+ if (framebuffer != NULL) {
+ const Framebuffer::Attachment* attachment =
+ framebuffer->GetAttachment(GL_COLOR_ATTACHMENT0);
+ if (attachment) {
+ return gfx::Size(attachment->width(), attachment->height());
+ }
+ return gfx::Size(0, 0);
+ } else if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_size_;
+ } else {
+ return surface_->GetSize();
+ }
+}
+
+GLenum GLES2DecoderImpl::GetBoundReadFrameBufferTextureType() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_READ_FRAMEBUFFER_EXT);
+ if (framebuffer != NULL) {
+ return framebuffer->GetColorAttachmentTextureType();
+ } else {
+ return GL_UNSIGNED_BYTE;
+ }
+}
+
+GLenum GLES2DecoderImpl::GetBoundReadFrameBufferInternalFormat() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_READ_FRAMEBUFFER_EXT);
+ if (framebuffer != NULL) {
+ return framebuffer->GetColorAttachmentFormat();
+ } else if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_target_color_format_;
+ } else {
+ return back_buffer_color_format_;
+ }
+}
+
+GLenum GLES2DecoderImpl::GetBoundDrawFrameBufferInternalFormat() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ if (framebuffer != NULL) {
+ return framebuffer->GetColorAttachmentFormat();
+ } else if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_target_color_format_;
+ } else {
+ return back_buffer_color_format_;
+ }
+}
+
+void GLES2DecoderImpl::UpdateParentTextureInfo() {
+ if (!offscreen_saved_color_texture_info_.get())
+ return;
+ GLenum target = offscreen_saved_color_texture_info_->texture()->target();
+ glBindTexture(target, offscreen_saved_color_texture_info_->service_id());
+ texture_manager()->SetLevelInfo(
+ offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_2D,
+ 0, // level
+ GL_RGBA,
+ offscreen_size_.width(),
+ offscreen_size_.height(),
+ 1, // depth
+ 0, // border
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ texture_manager()->SetParameteri(
+ "UpdateParentTextureInfo",
+ GetErrorState(),
+ offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_MAG_FILTER,
+ GL_LINEAR);
+ texture_manager()->SetParameteri(
+ "UpdateParentTextureInfo",
+ GetErrorState(),
+ offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR);
+ texture_manager()->SetParameteri(
+ "UpdateParentTextureInfo",
+ GetErrorState(),
+ offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE);
+ texture_manager()->SetParameteri(
+ "UpdateParentTextureInfo",
+ GetErrorState(),
+ offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_WRAP_T,
+ GL_CLAMP_TO_EDGE);
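+  // Restore the texture binding the client had for this target so the updates
+  // above do not perturb tracked state.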
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ glBindTexture(target, texture_ref ? texture_ref->service_id() : 0);
+}
+
+void GLES2DecoderImpl::SetResizeCallback(
+ const base::Callback<void(gfx::Size, float)>& callback) {
+ resize_callback_ = callback;
+}
+
+Logger* GLES2DecoderImpl::GetLogger() {
+ return &logger_;
+}
+
+void GLES2DecoderImpl::BeginDecoding() {
+ gpu_tracer_->BeginDecoding();
+ gpu_trace_commands_ = gpu_tracer_->IsTracing();
+ gpu_debug_commands_ = log_commands() || debug() || gpu_trace_commands_ ||
+ (*cb_command_trace_category_ != 0);
+}
+
+void GLES2DecoderImpl::EndDecoding() {
+ gpu_tracer_->EndDecoding();
+}
+
+ErrorState* GLES2DecoderImpl::GetErrorState() {
+ return state_.GetErrorState();
+}
+
+void GLES2DecoderImpl::SetShaderCacheCallback(
+ const ShaderCacheCallback& callback) {
+ shader_cache_callback_ = callback;
+}
+
+void GLES2DecoderImpl::SetWaitSyncPointCallback(
+ const WaitSyncPointCallback& callback) {
+ wait_sync_point_callback_ = callback;
+}
+
+AsyncPixelTransferManager*
+ GLES2DecoderImpl::GetAsyncPixelTransferManager() {
+ return async_pixel_transfer_manager_.get();
+}
+
+void GLES2DecoderImpl::ResetAsyncPixelTransferManagerForTest() {
+ async_pixel_transfer_manager_.reset();
+}
+
+void GLES2DecoderImpl::SetAsyncPixelTransferManagerForTest(
+ AsyncPixelTransferManager* manager) {
+ async_pixel_transfer_manager_ = make_scoped_ptr(manager);
+}
+
+bool GLES2DecoderImpl::GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id) {
+ TextureRef* texture_ref = texture_manager()->GetTexture(client_texture_id);
+ if (texture_ref) {
+ *service_texture_id = texture_ref->service_id();
+ return true;
+ }
+ return false;
+}
+
+uint32 GLES2DecoderImpl::GetTextureUploadCount() {
+ return texture_state_.texture_upload_count +
+ async_pixel_transfer_manager_->GetTextureUploadCount();
+}
+
+base::TimeDelta GLES2DecoderImpl::GetTotalTextureUploadTime() {
+ return texture_state_.total_texture_upload_time +
+ async_pixel_transfer_manager_->GetTotalTextureUploadTime();
+}
+
+base::TimeDelta GLES2DecoderImpl::GetTotalProcessingCommandsTime() {
+ return total_processing_commands_time_;
+}
+
+void GLES2DecoderImpl::AddProcessingCommandsTime(base::TimeDelta time) {
+ total_processing_commands_time_ += time;
+}
+
+void GLES2DecoderImpl::Destroy(bool have_context) {
+ if (!initialized())
+ return;
+
+ DCHECK(!have_context || context_->IsCurrent(NULL));
+
+ // Unbind everything.
+ state_.vertex_attrib_manager = NULL;
+ state_.default_vertex_attrib_manager = NULL;
+ state_.texture_units.clear();
+ state_.bound_array_buffer = NULL;
+ state_.current_queries.clear();
+ framebuffer_state_.bound_read_framebuffer = NULL;
+ framebuffer_state_.bound_draw_framebuffer = NULL;
+ state_.bound_renderbuffer = NULL;
+
+ if (offscreen_saved_color_texture_info_.get()) {
+ DCHECK(offscreen_target_color_texture_);
+ DCHECK_EQ(offscreen_saved_color_texture_info_->service_id(),
+ offscreen_saved_color_texture_->id());
+ offscreen_saved_color_texture_->Invalidate();
+ offscreen_saved_color_texture_info_ = NULL;
+ }
+ if (have_context) {
+ if (copy_texture_CHROMIUM_.get()) {
+ copy_texture_CHROMIUM_->Destroy();
+ copy_texture_CHROMIUM_.reset();
+ }
+
+ if (state_.current_program.get()) {
+ program_manager()->UnuseProgram(shader_manager(),
+ state_.current_program.get());
+ }
+
+ if (attrib_0_buffer_id_) {
+ glDeleteBuffersARB(1, &attrib_0_buffer_id_);
+ }
+ if (fixed_attrib_buffer_id_) {
+ glDeleteBuffersARB(1, &fixed_attrib_buffer_id_);
+ }
+
+ if (validation_texture_) {
+ glDeleteTextures(1, &validation_texture_);
+ glDeleteFramebuffersEXT(1, &validation_fbo_multisample_);
+ glDeleteFramebuffersEXT(1, &validation_fbo_);
+ }
+
+ if (offscreen_target_frame_buffer_.get())
+ offscreen_target_frame_buffer_->Destroy();
+ if (offscreen_target_color_texture_.get())
+ offscreen_target_color_texture_->Destroy();
+ if (offscreen_target_color_render_buffer_.get())
+ offscreen_target_color_render_buffer_->Destroy();
+ if (offscreen_target_depth_render_buffer_.get())
+ offscreen_target_depth_render_buffer_->Destroy();
+ if (offscreen_target_stencil_render_buffer_.get())
+ offscreen_target_stencil_render_buffer_->Destroy();
+ if (offscreen_saved_frame_buffer_.get())
+ offscreen_saved_frame_buffer_->Destroy();
+ if (offscreen_saved_color_texture_.get())
+ offscreen_saved_color_texture_->Destroy();
+ if (offscreen_resolved_frame_buffer_.get())
+ offscreen_resolved_frame_buffer_->Destroy();
+ if (offscreen_resolved_color_texture_.get())
+ offscreen_resolved_color_texture_->Destroy();
+ } else {
+ if (offscreen_target_frame_buffer_.get())
+ offscreen_target_frame_buffer_->Invalidate();
+ if (offscreen_target_color_texture_.get())
+ offscreen_target_color_texture_->Invalidate();
+ if (offscreen_target_color_render_buffer_.get())
+ offscreen_target_color_render_buffer_->Invalidate();
+ if (offscreen_target_depth_render_buffer_.get())
+ offscreen_target_depth_render_buffer_->Invalidate();
+ if (offscreen_target_stencil_render_buffer_.get())
+ offscreen_target_stencil_render_buffer_->Invalidate();
+ if (offscreen_saved_frame_buffer_.get())
+ offscreen_saved_frame_buffer_->Invalidate();
+ if (offscreen_saved_color_texture_.get())
+ offscreen_saved_color_texture_->Invalidate();
+ if (offscreen_resolved_frame_buffer_.get())
+ offscreen_resolved_frame_buffer_->Invalidate();
+ if (offscreen_resolved_color_texture_.get())
+ offscreen_resolved_color_texture_->Invalidate();
+ }
+
+ // Current program must be cleared after calling ProgramManager::UnuseProgram.
+ // Otherwise, we can leak objects. http://crbug.com/258772.
+ // state_.current_program must be reset before group_ is reset because
+  // the latter deletes the ProgramManager object that is referenced by the
+  // state_.current_program object.
+ state_.current_program = NULL;
+
+ copy_texture_CHROMIUM_.reset();
+
+ if (query_manager_.get()) {
+ query_manager_->Destroy(have_context);
+ query_manager_.reset();
+ }
+
+  if (vertex_array_manager_.get()) {
+ vertex_array_manager_->Destroy(have_context);
+ vertex_array_manager_.reset();
+ }
+
+ if (image_manager_.get()) {
+ image_manager_->Destroy(have_context);
+ image_manager_.reset();
+ }
+
+ offscreen_target_frame_buffer_.reset();
+ offscreen_target_color_texture_.reset();
+ offscreen_target_color_render_buffer_.reset();
+ offscreen_target_depth_render_buffer_.reset();
+ offscreen_target_stencil_render_buffer_.reset();
+ offscreen_saved_frame_buffer_.reset();
+ offscreen_saved_color_texture_.reset();
+ offscreen_resolved_frame_buffer_.reset();
+ offscreen_resolved_color_texture_.reset();
+
+ // Need to release these before releasing |group_| which may own the
+ // ShaderTranslatorCache.
+ fragment_translator_ = NULL;
+ vertex_translator_ = NULL;
+
+  // The transfer manager should be destroyed before the texture manager held
+  // by the context group.
+ async_pixel_transfer_manager_.reset();
+
+ if (group_.get()) {
+ framebuffer_manager()->RemoveObserver(this);
+ group_->Destroy(this, have_context);
+ group_ = NULL;
+ }
+
+ if (context_.get()) {
+ context_->ReleaseCurrent(NULL);
+ context_ = NULL;
+ }
+
+#if defined(OS_MACOSX)
+ for (TextureToIOSurfaceMap::iterator it = texture_to_io_surface_map_.begin();
+ it != texture_to_io_surface_map_.end(); ++it) {
+ CFRelease(it->second);
+ }
+ texture_to_io_surface_map_.clear();
+#endif
+}
+
+void GLES2DecoderImpl::SetSurface(
+ const scoped_refptr<gfx::GLSurface>& surface) {
+ DCHECK(context_->IsCurrent(NULL));
+ DCHECK(surface_.get());
+ surface_ = surface;
+ RestoreCurrentFramebufferBindings();
+}
+
+void GLES2DecoderImpl::ProduceFrontBuffer(const Mailbox& mailbox) {
+ if (!offscreen_saved_color_texture_.get()) {
+ LOG(ERROR) << "Called ProduceFrontBuffer on a non-offscreen context";
+ return;
+ }
+ if (!offscreen_saved_color_texture_info_.get()) {
+ GLuint service_id = offscreen_saved_color_texture_->id();
+ offscreen_saved_color_texture_info_ = TextureRef::Create(
+ texture_manager(), 0, service_id);
+ texture_manager()->SetTarget(offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_2D);
+ UpdateParentTextureInfo();
+ }
+ mailbox_manager()->ProduceTexture(
+ GL_TEXTURE_2D, mailbox, offscreen_saved_color_texture_info_->texture());
+}
+
+bool GLES2DecoderImpl::ResizeOffscreenFrameBuffer(const gfx::Size& size) {
+ bool is_offscreen = !!offscreen_target_frame_buffer_.get();
+ if (!is_offscreen) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer called "
+ << " with an onscreen framebuffer.";
+ return false;
+ }
+
+ if (offscreen_size_ == size)
+ return true;
+
+ offscreen_size_ = size;
+ int w = offscreen_size_.width();
+ int h = offscreen_size_.height();
+ if (w < 0 || h < 0 || h >= (INT_MAX / 4) / (w ? w : 1)) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "to allocate storage due to excessive dimensions.";
+ return false;
+ }
+
+ // Reallocate the offscreen target buffers.
+ DCHECK(offscreen_target_color_format_);
+ if (IsOffscreenBufferMultisampled()) {
+ if (!offscreen_target_color_render_buffer_->AllocateStorage(
+ feature_info_.get(),
+ offscreen_size_,
+ offscreen_target_color_format_,
+ offscreen_target_samples_)) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "to allocate storage for offscreen target color buffer.";
+ return false;
+ }
+ } else {
+ if (!offscreen_target_color_texture_->AllocateStorage(
+ offscreen_size_, offscreen_target_color_format_, false)) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "to allocate storage for offscreen target color texture.";
+ return false;
+ }
+ }
+ if (offscreen_target_depth_format_ &&
+ !offscreen_target_depth_render_buffer_->AllocateStorage(
+ feature_info_.get(),
+ offscreen_size_,
+ offscreen_target_depth_format_,
+ offscreen_target_samples_)) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "to allocate storage for offscreen target depth buffer.";
+ return false;
+ }
+ if (offscreen_target_stencil_format_ &&
+ !offscreen_target_stencil_render_buffer_->AllocateStorage(
+ feature_info_.get(),
+ offscreen_size_,
+ offscreen_target_stencil_format_,
+ offscreen_target_samples_)) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "to allocate storage for offscreen target stencil buffer.";
+ return false;
+ }
+
+ // Attach the offscreen target buffers to the target frame buffer.
+ if (IsOffscreenBufferMultisampled()) {
+ offscreen_target_frame_buffer_->AttachRenderBuffer(
+ GL_COLOR_ATTACHMENT0,
+ offscreen_target_color_render_buffer_.get());
+ } else {
+ offscreen_target_frame_buffer_->AttachRenderTexture(
+ offscreen_target_color_texture_.get());
+ }
+ if (offscreen_target_depth_format_) {
+ offscreen_target_frame_buffer_->AttachRenderBuffer(
+ GL_DEPTH_ATTACHMENT,
+ offscreen_target_depth_render_buffer_.get());
+ }
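+  // With a packed depth/stencil format, the depth renderbuffer also provides
+  // the stencil attachment.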
+ const bool packed_depth_stencil =
+ offscreen_target_depth_format_ == GL_DEPTH24_STENCIL8;
+ if (packed_depth_stencil) {
+ offscreen_target_frame_buffer_->AttachRenderBuffer(
+ GL_STENCIL_ATTACHMENT,
+ offscreen_target_depth_render_buffer_.get());
+ } else if (offscreen_target_stencil_format_) {
+ offscreen_target_frame_buffer_->AttachRenderBuffer(
+ GL_STENCIL_ATTACHMENT,
+ offscreen_target_stencil_render_buffer_.get());
+ }
+
+ if (offscreen_target_frame_buffer_->CheckStatus() !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "because offscreen FBO was incomplete.";
+ return false;
+ }
+
+ // Clear the target frame buffer.
+ {
+ ScopedFrameBufferBinder binder(this, offscreen_target_frame_buffer_->id());
+ glClearColor(0, 0, 0, (GLES2Util::GetChannelsForFormat(
+ offscreen_target_color_format_) & 0x0008) != 0 ? 0 : 1);
+ state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glClearStencil(0);
+ state_.SetDeviceStencilMaskSeparate(GL_FRONT, kDefaultStencilMask);
+ state_.SetDeviceStencilMaskSeparate(GL_BACK, kDefaultStencilMask);
+ glClearDepth(0);
+ state_.SetDeviceDepthMask(GL_TRUE);
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+ RestoreClearState();
+ }
+
+ // Destroy the offscreen resolved framebuffers.
+ if (offscreen_resolved_frame_buffer_.get())
+ offscreen_resolved_frame_buffer_->Destroy();
+ if (offscreen_resolved_color_texture_.get())
+ offscreen_resolved_color_texture_->Destroy();
+ offscreen_resolved_color_texture_.reset();
+ offscreen_resolved_frame_buffer_.reset();
+
+ return true;
+}
+
+error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ResizeCHROMIUM& c =
+ *static_cast<const gles2::cmds::ResizeCHROMIUM*>(cmd_data);
+ if (!offscreen_target_frame_buffer_.get() && surface_->DeferDraws())
+ return error::kDeferCommandUntilLater;
+
+ GLuint width = static_cast<GLuint>(c.width);
+ GLuint height = static_cast<GLuint>(c.height);
+ GLfloat scale_factor = c.scale_factor;
+ TRACE_EVENT2("gpu", "glResizeChromium", "width", width, "height", height);
+
+ width = std::max(1U, width);
+ height = std::max(1U, height);
+
+#if defined(OS_POSIX) && !defined(OS_MACOSX) && \
+ !defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
+ // Make sure that we are done drawing to the back buffer before resizing.
+ glFinish();
+#endif
+ bool is_offscreen = !!offscreen_target_frame_buffer_.get();
+ if (is_offscreen) {
+ if (!ResizeOffscreenFrameBuffer(gfx::Size(width, height))) {
+ LOG(ERROR) << "GLES2DecoderImpl: Context lost because "
+ << "ResizeOffscreenFrameBuffer failed.";
+ return error::kLostContext;
+ }
+ }
+
+ if (!resize_callback_.is_null()) {
+ resize_callback_.Run(gfx::Size(width, height), scale_factor);
+ DCHECK(context_->IsCurrent(surface_.get()));
+ if (!context_->IsCurrent(surface_.get())) {
+ LOG(ERROR) << "GLES2DecoderImpl: Context lost because context no longer "
+ << "current after resize callback.";
+ return error::kLostContext;
+ }
+ }
+
+ return error::kNoError;
+}
+
+const char* GLES2DecoderImpl::GetCommandName(unsigned int command_id) const {
+ if (command_id > kStartPoint && command_id < kNumCommands) {
+ return gles2::GetCommandName(static_cast<CommandId>(command_id));
+ }
+ return GetCommonCommandName(static_cast<cmd::CommandId>(command_id));
+}
+
+// Decode a command, and call the corresponding GL functions.
+// NOTE: DoCommand() is slower than calling DoCommands() on larger batches
+// of commands at once, and is now only used for tests that need to track
+// individual commands.
+error::Error GLES2DecoderImpl::DoCommand(unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data) {
+ return DoCommands(1, cmd_data, arg_count + 1, 0);
+}
+
+// Decode multiple commands, and call the corresponding GL functions.
+// NOTE: 'buffer' is a pointer to the command buffer. As such, it could be
+// changed by a (malicious) client at any time, so if validation has to happen,
+// it should operate on a copy of the data.
+// NOTE: This is duplicating code from AsyncAPIInterface::DoCommands() in the
+// interest of performance in this critical execution loop.
+template <bool DebugImpl>
+error::Error GLES2DecoderImpl::DoCommandsImpl(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ commands_to_process_ = num_commands;
+ error::Error result = error::kNoError;
+ const CommandBufferEntry* cmd_data =
+ static_cast<const CommandBufferEntry*>(buffer);
+ int process_pos = 0;
+ unsigned int command = 0;
+
+ while (process_pos < num_entries && result == error::kNoError &&
+ commands_to_process_--) {
+ const unsigned int size = cmd_data->value_header.size;
+ command = cmd_data->value_header.command;
+
+ if (size == 0) {
+ result = error::kInvalidSize;
+ break;
+ }
+
+ if (static_cast<int>(size) + process_pos > num_entries) {
+ result = error::kOutOfBounds;
+ break;
+ }
+
+ if (DebugImpl) {
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("cb_command"),
+ GetCommandName(command));
+
+ if (log_commands()) {
+ LOG(ERROR) << "[" << logger_.GetLogPrefix() << "]"
+ << "cmd: " << GetCommandName(command);
+ }
+ }
+
+ const unsigned int arg_count = size - 1;
+ unsigned int command_index = command - kStartPoint - 1;
+ if (command_index < arraysize(command_info)) {
+ const CommandInfo& info = command_info[command_index];
+ unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
+ if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
+ (info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
+ bool doing_gpu_trace = false;
+ if (DebugImpl && gpu_trace_commands_) {
+ if (CMD_FLAG_GET_TRACE_LEVEL(info.cmd_flags) <= gpu_trace_level_) {
+ doing_gpu_trace = true;
+ gpu_tracer_->Begin(GetCommandName(command), kTraceDecoder);
+ }
+ }
+
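+        // Entries beyond the fixed argument count are immediate data that
+        // trails the command header; pass its size to the handler.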
+ uint32 immediate_data_size = (arg_count - info_arg_count) *
+ sizeof(CommandBufferEntry); // NOLINT
+
+ result = (this->*info.cmd_handler)(immediate_data_size, cmd_data);
+
+ if (DebugImpl && doing_gpu_trace)
+ gpu_tracer_->End(kTraceDecoder);
+
+ if (DebugImpl && debug()) {
+ GLenum error;
+ while ((error = glGetError()) != GL_NO_ERROR) {
+ LOG(ERROR) << "[" << logger_.GetLogPrefix() << "] "
+ << "GL ERROR: " << GLES2Util::GetStringEnum(error)
+ << " : " << GetCommandName(command);
+ LOCAL_SET_GL_ERROR(error, "DoCommand", "GL error from driver");
+ }
+ }
+ } else {
+ result = error::kInvalidArguments;
+ }
+ } else {
+ result = DoCommonCommand(command, arg_count, cmd_data);
+ }
+
+ if (DebugImpl) {
+ TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("cb_command"),
+ GetCommandName(command));
+ }
+
+ if (result == error::kNoError &&
+ current_decoder_error_ != error::kNoError) {
+ result = current_decoder_error_;
+ current_decoder_error_ = error::kNoError;
+ }
+
+ if (result != error::kDeferCommandUntilLater) {
+ process_pos += size;
+ cmd_data += size;
+ }
+ }
+
+ if (entries_processed)
+ *entries_processed = process_pos;
+
+ if (error::IsError(result)) {
+ LOG(ERROR) << "Error: " << result << " for Command "
+ << GetCommandName(command);
+ }
+
+ return result;
+}
+
+error::Error GLES2DecoderImpl::DoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ if (gpu_debug_commands_) {
+ return DoCommandsImpl<true>(
+ num_commands, buffer, num_entries, entries_processed);
+ } else {
+ return DoCommandsImpl<false>(
+ num_commands, buffer, num_entries, entries_processed);
+ }
+}
+
+void GLES2DecoderImpl::RemoveBuffer(GLuint client_id) {
+ buffer_manager()->RemoveBuffer(client_id);
+}
+
+bool GLES2DecoderImpl::CreateProgramHelper(GLuint client_id) {
+ if (GetProgram(client_id)) {
+ return false;
+ }
+ GLuint service_id = glCreateProgram();
+ if (service_id != 0) {
+ CreateProgram(client_id, service_id);
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::CreateShaderHelper(GLenum type, GLuint client_id) {
+ if (GetShader(client_id)) {
+ return false;
+ }
+ GLuint service_id = glCreateShader(type);
+ if (service_id != 0) {
+ CreateShader(client_id, service_id, type);
+ }
+ return true;
+}
+
+void GLES2DecoderImpl::DoFinish() {
+ glFinish();
+ ProcessPendingReadPixels();
+ ProcessPendingQueries();
+}
+
+void GLES2DecoderImpl::DoFlush() {
+ glFlush();
+ ProcessPendingQueries();
+}
+
+void GLES2DecoderImpl::DoActiveTexture(GLenum texture_unit) {
+ GLuint texture_index = texture_unit - GL_TEXTURE0;
+ if (texture_index >= state_.texture_units.size()) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glActiveTexture", texture_unit, "texture_unit");
+ return;
+ }
+ state_.active_texture_unit = texture_index;
+ glActiveTexture(texture_unit);
+}
+
+void GLES2DecoderImpl::DoBindBuffer(GLenum target, GLuint client_id) {
+ Buffer* buffer = NULL;
+ GLuint service_id = 0;
+ if (client_id != 0) {
+ buffer = GetBuffer(client_id);
+ if (!buffer) {
+ if (!group_->bind_generates_resource()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBindBuffer",
+ "id not generated by glGenBuffers");
+ return;
+ }
+
+      // It's a new id so make a buffer for it.
+ glGenBuffersARB(1, &service_id);
+ CreateBuffer(client_id, service_id);
+ buffer = GetBuffer(client_id);
+ }
+ }
+ LogClientServiceForInfo(buffer, client_id, "glBindBuffer");
+ if (buffer) {
+ if (!buffer_manager()->SetTarget(buffer, target)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindBuffer", "buffer bound to more than 1 target");
+ return;
+ }
+ service_id = buffer->service_id();
+ }
+ switch (target) {
+ case GL_ARRAY_BUFFER:
+ state_.bound_array_buffer = buffer;
+ break;
+ case GL_ELEMENT_ARRAY_BUFFER:
+ state_.vertex_attrib_manager->SetElementArrayBuffer(buffer);
+ break;
+ default:
+ NOTREACHED(); // Validation should prevent us getting here.
+ break;
+ }
+ glBindBuffer(target, service_id);
+}
+
+bool GLES2DecoderImpl::BoundFramebufferHasColorAttachmentWithAlpha(
+ bool all_draw_buffers) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ if (!all_draw_buffers || !framebuffer) {
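+    // 0x0008 is the alpha bit in the channel mask returned by
+    // GLES2Util::GetChannelsForFormat().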
+ return (GLES2Util::GetChannelsForFormat(
+ GetBoundDrawFrameBufferInternalFormat()) & 0x0008) != 0;
+ }
+ return framebuffer->HasAlphaMRT();
+}
+
+bool GLES2DecoderImpl::BoundFramebufferHasDepthAttachment() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ if (framebuffer) {
+ return framebuffer->HasDepthAttachment();
+ }
+ if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_target_depth_format_ != 0;
+ }
+ return back_buffer_has_depth_;
+}
+
+bool GLES2DecoderImpl::BoundFramebufferHasStencilAttachment() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ if (framebuffer) {
+ return framebuffer->HasStencilAttachment();
+ }
+ if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_target_stencil_format_ != 0 ||
+ offscreen_target_depth_format_ == GL_DEPTH24_STENCIL8;
+ }
+ return back_buffer_has_stencil_;
+}
+
+void GLES2DecoderImpl::ApplyDirtyState() {
+ if (framebuffer_state_.clear_state_dirty) {
+ bool have_alpha = BoundFramebufferHasColorAttachmentWithAlpha(true);
+ state_.SetDeviceColorMask(state_.color_mask_red,
+ state_.color_mask_green,
+ state_.color_mask_blue,
+ state_.color_mask_alpha && have_alpha);
+
+ bool have_depth = BoundFramebufferHasDepthAttachment();
+ state_.SetDeviceDepthMask(state_.depth_mask && have_depth);
+
+ bool have_stencil = BoundFramebufferHasStencilAttachment();
+ state_.SetDeviceStencilMaskSeparate(
+ GL_FRONT, have_stencil ? state_.stencil_front_writemask : 0);
+ state_.SetDeviceStencilMaskSeparate(
+ GL_BACK, have_stencil ? state_.stencil_back_writemask : 0);
+
+ state_.SetDeviceCapabilityState(
+ GL_DEPTH_TEST, state_.enable_flags.depth_test && have_depth);
+ state_.SetDeviceCapabilityState(
+ GL_STENCIL_TEST, state_.enable_flags.stencil_test && have_stencil);
+ framebuffer_state_.clear_state_dirty = false;
+ }
+}
+
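+// Returns the FBO that stands in for the default framebuffer: the emulated
+// offscreen target when one exists, otherwise the surface's backing FBO
+// (0 for a true onscreen backbuffer).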
+GLuint GLES2DecoderImpl::GetBackbufferServiceId() const {
+ return (offscreen_target_frame_buffer_.get())
+ ? offscreen_target_frame_buffer_->id()
+ : (surface_.get() ? surface_->GetBackingFrameBufferObject() : 0);
+}
+
+void GLES2DecoderImpl::RestoreState(const ContextState* prev_state) {
+ TRACE_EVENT1("gpu", "GLES2DecoderImpl::RestoreState",
+ "context", logger_.GetLogPrefix());
+ // Restore the Framebuffer first because of bugs in Intel drivers.
+ // Intel drivers incorrectly clip the viewport settings to
+ // the size of the current framebuffer object.
+ RestoreFramebufferBindings();
+ state_.RestoreState(prev_state);
+}
+
+void GLES2DecoderImpl::RestoreFramebufferBindings() const {
+ GLuint service_id =
+ framebuffer_state_.bound_draw_framebuffer.get()
+ ? framebuffer_state_.bound_draw_framebuffer->service_id()
+ : GetBackbufferServiceId();
+ if (!features().chromium_framebuffer_multisample) {
+ glBindFramebufferEXT(GL_FRAMEBUFFER, service_id);
+ } else {
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER, service_id);
+ service_id = framebuffer_state_.bound_read_framebuffer.get()
+ ? framebuffer_state_.bound_read_framebuffer->service_id()
+ : GetBackbufferServiceId();
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER, service_id);
+ }
+ OnFboChanged();
+}
+
+void GLES2DecoderImpl::RestoreRenderbufferBindings() {
+ state_.RestoreRenderbufferBindings();
+}
+
+void GLES2DecoderImpl::RestoreTextureState(unsigned service_id) const {
+ Texture* texture = texture_manager()->GetTextureForServiceId(service_id);
+ if (texture) {
+ GLenum target = texture->target();
+ glBindTexture(target, service_id);
+ glTexParameteri(
+ target, GL_TEXTURE_WRAP_S, texture->wrap_s());
+ glTexParameteri(
+ target, GL_TEXTURE_WRAP_T, texture->wrap_t());
+ glTexParameteri(
+ target, GL_TEXTURE_MIN_FILTER, texture->min_filter());
+ glTexParameteri(
+ target, GL_TEXTURE_MAG_FILTER, texture->mag_filter());
+ RestoreTextureUnitBindings(state_.active_texture_unit);
+ }
+}
+
+void GLES2DecoderImpl::ClearAllAttributes() const {
+ // Must use native VAO 0, as RestoreAllAttributes can't fully restore
+ // other VAOs.
+ if (feature_info_->feature_flags().native_vertex_array_object)
+ glBindVertexArrayOES(0);
+
+ for (uint32 i = 0; i < group_->max_vertex_attribs(); ++i) {
+ if (i != 0) // Never disable attribute 0
+ glDisableVertexAttribArray(i);
+ if(features().angle_instanced_arrays)
+ glVertexAttribDivisorANGLE(i, 0);
+ }
+}
+
+void GLES2DecoderImpl::RestoreAllAttributes() const {
+ state_.RestoreVertexAttribs();
+}
+
+void GLES2DecoderImpl::SetIgnoreCachedStateForTest(bool ignore) {
+ state_.SetIgnoreCachedStateForTest(ignore);
+}
+
+void GLES2DecoderImpl::OnFboChanged() const {
+ if (workarounds().restore_scissor_on_fbo_change)
+ state_.fbo_binding_for_scissor_workaround_dirty_ = true;
+}
+
+// Called after the FBO is checked for completeness.
+void GLES2DecoderImpl::OnUseFramebuffer() const {
+ if (state_.fbo_binding_for_scissor_workaround_dirty_) {
+ state_.fbo_binding_for_scissor_workaround_dirty_ = false;
+ // The driver forgets the correct scissor when modifying the FBO binding.
+ glScissor(state_.scissor_x,
+ state_.scissor_y,
+ state_.scissor_width,
+ state_.scissor_height);
+
+    // crbug.com/222018 - Also, on Qualcomm the flush here avoids flicker;
+    // it's unclear how this bug works.
+ glFlush();
+ }
+}
+
+void GLES2DecoderImpl::DoBindFramebuffer(GLenum target, GLuint client_id) {
+ Framebuffer* framebuffer = NULL;
+ GLuint service_id = 0;
+ if (client_id != 0) {
+ framebuffer = GetFramebuffer(client_id);
+ if (!framebuffer) {
+ if (!group_->bind_generates_resource()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBindFramebuffer",
+ "id not generated by glGenFramebuffers");
+ return;
+ }
+
+      // It's a new id so make a framebuffer for it.
+ glGenFramebuffersEXT(1, &service_id);
+ CreateFramebuffer(client_id, service_id);
+ framebuffer = GetFramebuffer(client_id);
+ } else {
+ service_id = framebuffer->service_id();
+ }
+ framebuffer->MarkAsValid();
+ }
+ LogClientServiceForInfo(framebuffer, client_id, "glBindFramebuffer");
+
+ if (target == GL_FRAMEBUFFER || target == GL_DRAW_FRAMEBUFFER_EXT) {
+ framebuffer_state_.bound_draw_framebuffer = framebuffer;
+ }
+
+ // vmiura: This looks like dup code
+ if (target == GL_FRAMEBUFFER || target == GL_READ_FRAMEBUFFER_EXT) {
+ framebuffer_state_.bound_read_framebuffer = framebuffer;
+ }
+
+ framebuffer_state_.clear_state_dirty = true;
+
+  // If we are rendering to the backbuffer, get the FBO id for any simulated
+  // backbuffer.
+ if (framebuffer == NULL) {
+ service_id = GetBackbufferServiceId();
+ }
+
+ glBindFramebufferEXT(target, service_id);
+ OnFboChanged();
+}
+
+void GLES2DecoderImpl::DoBindRenderbuffer(GLenum target, GLuint client_id) {
+ Renderbuffer* renderbuffer = NULL;
+ GLuint service_id = 0;
+ if (client_id != 0) {
+ renderbuffer = GetRenderbuffer(client_id);
+ if (!renderbuffer) {
+ if (!group_->bind_generates_resource()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBindRenderbuffer",
+ "id not generated by glGenRenderbuffers");
+ return;
+ }
+
+ // It's a new id so make a renderbuffer for it.
+ glGenRenderbuffersEXT(1, &service_id);
+ CreateRenderbuffer(client_id, service_id);
+ renderbuffer = GetRenderbuffer(client_id);
+ } else {
+ service_id = renderbuffer->service_id();
+ }
+ renderbuffer->MarkAsValid();
+ }
+ LogClientServiceForInfo(renderbuffer, client_id, "glBindRenderbuffer");
+ state_.bound_renderbuffer = renderbuffer;
+ state_.bound_renderbuffer_valid = true;
+ glBindRenderbufferEXT(GL_RENDERBUFFER, service_id);
+}
+
+void GLES2DecoderImpl::DoBindTexture(GLenum target, GLuint client_id) {
+ TextureRef* texture_ref = NULL;
+ GLuint service_id = 0;
+ if (client_id != 0) {
+ texture_ref = GetTexture(client_id);
+ if (!texture_ref) {
+ if (!group_->bind_generates_resource()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBindTexture",
+ "id not generated by glGenTextures");
+ return;
+ }
+
+      // It's a new id so make a texture for it.
+ glGenTextures(1, &service_id);
+ DCHECK_NE(0u, service_id);
+ CreateTexture(client_id, service_id);
+ texture_ref = GetTexture(client_id);
+ }
+ } else {
+ texture_ref = texture_manager()->GetDefaultTextureInfo(target);
+ }
+
+ // Check the texture exists
+ if (texture_ref) {
+ Texture* texture = texture_ref->texture();
+ // Check that we are not trying to bind it to a different target.
+ if (texture->target() != 0 && texture->target() != target) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBindTexture",
+ "texture bound to more than 1 target.");
+ return;
+ }
+ LogClientServiceForInfo(texture, client_id, "glBindTexture");
+ if (texture->target() == 0) {
+ texture_manager()->SetTarget(texture_ref, target);
+ }
+ glBindTexture(target, texture->service_id());
+ } else {
+ glBindTexture(target, 0);
+ }
+
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ unit.bind_target = target;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ unit.bound_texture_2d = texture_ref;
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ unit.bound_texture_cube_map = texture_ref;
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ unit.bound_texture_external_oes = texture_ref;
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ unit.bound_texture_rectangle_arb = texture_ref;
+ break;
+ default:
+ NOTREACHED(); // Validation should prevent us from getting here.
+ break;
+ }
+}
+
+void GLES2DecoderImpl::DoDisableVertexAttribArray(GLuint index) {
+ if (state_.vertex_attrib_manager->Enable(index, false)) {
+ if (index != 0 ||
+ gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ glDisableVertexAttribArray(index);
+ }
+ } else {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glDisableVertexAttribArray", "index out of range");
+ }
+}
+
+void GLES2DecoderImpl::DoDiscardFramebufferEXT(GLenum target,
+ GLsizei numAttachments,
+ const GLenum* attachments) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_FRAMEBUFFER);
+
+ // Validates the attachments. If one of them fails
+ // the whole command fails.
+ for (GLsizei i = 0; i < numAttachments; ++i) {
+ if ((framebuffer &&
+ !validators_->attachment.IsValid(attachments[i])) ||
+ (!framebuffer &&
+ !validators_->backbuffer_attachment.IsValid(attachments[i]))) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glDiscardFramebufferEXT", attachments[i], "attachments");
+ return;
+ }
+ }
+
+ // Mark each attachment as not cleared.
+ for (GLsizei i = 0; i < numAttachments; ++i) {
+ if (framebuffer) {
+ framebuffer->MarkAttachmentAsCleared(renderbuffer_manager(),
+ texture_manager(),
+ attachments[i],
+ false);
+ } else {
+ switch (attachments[i]) {
+ case GL_COLOR_EXT:
+ backbuffer_needs_clear_bits_ |= GL_COLOR_BUFFER_BIT;
+ break;
+ case GL_DEPTH_EXT:
+ backbuffer_needs_clear_bits_ |= GL_DEPTH_BUFFER_BIT;
+ break;
+ case GL_STENCIL_EXT:
+ backbuffer_needs_clear_bits_ |= GL_STENCIL_BUFFER_BIT;
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ }
+ }
+
+ // If the client has the default framebuffer bound but we are rendering to a
+ // simulated backbuffer FBO, translate attachment names that refer to default
+ // framebuffer channels to the corresponding FBO attachments.
+ scoped_ptr<GLenum[]> translated_attachments(new GLenum[numAttachments]);
+ for (GLsizei i = 0; i < numAttachments; ++i) {
+ GLenum attachment = attachments[i];
+ if (!framebuffer && GetBackbufferServiceId()) {
+ switch (attachment) {
+ case GL_COLOR_EXT:
+ attachment = GL_COLOR_ATTACHMENT0;
+ break;
+ case GL_DEPTH_EXT:
+ attachment = GL_DEPTH_ATTACHMENT;
+ break;
+ case GL_STENCIL_EXT:
+ attachment = GL_STENCIL_ATTACHMENT;
+ break;
+ default:
+ NOTREACHED();
+ return;
+ }
+ }
+ translated_attachments[i] = attachment;
+ }
+
+ glDiscardFramebufferEXT(target, numAttachments, translated_attachments.get());
+}
+
+void GLES2DecoderImpl::DoEnableVertexAttribArray(GLuint index) {
+ if (state_.vertex_attrib_manager->Enable(index, true)) {
+ glEnableVertexAttribArray(index);
+ } else {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glEnableVertexAttribArray", "index out of range");
+ }
+}
+
+void GLES2DecoderImpl::DoGenerateMipmap(GLenum target) {
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref ||
+ !texture_manager()->CanGenerateMipmaps(texture_ref)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glGenerateMipmap", "Can not generate mips");
+ return;
+ }
+
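+ // glGenerateMipmap reads from level 0, so make sure level 0 (of every cube
+ // face, if applicable) has been cleared before generating the mip chain.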
+ if (target == GL_TEXTURE_CUBE_MAP) {
+ for (int i = 0; i < 6; ++i) {
+ GLenum face = GL_TEXTURE_CUBE_MAP_POSITIVE_X + i;
+ if (!texture_manager()->ClearTextureLevel(this, texture_ref, face, 0)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glGenerateMipmap", "dimensions too big");
+ return;
+ }
+ }
+ } else {
+ if (!texture_manager()->ClearTextureLevel(this, texture_ref, target, 0)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glGenerateMipmap", "dimensions too big");
+ return;
+ }
+ }
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glGenerateMipmap");
+ // Workaround for Mac driver bug. In the grand scheme of things, setting
+ // glTexParameter twice for glGenerateMipmap is probably not a large
+ // performance hit so there's probably no need to make this conditional. The
+ // bug appears to be that if the filtering mode is set to something that
+ // doesn't require mipmaps for rendering, or is never set to something other
+ // than the default, then glGenerateMipmap misbehaves.
+ if (workarounds().set_texture_filter_before_generating_mipmap) {
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
+ }
+ glGenerateMipmapEXT(target);
+ if (workarounds().set_texture_filter_before_generating_mipmap) {
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER,
+ texture_ref->texture()->min_filter());
+ }
+ GLenum error = LOCAL_PEEK_GL_ERROR("glGenerateMipmap");
+ if (error == GL_NO_ERROR) {
+ texture_manager()->MarkMipmapsGenerated(texture_ref);
+ }
+}
+
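+// Answers glGet* queries that the decoder tracks itself. Writes the value(s)
+// into |params| when it is non-NULL and the value count into |num_written|.
+// Returns false when the query should fall through to the driver.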
+bool GLES2DecoderImpl::GetHelper(
+ GLenum pname, GLint* params, GLsizei* num_written) {
+ DCHECK(num_written);
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
+ switch (pname) {
+ case GL_IMPLEMENTATION_COLOR_READ_FORMAT:
+ *num_written = 1;
+        // Return the GL implementation's preferred format (and, below, type)
+        // if we have the GL extension that exposes this. This allows the GPU
+        // client to use the implementation's preferred format for glReadPixels
+        // as an optimization.
+        //
+        // A conflicting extension (GL_ARB_ES2_compatibility) specifies an
+        // error case when this is queried on integer/floating-point buffers,
+        // which is nevertheless acceptable on GLES2 and with the
+        // GL_OES_read_format extension.
+        //
+        // Therefore, if an error occurs, we swallow it and use the internal
+        // implementation.
+ if (params) {
+ if (context_->HasExtension("GL_OES_read_format")) {
+ ScopedGLErrorSuppressor suppressor("GLES2DecoderImpl::GetHelper",
+ GetErrorState());
+ glGetIntegerv(pname, params);
+ if (glGetError() == GL_NO_ERROR)
+ return true;
+ }
+ *params = GLES2Util::GetPreferredGLReadPixelsFormat(
+ GetBoundReadFrameBufferInternalFormat());
+ }
+ return true;
+ case GL_IMPLEMENTATION_COLOR_READ_TYPE:
+ *num_written = 1;
+ if (params) {
+ if (context_->HasExtension("GL_OES_read_format")) {
+ ScopedGLErrorSuppressor suppressor("GLES2DecoderImpl::GetHelper",
+ GetErrorState());
+ glGetIntegerv(pname, params);
+ if (glGetError() == GL_NO_ERROR)
+ return true;
+ }
+ *params = GLES2Util::GetPreferredGLReadPixelsType(
+ GetBoundReadFrameBufferInternalFormat(),
+ GetBoundReadFrameBufferTextureType());
+ }
+ return true;
+ case GL_MAX_FRAGMENT_UNIFORM_VECTORS:
+ *num_written = 1;
+ if (params) {
+ *params = group_->max_fragment_uniform_vectors();
+ }
+ return true;
+ case GL_MAX_VARYING_VECTORS:
+ *num_written = 1;
+ if (params) {
+ *params = group_->max_varying_vectors();
+ }
+ return true;
+ case GL_MAX_VERTEX_UNIFORM_VECTORS:
+ *num_written = 1;
+ if (params) {
+ *params = group_->max_vertex_uniform_vectors();
+ }
+ return true;
+ }
+ }
+ switch (pname) {
+ case GL_MAX_VIEWPORT_DIMS:
+ if (offscreen_target_frame_buffer_.get()) {
+ *num_written = 2;
+ if (params) {
+ params[0] = renderbuffer_manager()->max_renderbuffer_size();
+ params[1] = renderbuffer_manager()->max_renderbuffer_size();
+ }
+ return true;
+ }
+ return false;
+ case GL_MAX_SAMPLES:
+ *num_written = 1;
+ if (params) {
+ params[0] = renderbuffer_manager()->max_samples();
+ }
+ return true;
+ case GL_MAX_RENDERBUFFER_SIZE:
+ *num_written = 1;
+ if (params) {
+ params[0] = renderbuffer_manager()->max_renderbuffer_size();
+ }
+ return true;
+ case GL_MAX_TEXTURE_SIZE:
+ *num_written = 1;
+ if (params) {
+ params[0] = texture_manager()->MaxSizeForTarget(GL_TEXTURE_2D);
+ }
+ return true;
+ case GL_MAX_CUBE_MAP_TEXTURE_SIZE:
+ *num_written = 1;
+ if (params) {
+ params[0] = texture_manager()->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP);
+ }
+ return true;
+ case GL_MAX_COLOR_ATTACHMENTS_EXT:
+ *num_written = 1;
+ if (params) {
+ params[0] = group_->max_color_attachments();
+ }
+ return true;
+ case GL_MAX_DRAW_BUFFERS_ARB:
+ *num_written = 1;
+ if (params) {
+ params[0] = group_->max_draw_buffers();
+ }
+ return true;
+ case GL_ALPHA_BITS:
+ *num_written = 1;
+ if (params) {
+ GLint v = 0;
+ glGetIntegerv(GL_ALPHA_BITS, &v);
+ params[0] = BoundFramebufferHasColorAttachmentWithAlpha(false) ? v : 0;
+ }
+ return true;
+ case GL_DEPTH_BITS:
+ *num_written = 1;
+ if (params) {
+ GLint v = 0;
+ glGetIntegerv(GL_DEPTH_BITS, &v);
+ params[0] = BoundFramebufferHasDepthAttachment() ? v : 0;
+ }
+ return true;
+ case GL_STENCIL_BITS:
+ *num_written = 1;
+ if (params) {
+ GLint v = 0;
+ glGetIntegerv(GL_STENCIL_BITS, &v);
+ params[0] = BoundFramebufferHasStencilAttachment() ? v : 0;
+ }
+ return true;
+ case GL_COMPRESSED_TEXTURE_FORMATS:
+ *num_written = validators_->compressed_texture_format.GetValues().size();
+ if (params) {
+ for (GLint ii = 0; ii < *num_written; ++ii) {
+ params[ii] = validators_->compressed_texture_format.GetValues()[ii];
+ }
+ }
+ return true;
+ case GL_NUM_COMPRESSED_TEXTURE_FORMATS:
+ *num_written = 1;
+ if (params) {
+ *params = validators_->compressed_texture_format.GetValues().size();
+ }
+ return true;
+ case GL_NUM_SHADER_BINARY_FORMATS:
+ *num_written = 1;
+ if (params) {
+ *params = validators_->shader_binary_format.GetValues().size();
+ }
+ return true;
+ case GL_SHADER_BINARY_FORMATS:
+ *num_written = validators_->shader_binary_format.GetValues().size();
+ if (params) {
+ for (GLint ii = 0; ii < *num_written; ++ii) {
+ params[ii] = validators_->shader_binary_format.GetValues()[ii];
+ }
+ }
+ return true;
+ case GL_SHADER_COMPILER:
+ *num_written = 1;
+ if (params) {
+ *params = GL_TRUE;
+ }
+ return true;
+ case GL_ARRAY_BUFFER_BINDING:
+ *num_written = 1;
+ if (params) {
+ if (state_.bound_array_buffer.get()) {
+ GLuint client_id = 0;
+ buffer_manager()->GetClientId(state_.bound_array_buffer->service_id(),
+ &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_ELEMENT_ARRAY_BUFFER_BINDING:
+ *num_written = 1;
+ if (params) {
+ if (state_.vertex_attrib_manager->element_array_buffer()) {
+ GLuint client_id = 0;
+ buffer_manager()->GetClientId(
+ state_.vertex_attrib_manager->element_array_buffer()->
+ service_id(), &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_FRAMEBUFFER_BINDING:
+ // case GL_DRAW_FRAMEBUFFER_BINDING_EXT: (same as GL_FRAMEBUFFER_BINDING)
+ *num_written = 1;
+ if (params) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_FRAMEBUFFER);
+ if (framebuffer) {
+ GLuint client_id = 0;
+ framebuffer_manager()->GetClientId(
+ framebuffer->service_id(), &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_READ_FRAMEBUFFER_BINDING_EXT:
+ *num_written = 1;
+ if (params) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_READ_FRAMEBUFFER_EXT);
+ if (framebuffer) {
+ GLuint client_id = 0;
+ framebuffer_manager()->GetClientId(
+ framebuffer->service_id(), &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_RENDERBUFFER_BINDING:
+ *num_written = 1;
+ if (params) {
+ Renderbuffer* renderbuffer =
+ GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (renderbuffer) {
+ *params = renderbuffer->client_id();
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_CURRENT_PROGRAM:
+ *num_written = 1;
+ if (params) {
+ if (state_.current_program.get()) {
+ GLuint client_id = 0;
+ program_manager()->GetClientId(
+ state_.current_program->service_id(), &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_VERTEX_ARRAY_BINDING_OES:
+ *num_written = 1;
+ if (params) {
+ if (state_.vertex_attrib_manager.get() !=
+ state_.default_vertex_attrib_manager.get()) {
+ GLuint client_id = 0;
+ vertex_array_manager_->GetClientId(
+ state_.vertex_attrib_manager->service_id(), &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_TEXTURE_BINDING_2D:
+ *num_written = 1;
+ if (params) {
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ if (unit.bound_texture_2d.get()) {
+ *params = unit.bound_texture_2d->client_id();
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_TEXTURE_BINDING_CUBE_MAP:
+ *num_written = 1;
+ if (params) {
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ if (unit.bound_texture_cube_map.get()) {
+ *params = unit.bound_texture_cube_map->client_id();
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_TEXTURE_BINDING_EXTERNAL_OES:
+ *num_written = 1;
+ if (params) {
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ if (unit.bound_texture_external_oes.get()) {
+ *params = unit.bound_texture_external_oes->client_id();
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_TEXTURE_BINDING_RECTANGLE_ARB:
+ *num_written = 1;
+ if (params) {
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ if (unit.bound_texture_rectangle_arb.get()) {
+ *params = unit.bound_texture_rectangle_arb->client_id();
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_UNPACK_FLIP_Y_CHROMIUM:
+ *num_written = 1;
+ if (params) {
+ params[0] = unpack_flip_y_;
+ }
+ return true;
+ case GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM:
+ *num_written = 1;
+ if (params) {
+ params[0] = unpack_premultiply_alpha_;
+ }
+ return true;
+ case GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM:
+ *num_written = 1;
+ if (params) {
+ params[0] = unpack_unpremultiply_alpha_;
+ }
+ return true;
+ case GL_BIND_GENERATES_RESOURCE_CHROMIUM:
+ *num_written = 1;
+ if (params) {
+ params[0] = group_->bind_generates_resource() ? 1 : 0;
+ }
+ return true;
+ default:
+ if (pname >= GL_DRAW_BUFFER0_ARB &&
+ pname < GL_DRAW_BUFFER0_ARB + group_->max_draw_buffers()) {
+ *num_written = 1;
+ if (params) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_FRAMEBUFFER);
+ if (framebuffer) {
+ params[0] = framebuffer->GetDrawBuffer(pname);
+ } else { // backbuffer
+ if (pname == GL_DRAW_BUFFER0_ARB)
+ params[0] = group_->draw_buffer();
+ else
+ params[0] = GL_NONE;
+ }
+ }
+ return true;
+ }
+ *num_written = util_.GLGetNumValuesReturned(pname);
+ return false;
+ }
+}
+
+bool GLES2DecoderImpl::GetNumValuesReturnedForGLGet(
+ GLenum pname, GLsizei* num_values) {
+ if (state_.GetStateAsGLint(pname, NULL, num_values)) {
+ return true;
+ }
+ return GetHelper(pname, NULL, num_values);
+}
+
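+// Remaps pnames that must be queried through the IMG-specific variant when
+// the IMG multisampled-render-to-texture path is in use.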
+GLenum GLES2DecoderImpl::AdjustGetPname(GLenum pname) {
+ if (GL_MAX_SAMPLES == pname &&
+ features().use_img_for_multisampled_render_to_texture) {
+ return GL_MAX_SAMPLES_IMG;
+ }
+ return pname;
+}
+
+void GLES2DecoderImpl::DoGetBooleanv(GLenum pname, GLboolean* params) {
+ DCHECK(params);
+ GLsizei num_written = 0;
+ if (GetNumValuesReturnedForGLGet(pname, &num_written)) {
+ scoped_ptr<GLint[]> values(new GLint[num_written]);
+ if (!state_.GetStateAsGLint(pname, values.get(), &num_written)) {
+ GetHelper(pname, values.get(), &num_written);
+ }
+ for (GLsizei ii = 0; ii < num_written; ++ii) {
+ params[ii] = static_cast<GLboolean>(values[ii]);
+ }
+ } else {
+ pname = AdjustGetPname(pname);
+ glGetBooleanv(pname, params);
+ }
+}
+
+void GLES2DecoderImpl::DoGetFloatv(GLenum pname, GLfloat* params) {
+ DCHECK(params);
+ GLsizei num_written = 0;
+ if (!state_.GetStateAsGLfloat(pname, params, &num_written)) {
+ if (GetHelper(pname, NULL, &num_written)) {
+ scoped_ptr<GLint[]> values(new GLint[num_written]);
+ GetHelper(pname, values.get(), &num_written);
+ for (GLsizei ii = 0; ii < num_written; ++ii) {
+ params[ii] = static_cast<GLfloat>(values[ii]);
+ }
+ } else {
+ pname = AdjustGetPname(pname);
+ glGetFloatv(pname, params);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DoGetIntegerv(GLenum pname, GLint* params) {
+ DCHECK(params);
+ GLsizei num_written;
+ if (!state_.GetStateAsGLint(pname, params, &num_written) &&
+ !GetHelper(pname, params, &num_written)) {
+ pname = AdjustGetPname(pname);
+ glGetIntegerv(pname, params);
+ }
+}
+
+void GLES2DecoderImpl::DoGetProgramiv(
+ GLuint program_id, GLenum pname, GLint* params) {
+ Program* program = GetProgramInfoNotShader(program_id, "glGetProgramiv");
+ if (!program) {
+ return;
+ }
+ program->GetProgramiv(pname, params);
+}
+
+void GLES2DecoderImpl::DoGetBufferParameteriv(
+ GLenum target, GLenum pname, GLint* params) {
+ // Just delegate it. Some validation is actually done before this.
+ buffer_manager()->ValidateAndDoGetBufferParameteriv(
+ &state_, target, pname, params);
+}
+
+void GLES2DecoderImpl::DoBindAttribLocation(
+ GLuint program_id, GLuint index, const char* name) {
+ if (!StringIsValidForGLES(name)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glBindAttribLocation", "Invalid character");
+ return;
+ }
+ if (ProgramManager::IsInvalidPrefix(name, strlen(name))) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glBindAttribLocation", "reserved prefix");
+ return;
+ }
+ if (index >= group_->max_vertex_attribs()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glBindAttribLocation", "index out of range");
+ return;
+ }
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glBindAttribLocation");
+ if (!program) {
+ return;
+ }
+ program->SetAttribLocationBinding(name, static_cast<GLint>(index));
+ glBindAttribLocation(program->service_id(), index, name);
+}
+
+error::Error GLES2DecoderImpl::HandleBindAttribLocationBucket(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindAttribLocationBucket& c =
+ *static_cast<const gles2::cmds::BindAttribLocationBucket*>(cmd_data);
+ GLuint program = static_cast<GLuint>(c.program);
+ GLuint index = static_cast<GLuint>(c.index);
+ Bucket* bucket = GetBucket(c.name_bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string name_str;
+ if (!bucket->GetAsString(&name_str)) {
+ return error::kInvalidArguments;
+ }
+ DoBindAttribLocation(program, index, name_str.c_str());
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoBindUniformLocationCHROMIUM(
+ GLuint program_id, GLint location, const char* name) {
+ if (!StringIsValidForGLES(name)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glBindUniformLocationCHROMIUM", "Invalid character");
+ return;
+ }
+ if (ProgramManager::IsInvalidPrefix(name, strlen(name))) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindUniformLocationCHROMIUM", "reserved prefix");
+ return;
+ }
+ if (location < 0 || static_cast<uint32>(location) >=
+ (group_->max_fragment_uniform_vectors() +
+ group_->max_vertex_uniform_vectors()) * 4) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glBindUniformLocationCHROMIUM", "location out of range");
+ return;
+ }
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glBindUniformLocationCHROMIUM");
+ if (!program) {
+ return;
+ }
+ if (!program->SetUniformLocationBinding(name, location)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glBindUniformLocationCHROMIUM", "location out of range");
+ }
+}
+
+error::Error GLES2DecoderImpl::HandleBindUniformLocationCHROMIUMBucket(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindUniformLocationCHROMIUMBucket& c =
+ *static_cast<const gles2::cmds::BindUniformLocationCHROMIUMBucket*>(
+ cmd_data);
+ GLuint program = static_cast<GLuint>(c.program);
+ GLint location = static_cast<GLint>(c.location);
+ Bucket* bucket = GetBucket(c.name_bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string name_str;
+ if (!bucket->GetAsString(&name_str)) {
+ return error::kInvalidArguments;
+ }
+ DoBindUniformLocationCHROMIUM(program, location, name_str.c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteShader(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteShader& c =
+ *static_cast<const gles2::cmds::DeleteShader*>(cmd_data);
+ GLuint client_id = c.shader;
+ if (client_id) {
+ Shader* shader = GetShader(client_id);
+ if (shader) {
+ if (!shader->IsDeleted()) {
+ glDeleteShader(shader->service_id());
+ shader_manager()->MarkAsDeleted(shader);
+ }
+ } else {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glDeleteShader", "unknown shader");
+ }
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteProgram(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteProgram& c =
+ *static_cast<const gles2::cmds::DeleteProgram*>(cmd_data);
+ GLuint client_id = c.program;
+ if (client_id) {
+ Program* program = GetProgram(client_id);
+ if (program) {
+ if (!program->IsDeleted()) {
+ program_manager()->MarkAsDeleted(shader_manager(), program);
+ }
+ } else {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glDeleteProgram", "unknown program");
+ }
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::DoClear(GLbitfield mask) {
+ DCHECK(!ShouldDeferDraws());
+ if (CheckBoundFramebuffersValid("glClear")) {
+ ApplyDirtyState();
+ glClear(mask);
+ }
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoFramebufferRenderbuffer(
+ GLenum target, GLenum attachment, GLenum renderbuffertarget,
+ GLuint client_renderbuffer_id) {
+ Framebuffer* framebuffer = GetFramebufferInfoForTarget(target);
+ if (!framebuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glFramebufferRenderbuffer", "no framebuffer bound");
+ return;
+ }
+ GLuint service_id = 0;
+ Renderbuffer* renderbuffer = NULL;
+ if (client_renderbuffer_id) {
+ renderbuffer = GetRenderbuffer(client_renderbuffer_id);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glFramebufferRenderbuffer", "unknown renderbuffer");
+ return;
+ }
+ service_id = renderbuffer->service_id();
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glFramebufferRenderbuffer");
+ glFramebufferRenderbufferEXT(
+ target, attachment, renderbuffertarget, service_id);
+ GLenum error = LOCAL_PEEK_GL_ERROR("glFramebufferRenderbuffer");
+ if (error == GL_NO_ERROR) {
+ framebuffer->AttachRenderbuffer(attachment, renderbuffer);
+ }
+ if (framebuffer == framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ OnFboChanged();
+}
+
+void GLES2DecoderImpl::DoDisable(GLenum cap) {
+ if (SetCapabilityState(cap, false)) {
+ glDisable(cap);
+ }
+}
+
+void GLES2DecoderImpl::DoEnable(GLenum cap) {
+ if (SetCapabilityState(cap, true)) {
+ glEnable(cap);
+ }
+}
+
+void GLES2DecoderImpl::DoDepthRangef(GLclampf znear, GLclampf zfar) {
+ state_.z_near = std::min(1.0f, std::max(0.0f, znear));
+ state_.z_far = std::min(1.0f, std::max(0.0f, zfar));
+ glDepthRange(znear, zfar);
+}
+
+void GLES2DecoderImpl::DoSampleCoverage(GLclampf value, GLboolean invert) {
+ state_.sample_coverage_value = std::min(1.0f, std::max(0.0f, value));
+ state_.sample_coverage_invert = (invert != 0);
+ glSampleCoverage(state_.sample_coverage_value, invert);
+}
+
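+// Clears any attachments of |framebuffer| that still contain undefined data,
+// with the scissor test disabled, and then restores the clear-related state.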
+// Assumes framebuffer is complete.
+void GLES2DecoderImpl::ClearUnclearedAttachments(
+ GLenum target, Framebuffer* framebuffer) {
+ if (target == GL_READ_FRAMEBUFFER_EXT) {
+    // Bind this to the DRAW point, clear, then bind back to READ.
+ // TODO(gman): I don't think there is any guarantee that an FBO that
+ // is complete on the READ attachment will be complete as a DRAW
+ // attachment.
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER_EXT, 0);
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, framebuffer->service_id());
+ }
+ GLbitfield clear_bits = 0;
+ if (framebuffer->HasUnclearedColorAttachments()) {
+ glClearColor(
+ 0.0f, 0.0f, 0.0f,
+ (GLES2Util::GetChannelsForFormat(
+ framebuffer->GetColorAttachmentFormat()) & 0x0008) != 0 ? 0.0f :
+ 1.0f);
+ state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ clear_bits |= GL_COLOR_BUFFER_BIT;
+ if (feature_info_->feature_flags().ext_draw_buffers)
+ framebuffer->PrepareDrawBuffersForClear();
+ }
+
+ if (framebuffer->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT) ||
+ framebuffer->HasUnclearedAttachment(GL_DEPTH_STENCIL_ATTACHMENT)) {
+ glClearStencil(0);
+ state_.SetDeviceStencilMaskSeparate(GL_FRONT, kDefaultStencilMask);
+ state_.SetDeviceStencilMaskSeparate(GL_BACK, kDefaultStencilMask);
+ clear_bits |= GL_STENCIL_BUFFER_BIT;
+ }
+
+ if (framebuffer->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT) ||
+ framebuffer->HasUnclearedAttachment(GL_DEPTH_STENCIL_ATTACHMENT)) {
+ glClearDepth(1.0f);
+ state_.SetDeviceDepthMask(GL_TRUE);
+ clear_bits |= GL_DEPTH_BUFFER_BIT;
+ }
+
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ glClear(clear_bits);
+
+  if ((clear_bits & GL_COLOR_BUFFER_BIT) != 0 &&
+ feature_info_->feature_flags().ext_draw_buffers)
+ framebuffer->RestoreDrawBuffersAfterClear();
+
+ framebuffer_manager()->MarkAttachmentsAsCleared(
+ framebuffer, renderbuffer_manager(), texture_manager());
+
+ RestoreClearState();
+
+ if (target == GL_READ_FRAMEBUFFER_EXT) {
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER_EXT, framebuffer->service_id());
+ Framebuffer* draw_framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ GLuint service_id = draw_framebuffer ? draw_framebuffer->service_id() :
+ GetBackbufferServiceId();
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, service_id);
+ }
+}
+
+void GLES2DecoderImpl::RestoreClearState() {
+ framebuffer_state_.clear_state_dirty = true;
+ glClearColor(
+ state_.color_clear_red, state_.color_clear_green, state_.color_clear_blue,
+ state_.color_clear_alpha);
+ glClearStencil(state_.stencil_clear);
+ glClearDepth(state_.depth_clear);
+ if (state_.enable_flags.scissor_test) {
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, true);
+ }
+}
+
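+// The default framebuffer is always reported as complete; user framebuffers
+// are screened with IsPossiblyComplete() before GetStatus() is consulted.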
+GLenum GLES2DecoderImpl::DoCheckFramebufferStatus(GLenum target) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(target);
+ if (!framebuffer) {
+ return GL_FRAMEBUFFER_COMPLETE;
+ }
+ GLenum completeness = framebuffer->IsPossiblyComplete();
+ if (completeness != GL_FRAMEBUFFER_COMPLETE) {
+ return completeness;
+ }
+ return framebuffer->GetStatus(texture_manager(), target);
+}
+
+void GLES2DecoderImpl::DoFramebufferTexture2D(
+ GLenum target, GLenum attachment, GLenum textarget,
+ GLuint client_texture_id, GLint level) {
+ DoFramebufferTexture2DCommon(
+ "glFramebufferTexture2D", target, attachment,
+ textarget, client_texture_id, level, 0);
+}
+
+void GLES2DecoderImpl::DoFramebufferTexture2DMultisample(
+ GLenum target, GLenum attachment, GLenum textarget,
+ GLuint client_texture_id, GLint level, GLsizei samples) {
+ DoFramebufferTexture2DCommon(
+ "glFramebufferTexture2DMultisample", target, attachment,
+ textarget, client_texture_id, level, samples);
+}
+
+void GLES2DecoderImpl::DoFramebufferTexture2DCommon(
+ const char* name, GLenum target, GLenum attachment, GLenum textarget,
+ GLuint client_texture_id, GLint level, GLsizei samples) {
+ if (samples > renderbuffer_manager()->max_samples()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glFramebufferTexture2DMultisample", "samples too large");
+ return;
+ }
+ Framebuffer* framebuffer = GetFramebufferInfoForTarget(target);
+ if (!framebuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ name, "no framebuffer bound.");
+ return;
+ }
+ GLuint service_id = 0;
+ TextureRef* texture_ref = NULL;
+ if (client_texture_id) {
+ texture_ref = GetTexture(client_texture_id);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ name, "unknown texture_ref");
+ return;
+ }
+ service_id = texture_ref->service_id();
+ }
+
+ if (!texture_manager()->ValidForTarget(textarget, level, 0, 0, 1)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ name, "level out of range");
+ return;
+ }
+
+ if (texture_ref)
+ DoWillUseTexImageIfNeeded(texture_ref->texture(), textarget);
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(name);
+ if (0 == samples) {
+ glFramebufferTexture2DEXT(target, attachment, textarget, service_id, level);
+ } else {
+ if (features().use_img_for_multisampled_render_to_texture) {
+ glFramebufferTexture2DMultisampleIMG(target, attachment, textarget,
+ service_id, level, samples);
+ } else {
+ glFramebufferTexture2DMultisampleEXT(target, attachment, textarget,
+ service_id, level, samples);
+ }
+ }
+ GLenum error = LOCAL_PEEK_GL_ERROR(name);
+ if (error == GL_NO_ERROR) {
+ framebuffer->AttachTexture(attachment, texture_ref, textarget, level,
+ samples);
+ }
+ if (framebuffer == framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+
+ if (texture_ref)
+ DoDidUseTexImageIfNeeded(texture_ref->texture(), textarget);
+
+ OnFboChanged();
+}
+
+void GLES2DecoderImpl::DoGetFramebufferAttachmentParameteriv(
+ GLenum target, GLenum attachment, GLenum pname, GLint* params) {
+ Framebuffer* framebuffer = GetFramebufferInfoForTarget(target);
+ if (!framebuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glGetFramebufferAttachmentParameteriv", "no framebuffer bound");
+ return;
+ }
+ if (pname == GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME) {
+ const Framebuffer::Attachment* attachment_object =
+ framebuffer->GetAttachment(attachment);
+ *params = attachment_object ? attachment_object->object_name() : 0;
+ } else {
+ if (pname == GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT &&
+ features().use_img_for_multisampled_render_to_texture) {
+ pname = GL_TEXTURE_SAMPLES_IMG;
+ }
+ glGetFramebufferAttachmentParameterivEXT(target, attachment, pname, params);
+ }
+}
+
+void GLES2DecoderImpl::DoGetRenderbufferParameteriv(
+ GLenum target, GLenum pname, GLint* params) {
+ Renderbuffer* renderbuffer =
+ GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glGetRenderbufferParameteriv", "no renderbuffer bound");
+ return;
+ }
+
+ EnsureRenderbufferBound();
+ switch (pname) {
+ case GL_RENDERBUFFER_INTERNAL_FORMAT:
+ *params = renderbuffer->internal_format();
+ break;
+ case GL_RENDERBUFFER_WIDTH:
+ *params = renderbuffer->width();
+ break;
+ case GL_RENDERBUFFER_HEIGHT:
+ *params = renderbuffer->height();
+ break;
+ case GL_RENDERBUFFER_SAMPLES_EXT:
+ if (features().use_img_for_multisampled_render_to_texture) {
+ glGetRenderbufferParameterivEXT(target, GL_RENDERBUFFER_SAMPLES_IMG,
+ params);
+ } else {
+ glGetRenderbufferParameterivEXT(target, GL_RENDERBUFFER_SAMPLES_EXT,
+ params);
+      }
+      break;
+ default:
+ glGetRenderbufferParameterivEXT(target, pname, params);
+ break;
+ }
+}
+
+void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
+ GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter) {
+ DCHECK(!ShouldDeferReads() && !ShouldDeferDraws());
+
+ if (!CheckBoundFramebuffersValid("glBlitFramebufferCHROMIUM")) {
+ return;
+ }
+
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ BlitFramebufferHelper(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST,
+ state_.enable_flags.scissor_test);
+}
+
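+// Rebinds the tracked renderbuffer at the driver level if an earlier
+// operation may have left the real GL_RENDERBUFFER binding out of sync.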
+void GLES2DecoderImpl::EnsureRenderbufferBound() {
+ if (!state_.bound_renderbuffer_valid) {
+ state_.bound_renderbuffer_valid = true;
+ glBindRenderbufferEXT(GL_RENDERBUFFER,
+ state_.bound_renderbuffer.get()
+ ? state_.bound_renderbuffer->service_id()
+ : 0);
+ }
+}
+
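+// Dispatches to the ANGLE, core, or EXT entry point depending on which
+// multisample path the driver exposes.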
+void GLES2DecoderImpl::RenderbufferStorageMultisampleHelper(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLsizei samples,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height) {
+ // TODO(sievers): This could be resolved at the GL binding level, but the
+ // binding process is currently a bit too 'brute force'.
+ if (feature_info->feature_flags().is_angle) {
+ glRenderbufferStorageMultisampleANGLE(
+ target, samples, internal_format, width, height);
+ } else if (feature_info->feature_flags().use_core_framebuffer_multisample) {
+ glRenderbufferStorageMultisample(
+ target, samples, internal_format, width, height);
+ } else {
+ glRenderbufferStorageMultisampleEXT(
+ target, samples, internal_format, width, height);
+ }
+}
+
+void GLES2DecoderImpl::BlitFramebufferHelper(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) {
+ // TODO(sievers): This could be resolved at the GL binding level, but the
+ // binding process is currently a bit too 'brute force'.
+ if (feature_info_->feature_flags().is_angle) {
+ glBlitFramebufferANGLE(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ } else if (feature_info_->feature_flags().use_core_framebuffer_multisample) {
+ glBlitFramebuffer(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ } else {
+ glBlitFramebufferEXT(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ }
+}
+
+bool GLES2DecoderImpl::ValidateRenderbufferStorageMultisample(
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ if (samples > renderbuffer_manager()->max_samples()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisample", "samples too large");
+ return false;
+ }
+
+ if (width > renderbuffer_manager()->max_renderbuffer_size() ||
+ height > renderbuffer_manager()->max_renderbuffer_size()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisample", "dimensions too large");
+ return false;
+ }
+
+ uint32 estimated_size = 0;
+ if (!renderbuffer_manager()->ComputeEstimatedRenderbufferSize(
+ width, height, samples, internalformat, &estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY,
+ "glRenderbufferStorageMultisample", "dimensions too large");
+ return false;
+ }
+
+ if (!EnsureGPUMemoryAvailable(estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY,
+ "glRenderbufferStorageMultisample", "out of memory");
+ return false;
+ }
+
+ return true;
+}
+
+void GLES2DecoderImpl::DoRenderbufferStorageMultisampleCHROMIUM(
+ GLenum target, GLsizei samples, GLenum internalformat,
+ GLsizei width, GLsizei height) {
+ Renderbuffer* renderbuffer = GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "no renderbuffer bound");
+ return;
+ }
+
+ if (!ValidateRenderbufferStorageMultisample(
+ samples, internalformat, width, height)) {
+ return;
+ }
+
+ EnsureRenderbufferBound();
+ GLenum impl_format =
+ renderbuffer_manager()->InternalRenderbufferFormatToImplFormat(
+ internalformat);
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(
+ "glRenderbufferStorageMultisampleCHROMIUM");
+ RenderbufferStorageMultisampleHelper(
+ feature_info_.get(), target, samples, impl_format, width, height);
+ GLenum error =
+ LOCAL_PEEK_GL_ERROR("glRenderbufferStorageMultisampleCHROMIUM");
+ if (error == GL_NO_ERROR) {
+ if (workarounds().validate_multisample_buffer_allocation) {
+ if (!VerifyMultisampleRenderbufferIntegrity(
+ renderbuffer->service_id(), impl_format)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY,
+ "glRenderbufferStorageMultisampleCHROMIUM", "out of memory");
+ return;
+ }
+ }
+
+ // TODO(gman): If renderbuffers tracked which framebuffers they were
+ // attached to we could just mark those framebuffers as not complete.
+ framebuffer_manager()->IncFramebufferStateChangeCount();
+ renderbuffer_manager()->SetInfo(
+ renderbuffer, samples, internalformat, width, height);
+ }
+}
+
+// This is the handler for multisampled_render_to_texture extensions.
+void GLES2DecoderImpl::DoRenderbufferStorageMultisampleEXT(
+ GLenum target, GLsizei samples, GLenum internalformat,
+ GLsizei width, GLsizei height) {
+ Renderbuffer* renderbuffer = GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glRenderbufferStorageMultisampleEXT",
+ "no renderbuffer bound");
+ return;
+ }
+
+ if (!ValidateRenderbufferStorageMultisample(
+ samples, internalformat, width, height)) {
+ return;
+ }
+
+ EnsureRenderbufferBound();
+ GLenum impl_format =
+ renderbuffer_manager()->InternalRenderbufferFormatToImplFormat(
+ internalformat);
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glRenderbufferStorageMultisampleEXT");
+ if (features().use_img_for_multisampled_render_to_texture) {
+ glRenderbufferStorageMultisampleIMG(
+ target, samples, impl_format, width, height);
+ } else {
+ glRenderbufferStorageMultisampleEXT(
+ target, samples, impl_format, width, height);
+ }
+ GLenum error = LOCAL_PEEK_GL_ERROR("glRenderbufferStorageMultisampleEXT");
+ if (error == GL_NO_ERROR) {
+ // TODO(gman): If renderbuffers tracked which framebuffers they were
+ // attached to we could just mark those framebuffers as not complete.
+ framebuffer_manager()->IncFramebufferStateChangeCount();
+ renderbuffer_manager()->SetInfo(
+ renderbuffer, samples, internalformat, width, height);
+ }
+}
+
+// This function validates the allocation of a multisampled renderbuffer
+// by clearing it to a key color, blitting the contents to a texture, and
+// reading back the color to ensure it matches the key.
+bool GLES2DecoderImpl::VerifyMultisampleRenderbufferIntegrity(
+ GLuint renderbuffer, GLenum format) {
+
+ // Only validate color buffers.
+ // These formats have been selected because they are very common or are known
+ // to be used by the WebGL backbuffer. If problems are observed with other
+ // color formats they can be added here.
+  switch (format) {
+ case GL_RGB:
+ case GL_RGB8:
+ case GL_RGBA:
+ case GL_RGBA8:
+ break;
+ default:
+ return true;
+ }
+
+ GLint draw_framebuffer, read_framebuffer;
+
+ // Cache framebuffer and texture bindings.
+ glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &draw_framebuffer);
+ glGetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &read_framebuffer);
+
+ if (!validation_texture_) {
+ GLint bound_texture;
+ glGetIntegerv(GL_TEXTURE_BINDING_2D, &bound_texture);
+
+ // Create additional resources needed for the verification.
+ glGenTextures(1, &validation_texture_);
+ glGenFramebuffersEXT(1, &validation_fbo_multisample_);
+ glGenFramebuffersEXT(1, &validation_fbo_);
+
+ // Texture only needs to be 1x1.
+ glBindTexture(GL_TEXTURE_2D, validation_texture_);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 1, 1, 0, GL_RGB,
+ GL_UNSIGNED_BYTE, NULL);
+
+ glBindFramebufferEXT(GL_FRAMEBUFFER, validation_fbo_);
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, validation_texture_, 0);
+
+ glBindTexture(GL_TEXTURE_2D, bound_texture);
+ }
+
+ glBindFramebufferEXT(GL_FRAMEBUFFER, validation_fbo_multisample_);
+ glFramebufferRenderbufferEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER, renderbuffer);
+
+ // Cache current state and reset it to the values we require.
+ GLboolean scissor_enabled = false;
+ glGetBooleanv(GL_SCISSOR_TEST, &scissor_enabled);
+ if (scissor_enabled)
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+
+ GLboolean color_mask[4] = {GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE};
+ glGetBooleanv(GL_COLOR_WRITEMASK, color_mask);
+ state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+
+ GLfloat clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
+ glGetFloatv(GL_COLOR_CLEAR_VALUE, clear_color);
+ glClearColor(1.0f, 0.0f, 1.0f, 1.0f);
+
+ // Clear the buffer to the desired key color.
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ // Blit from the multisample buffer to a standard texture.
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER, validation_fbo_multisample_);
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER, validation_fbo_);
+
+ BlitFramebufferHelper(
+ 0, 0, 1, 1, 0, 0, 1, 1, GL_COLOR_BUFFER_BIT, GL_NEAREST);
+
+ // Read a pixel from the buffer.
+ glBindFramebufferEXT(GL_FRAMEBUFFER, validation_fbo_);
+
+ unsigned char pixel[3] = {0, 0, 0};
+ glReadPixels(0, 0, 1, 1, GL_RGB, GL_UNSIGNED_BYTE, &pixel);
+
+ // Detach the renderbuffer.
+ glBindFramebufferEXT(GL_FRAMEBUFFER, validation_fbo_multisample_);
+ glFramebufferRenderbufferEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER, 0);
+
+ // Restore cached state.
+ if (scissor_enabled)
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, true);
+
+ state_.SetDeviceColorMask(
+ color_mask[0], color_mask[1], color_mask[2], color_mask[3]);
+ glClearColor(clear_color[0], clear_color[1], clear_color[2], clear_color[3]);
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER, draw_framebuffer);
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER, read_framebuffer);
+
+ // Return true if the pixel matched the desired key color.
+ return (pixel[0] == 0xFF &&
+ pixel[1] == 0x00 &&
+ pixel[2] == 0xFF);
+}
+
+void GLES2DecoderImpl::DoRenderbufferStorage(
+ GLenum target, GLenum internalformat, GLsizei width, GLsizei height) {
+ Renderbuffer* renderbuffer =
+ GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glRenderbufferStorage", "no renderbuffer bound");
+ return;
+ }
+
+ if (width > renderbuffer_manager()->max_renderbuffer_size() ||
+ height > renderbuffer_manager()->max_renderbuffer_size()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glRenderbufferStorage", "dimensions too large");
+ return;
+ }
+
+ uint32 estimated_size = 0;
+ if (!renderbuffer_manager()->ComputeEstimatedRenderbufferSize(
+ width, height, 1, internalformat, &estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glRenderbufferStorage", "dimensions too large");
+ return;
+ }
+
+ if (!EnsureGPUMemoryAvailable(estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glRenderbufferStorage", "out of memory");
+ return;
+ }
+
+ EnsureRenderbufferBound();
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glRenderbufferStorage");
+ glRenderbufferStorageEXT(
+ target,
+ renderbuffer_manager()->InternalRenderbufferFormatToImplFormat(
+ internalformat),
+ width,
+ height);
+ GLenum error = LOCAL_PEEK_GL_ERROR("glRenderbufferStorage");
+ if (error == GL_NO_ERROR) {
+    // TODO(gman): If textures tracked which framebuffers they were attached to
+ // we could just mark those framebuffers as not complete.
+ framebuffer_manager()->IncFramebufferStateChangeCount();
+ renderbuffer_manager()->SetInfo(
+ renderbuffer, 1, internalformat, width, height);
+ }
+}
+
+void GLES2DecoderImpl::DoLinkProgram(GLuint program_id) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoLinkProgram");
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glLinkProgram");
+ if (!program) {
+ return;
+ }
+
+ LogClientServiceForInfo(program, program_id, "glLinkProgram");
+ ShaderTranslator* vertex_translator = NULL;
+ ShaderTranslator* fragment_translator = NULL;
+ if (use_shader_translator_) {
+ vertex_translator = vertex_translator_.get();
+ fragment_translator = fragment_translator_.get();
+ }
+ if (program->Link(shader_manager(),
+ vertex_translator,
+ fragment_translator,
+ workarounds().count_all_in_varyings_packing ?
+ Program::kCountAll : Program::kCountOnlyStaticallyUsed,
+ shader_cache_callback_)) {
+ if (program == state_.current_program.get()) {
+ if (workarounds().use_current_program_after_successful_link)
+ glUseProgram(program->service_id());
+ if (workarounds().clear_uniforms_before_first_program_use)
+ program_manager()->ClearUniforms(program);
+ }
+ }
+
+ // LinkProgram can be very slow. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
+void GLES2DecoderImpl::DoTexParameterf(
+ GLenum target, GLenum pname, GLfloat param) {
+ TextureRef* texture = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexParameterf", "unknown texture");
+ return;
+ }
+
+ texture_manager()->SetParameterf(
+ "glTexParameterf", GetErrorState(), texture, pname, param);
+}
+
+void GLES2DecoderImpl::DoTexParameteri(
+ GLenum target, GLenum pname, GLint param) {
+ TextureRef* texture = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexParameteri", "unknown texture");
+ return;
+ }
+
+ texture_manager()->SetParameteri(
+ "glTexParameteri", GetErrorState(), texture, pname, param);
+}
+
+void GLES2DecoderImpl::DoTexParameterfv(
+ GLenum target, GLenum pname, const GLfloat* params) {
+ TextureRef* texture = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexParameterfv", "unknown texture");
+ return;
+ }
+
+ texture_manager()->SetParameterf(
+ "glTexParameterfv", GetErrorState(), texture, pname, *params);
+}
+
+void GLES2DecoderImpl::DoTexParameteriv(
+ GLenum target, GLenum pname, const GLint* params) {
+ TextureRef* texture = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glTexParameteriv", "unknown texture");
+ return;
+ }
+
+ texture_manager()->SetParameteri(
+ "glTexParameteriv", GetErrorState(), texture, pname, *params);
+}
+
+bool GLES2DecoderImpl::CheckCurrentProgram(const char* function_name) {
+ if (!state_.current_program.get()) {
+ // The program does not exist.
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "no program in use");
+ return false;
+ }
+ if (!state_.current_program->InUse()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "program not linked");
+ return false;
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::CheckCurrentProgramForUniform(
+ GLint location, const char* function_name) {
+ if (!CheckCurrentProgram(function_name)) {
+ return false;
+ }
+ return location != -1;
+}
+
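+// Maps |fake_location| to the driver-level uniform location for the current
+// program, clamps |count| to the remaining array elements, and reports the
+// uniform's type. Returns false (setting a GL error where appropriate) if the
+// uniform cannot be set.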
+bool GLES2DecoderImpl::PrepForSetUniformByLocation(
+ GLint fake_location,
+ const char* function_name,
+ Program::UniformApiType api_type,
+ GLint* real_location,
+ GLenum* type,
+ GLsizei* count) {
+ DCHECK(type);
+ DCHECK(count);
+ DCHECK(real_location);
+
+ if (!CheckCurrentProgramForUniform(fake_location, function_name)) {
+ return false;
+ }
+ GLint array_index = -1;
+ const Program::UniformInfo* info =
+ state_.current_program->GetUniformInfoByFakeLocation(
+ fake_location, real_location, &array_index);
+ if (!info) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "unknown location");
+ return false;
+ }
+
+ if ((api_type & info->accepts_api_type) == 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "wrong uniform function for type");
+ return false;
+ }
+ if (*count > 1 && !info->is_array) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "count > 1 for non-array");
+ return false;
+ }
+ *count = std::min(info->size - array_index, *count);
+ if (*count <= 0) {
+ return false;
+ }
+ *type = info->type;
+ return true;
+}
+
+void GLES2DecoderImpl::DoUniform1i(GLint fake_location, GLint v0) {
+ GLenum type = 0;
+ GLsizei count = 1;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform1i",
+ Program::kUniform1i,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ if (!state_.current_program->SetSamplers(
+ state_.texture_units.size(), fake_location, 1, &v0)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glUniform1i", "texture unit out of range");
+ return;
+ }
+ glUniform1i(real_location, v0);
+}
+
+void GLES2DecoderImpl::DoUniform1iv(
+ GLint fake_location, GLsizei count, const GLint *value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform1iv",
+ Program::kUniform1i,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
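+  // Sampler uniforms carry texture unit indices; validate them against the
+  // available texture units before forwarding the call.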
+ if (type == GL_SAMPLER_2D || type == GL_SAMPLER_2D_RECT_ARB ||
+ type == GL_SAMPLER_CUBE || type == GL_SAMPLER_EXTERNAL_OES) {
+ if (!state_.current_program->SetSamplers(
+ state_.texture_units.size(), fake_location, count, value)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glUniform1iv", "texture unit out of range");
+ return;
+ }
+ }
+ glUniform1iv(real_location, count, value);
+}
+
+void GLES2DecoderImpl::DoUniform1fv(
+ GLint fake_location, GLsizei count, const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform1fv",
+ Program::kUniform1f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
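+  // Bool uniforms are forwarded through the integer path: non-zero floats
+  // become 1 and zeros become 0.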
+ if (type == GL_BOOL) {
+ scoped_ptr<GLint[]> temp(new GLint[count]);
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ temp[ii] = static_cast<GLint>(value[ii] != 0.0f);
+ }
+ DoUniform1iv(real_location, count, temp.get());
+ } else {
+ glUniform1fv(real_location, count, value);
+ }
+}
+
+void GLES2DecoderImpl::DoUniform2fv(
+ GLint fake_location, GLsizei count, const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform2fv",
+ Program::kUniform2f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ if (type == GL_BOOL_VEC2) {
+ GLsizei num_values = count * 2;
+ scoped_ptr<GLint[]> temp(new GLint[num_values]);
+ for (GLsizei ii = 0; ii < num_values; ++ii) {
+ temp[ii] = static_cast<GLint>(value[ii] != 0.0f);
+ }
+ glUniform2iv(real_location, count, temp.get());
+ } else {
+ glUniform2fv(real_location, count, value);
+ }
+}
+
+void GLES2DecoderImpl::DoUniform3fv(
+ GLint fake_location, GLsizei count, const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform3fv",
+ Program::kUniform3f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ if (type == GL_BOOL_VEC3) {
+ GLsizei num_values = count * 3;
+ scoped_ptr<GLint[]> temp(new GLint[num_values]);
+ for (GLsizei ii = 0; ii < num_values; ++ii) {
+ temp[ii] = static_cast<GLint>(value[ii] != 0.0f);
+ }
+ glUniform3iv(real_location, count, temp.get());
+ } else {
+ glUniform3fv(real_location, count, value);
+ }
+}
+
+void GLES2DecoderImpl::DoUniform4fv(
+ GLint fake_location, GLsizei count, const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform4fv",
+ Program::kUniform4f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ if (type == GL_BOOL_VEC4) {
+ GLsizei num_values = count * 4;
+ scoped_ptr<GLint[]> temp(new GLint[num_values]);
+ for (GLsizei ii = 0; ii < num_values; ++ii) {
+ temp[ii] = static_cast<GLint>(value[ii] != 0.0f);
+ }
+ glUniform4iv(real_location, count, temp.get());
+ } else {
+ glUniform4fv(real_location, count, value);
+ }
+}
+
+void GLES2DecoderImpl::DoUniform2iv(
+ GLint fake_location, GLsizei count, const GLint* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform2iv",
+ Program::kUniform2i,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniform2iv(real_location, count, value);
+}
+
+void GLES2DecoderImpl::DoUniform3iv(
+ GLint fake_location, GLsizei count, const GLint* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform3iv",
+ Program::kUniform3i,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniform3iv(real_location, count, value);
+}
+
+void GLES2DecoderImpl::DoUniform4iv(
+ GLint fake_location, GLsizei count, const GLint* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform4iv",
+ Program::kUniform4i,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniform4iv(real_location, count, value);
+}
+
+void GLES2DecoderImpl::DoUniformMatrix2fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniformMatrix2fv",
+ Program::kUniformMatrix2f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniformMatrix2fv(real_location, count, transpose, value);
+}
+
+void GLES2DecoderImpl::DoUniformMatrix3fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniformMatrix3fv",
+ Program::kUniformMatrix3f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniformMatrix3fv(real_location, count, transpose, value);
+}
+
+void GLES2DecoderImpl::DoUniformMatrix4fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniformMatrix4fv",
+ Program::kUniformMatrix4f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniformMatrix4fv(real_location, count, transpose, value);
+}
+
+void GLES2DecoderImpl::DoUseProgram(GLuint program_id) {
+ GLuint service_id = 0;
+ Program* program = NULL;
+ if (program_id) {
+ program = GetProgramInfoNotShader(program_id, "glUseProgram");
+ if (!program) {
+ return;
+ }
+ if (!program->IsValid()) {
+      // Program was not linked successfully (i.e., glLinkProgram failed).
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glUseProgram", "program not linked");
+ return;
+ }
+ service_id = program->service_id();
+ }
+ if (state_.current_program.get()) {
+ program_manager()->UnuseProgram(shader_manager(),
+ state_.current_program.get());
+ }
+ state_.current_program = program;
+ LogClientServiceMapping("glUseProgram", program_id, service_id);
+ glUseProgram(service_id);
+ if (state_.current_program.get()) {
+ program_manager()->UseProgram(state_.current_program.get());
+ if (workarounds().clear_uniforms_before_first_program_use)
+ program_manager()->ClearUniforms(program);
+ }
+}
+
+void GLES2DecoderImpl::RenderWarning(
+ const char* filename, int line, const std::string& msg) {
+ logger_.LogMessage(filename, line, std::string("RENDER WARNING: ") + msg);
+}
+
+void GLES2DecoderImpl::PerformanceWarning(
+ const char* filename, int line, const std::string& msg) {
+ logger_.LogMessage(filename, line,
+ std::string("PERFORMANCE WARNING: ") + msg);
+}
+
+void GLES2DecoderImpl::DoWillUseTexImageIfNeeded(
+ Texture* texture, GLenum textarget) {
+ // Image is already in use if texture is attached to a framebuffer.
+ if (texture && !texture->IsAttachedToFramebuffer()) {
+ gfx::GLImage* image = texture->GetLevelImage(textarget, 0);
+ if (image) {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::DoWillUseTexImageIfNeeded",
+ GetErrorState());
+ glBindTexture(textarget, texture->service_id());
+ image->WillUseTexImage();
+ RestoreCurrentTextureBindings(&state_, textarget);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DoDidUseTexImageIfNeeded(
+ Texture* texture, GLenum textarget) {
+ // Image is still in use if texture is attached to a framebuffer.
+ if (texture && !texture->IsAttachedToFramebuffer()) {
+ gfx::GLImage* image = texture->GetLevelImage(textarget, 0);
+ if (image) {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::DoDidUseTexImageIfNeeded",
+ GetErrorState());
+ glBindTexture(textarget, texture->service_id());
+ image->DidUseTexImage();
+ RestoreCurrentTextureBindings(&state_, textarget);
+ }
+ }
+}
+
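+// Binds black textures in place of any unrenderable textures referenced by
+// the current program and signals level images that are about to be sampled.
+// Returns false when any binding was changed; RestoreStateForTextures()
+// undoes the substitutions.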
+bool GLES2DecoderImpl::PrepareTexturesForRender() {
+ DCHECK(state_.current_program.get());
+ if (!texture_manager()->HaveUnrenderableTextures() &&
+ !texture_manager()->HaveImages()) {
+ return true;
+ }
+
+ bool textures_set = false;
+ const Program::SamplerIndices& sampler_indices =
+ state_.current_program->sampler_indices();
+ for (size_t ii = 0; ii < sampler_indices.size(); ++ii) {
+ const Program::UniformInfo* uniform_info =
+ state_.current_program->GetUniformInfo(sampler_indices[ii]);
+ DCHECK(uniform_info);
+ for (size_t jj = 0; jj < uniform_info->texture_units.size(); ++jj) {
+ GLuint texture_unit_index = uniform_info->texture_units[jj];
+ if (texture_unit_index < state_.texture_units.size()) {
+ TextureUnit& texture_unit = state_.texture_units[texture_unit_index];
+ TextureRef* texture_ref =
+ texture_unit.GetInfoForSamplerType(uniform_info->type).get();
+ GLenum textarget = GetBindTargetForSamplerType(uniform_info->type);
+ if (!texture_ref || !texture_manager()->CanRender(texture_ref)) {
+ textures_set = true;
+ glActiveTexture(GL_TEXTURE0 + texture_unit_index);
+ glBindTexture(
+ textarget,
+ texture_manager()->black_texture_id(uniform_info->type));
+ LOCAL_RENDER_WARNING(
+ std::string("texture bound to texture unit ") +
+ base::IntToString(texture_unit_index) +
+              " is not renderable. It may be non-power-of-2 and have"
+ " incompatible texture filtering or is not"
+ " 'texture complete'");
+ continue;
+ }
+
+ if (textarget != GL_TEXTURE_CUBE_MAP) {
+ Texture* texture = texture_ref->texture();
+ gfx::GLImage* image = texture->GetLevelImage(textarget, 0);
+ if (image && !texture->IsAttachedToFramebuffer()) {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::PrepareTexturesForRender", GetErrorState());
+ textures_set = true;
+ glActiveTexture(GL_TEXTURE0 + texture_unit_index);
+ image->WillUseTexImage();
+ continue;
+ }
+ }
+ }
+ // else: should this be an error?
+ }
+ }
+ return !textures_set;
+}
+
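+// Rebinds the textures that PrepareTexturesForRender() replaced and restores
+// the client's active texture unit.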
+void GLES2DecoderImpl::RestoreStateForTextures() {
+ DCHECK(state_.current_program.get());
+ const Program::SamplerIndices& sampler_indices =
+ state_.current_program->sampler_indices();
+ for (size_t ii = 0; ii < sampler_indices.size(); ++ii) {
+ const Program::UniformInfo* uniform_info =
+ state_.current_program->GetUniformInfo(sampler_indices[ii]);
+ DCHECK(uniform_info);
+ for (size_t jj = 0; jj < uniform_info->texture_units.size(); ++jj) {
+ GLuint texture_unit_index = uniform_info->texture_units[jj];
+ if (texture_unit_index < state_.texture_units.size()) {
+ TextureUnit& texture_unit = state_.texture_units[texture_unit_index];
+ TextureRef* texture_ref =
+ texture_unit.GetInfoForSamplerType(uniform_info->type).get();
+ if (!texture_ref || !texture_manager()->CanRender(texture_ref)) {
+ glActiveTexture(GL_TEXTURE0 + texture_unit_index);
+ // Get the texture_ref info that was previously bound here.
+ texture_ref = texture_unit.bind_target == GL_TEXTURE_2D
+ ? texture_unit.bound_texture_2d.get()
+ : texture_unit.bound_texture_cube_map.get();
+ glBindTexture(texture_unit.bind_target,
+ texture_ref ? texture_ref->service_id() : 0);
+ continue;
+ }
+
+ if (texture_unit.bind_target != GL_TEXTURE_CUBE_MAP) {
+ Texture* texture = texture_ref->texture();
+ gfx::GLImage* image =
+ texture->GetLevelImage(texture_unit.bind_target, 0);
+ if (image && !texture->IsAttachedToFramebuffer()) {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::RestoreStateForTextures", GetErrorState());
+ glActiveTexture(GL_TEXTURE0 + texture_unit_index);
+ image->DidUseTexImage();
+ continue;
+ }
+ }
+ }
+ }
+ }
+ // Set the active texture back to whatever the user had it as.
+ glActiveTexture(GL_TEXTURE0 + state_.active_texture_unit);
+}
+
+bool GLES2DecoderImpl::ClearUnclearedTextures() {
+ // Only check if there are some uncleared textures.
+ if (!texture_manager()->HaveUnsafeTextures()) {
+ return true;
+ }
+
+ // 1: Check all textures we are about to render with.
+ if (state_.current_program.get()) {
+ const Program::SamplerIndices& sampler_indices =
+ state_.current_program->sampler_indices();
+ for (size_t ii = 0; ii < sampler_indices.size(); ++ii) {
+ const Program::UniformInfo* uniform_info =
+ state_.current_program->GetUniformInfo(sampler_indices[ii]);
+ DCHECK(uniform_info);
+ for (size_t jj = 0; jj < uniform_info->texture_units.size(); ++jj) {
+ GLuint texture_unit_index = uniform_info->texture_units[jj];
+ if (texture_unit_index < state_.texture_units.size()) {
+ TextureUnit& texture_unit = state_.texture_units[texture_unit_index];
+ TextureRef* texture_ref =
+ texture_unit.GetInfoForSamplerType(uniform_info->type).get();
+ if (texture_ref && !texture_ref->texture()->SafeToRenderFrom()) {
+ if (!texture_manager()->ClearRenderableLevels(this, texture_ref)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::IsDrawValid(
+ const char* function_name, GLuint max_vertex_accessed, bool instanced,
+ GLsizei primcount) {
+ DCHECK(instanced || primcount == 1);
+
+ // NOTE: We specifically do not check current_program->IsValid() because
+ // it could never be invalid since glUseProgram would have failed. While
+ // glLinkProgram could later mark the program as invalid the previous
+ // valid program will still function if it is still the current program.
+ if (!state_.current_program.get()) {
+ // The program does not exist, but GL does not generate an error for this.
+ LOCAL_RENDER_WARNING("Drawing with no current shader program.");
+ return false;
+ }
+
+ return state_.vertex_attrib_manager
+ ->ValidateBindings(function_name,
+ this,
+ feature_info_.get(),
+ state_.current_program.get(),
+ max_vertex_accessed,
+ instanced,
+ primcount);
+}
+
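+// Simulates the constant value of generic vertex attribute 0, which is needed
+// to emulate GLES2 semantics on desktop GL when attrib 0 is disabled or
+// unused. Fills a scratch buffer with the shadowed attrib 0 value and points
+// attrib 0 at it; *simulated reports whether this was done. Returns false and
+// sets GL_OUT_OF_MEMORY if the scratch buffer cannot be sized.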
+bool GLES2DecoderImpl::SimulateAttrib0(
+ const char* function_name, GLuint max_vertex_accessed, bool* simulated) {
+ DCHECK(simulated);
+ *simulated = false;
+
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2)
+ return true;
+
+ const VertexAttrib* attrib =
+ state_.vertex_attrib_manager->GetVertexAttrib(0);
+ // If attrib 0 is enabled and used by the program, the real attrib data is
+ // fine and no simulation is needed.
+ bool attrib_0_used =
+ state_.current_program->GetAttribInfoByLocation(0) != NULL;
+ if (attrib->enabled() && attrib_0_used) {
+ return true;
+ }
+
+ // Make a buffer with a single repeated vec4 value enough to
+ // simulate the constant value that is supposed to be here.
+ // This is required to emulate GLES2 on GL.
+ GLuint num_vertices = max_vertex_accessed + 1;
+ uint32 size_needed = 0;
+
+ if (num_vertices == 0 ||
+ !SafeMultiplyUint32(num_vertices, sizeof(Vec4), &size_needed) ||
+ size_needed > 0x7FFFFFFFU) {
+ LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, function_name, "Simulating attrib 0");
+ return false;
+ }
+
+ LOCAL_PERFORMANCE_WARNING(
+ "Attribute 0 is disabled. This has signficant performance penalty");
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(function_name);
+ glBindBuffer(GL_ARRAY_BUFFER, attrib_0_buffer_id_);
+
+ bool new_buffer = static_cast<GLsizei>(size_needed) > attrib_0_size_;
+ if (new_buffer) {
+ glBufferData(GL_ARRAY_BUFFER, size_needed, NULL, GL_DYNAMIC_DRAW);
+ GLenum error = glGetError();
+ if (error != GL_NO_ERROR) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "Simulating attrib 0");
+ return false;
+ }
+ }
+
+ const Vec4& value = state_.attrib_values[0];
+ if (new_buffer ||
+ (attrib_0_used &&
+ (!attrib_0_buffer_matches_value_ ||
+ (value.v[0] != attrib_0_value_.v[0] ||
+ value.v[1] != attrib_0_value_.v[1] ||
+ value.v[2] != attrib_0_value_.v[2] ||
+ value.v[3] != attrib_0_value_.v[3])))) {
+ std::vector<Vec4> temp(num_vertices, value);
+ glBufferSubData(GL_ARRAY_BUFFER, 0, size_needed, &temp[0].v[0]);
+ attrib_0_buffer_matches_value_ = true;
+ attrib_0_value_ = value;
+ attrib_0_size_ = size_needed;
+ }
+
+ glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, NULL);
+
+ if (attrib->divisor())
+ glVertexAttribDivisorANGLE(0, 0);
+
+ *simulated = true;
+ return true;
+}
+
+void GLES2DecoderImpl::RestoreStateForAttrib(
+ GLuint attrib_index, bool restore_array_binding) {
+ const VertexAttrib* attrib =
+ state_.vertex_attrib_manager->GetVertexAttrib(attrib_index);
+ if (restore_array_binding) {
+ const void* ptr = reinterpret_cast<const void*>(attrib->offset());
+ Buffer* buffer = attrib->buffer();
+ glBindBuffer(GL_ARRAY_BUFFER, buffer ? buffer->service_id() : 0);
+ glVertexAttribPointer(
+ attrib_index, attrib->size(), attrib->type(), attrib->normalized(),
+ attrib->gl_stride(), ptr);
+ }
+ if (attrib->divisor())
+ glVertexAttribDivisorANGLE(attrib_index, attrib->divisor());
+ glBindBuffer(
+ GL_ARRAY_BUFFER, state_.bound_array_buffer.get() ?
+ state_.bound_array_buffer->service_id() : 0);
+
+ // Never touch vertex attribute 0's state (in particular, never
+ // disable it) when running on desktop GL because it will never be
+ // re-enabled.
+ if (attrib_index != 0 ||
+ gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ if (attrib->enabled()) {
+ glEnableVertexAttribArray(attrib_index);
+ } else {
+ glDisableVertexAttribArray(attrib_index);
+ }
+ }
+}
+
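+// Converts enabled GL_FIXED attributes to GL_FLOAT data in a scratch buffer,
+// since the desktop GL backend cannot consume GL_FIXED directly. *simulated
+// reports whether any conversion was done. Returns false and sets
+// GL_OUT_OF_MEMORY if the required sizes overflow or allocation fails.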
+bool GLES2DecoderImpl::SimulateFixedAttribs(
+ const char* function_name,
+ GLuint max_vertex_accessed, bool* simulated, GLsizei primcount) {
+ DCHECK(simulated);
+ *simulated = false;
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2)
+ return true;
+
+ if (!state_.vertex_attrib_manager->HaveFixedAttribs()) {
+ return true;
+ }
+
+ LOCAL_PERFORMANCE_WARNING(
+ "GL_FIXED attributes have a signficant performance penalty");
+
+ // NOTE: We could be smarter and check whether a buffer is used by two
+ // different attribs, find the overlapping parts, and duplicate only the
+ // minimum amount of data. But this whole code path is not meant to be used
+ // normally; it only exists to pass the OpenGL ES 2.0 conformance tests, so
+ // we simply add the size needed for each enabled attrib to the buffer.
+
+ GLuint elements_needed = 0;
+ const VertexAttribManager::VertexAttribList& enabled_attribs =
+ state_.vertex_attrib_manager->GetEnabledVertexAttribs();
+ for (VertexAttribManager::VertexAttribList::const_iterator it =
+ enabled_attribs.begin(); it != enabled_attribs.end(); ++it) {
+ const VertexAttrib* attrib = *it;
+ const Program::VertexAttrib* attrib_info =
+ state_.current_program->GetAttribInfoByLocation(attrib->index());
+ GLuint max_accessed = attrib->MaxVertexAccessed(primcount,
+ max_vertex_accessed);
+ GLuint num_vertices = max_accessed + 1;
+ if (num_vertices == 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "Simulating attrib 0");
+ return false;
+ }
+ if (attrib_info &&
+ attrib->CanAccess(max_accessed) &&
+ attrib->type() == GL_FIXED) {
+ uint32 elements_used = 0;
+ if (!SafeMultiplyUint32(num_vertices, attrib->size(), &elements_used) ||
+ !SafeAddUint32(elements_needed, elements_used, &elements_needed)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "simulating GL_FIXED attribs");
+ return false;
+ }
+ }
+ }
+
+ const uint32 kSizeOfFloat = sizeof(float); // NOLINT
+ uint32 size_needed = 0;
+ if (!SafeMultiplyUint32(elements_needed, kSizeOfFloat, &size_needed) ||
+ size_needed > 0x7FFFFFFFU) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "simulating GL_FIXED attribs");
+ return false;
+ }
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(function_name);
+
+ glBindBuffer(GL_ARRAY_BUFFER, fixed_attrib_buffer_id_);
+ if (static_cast<GLsizei>(size_needed) > fixed_attrib_buffer_size_) {
+ glBufferData(GL_ARRAY_BUFFER, size_needed, NULL, GL_DYNAMIC_DRAW);
+ GLenum error = glGetError();
+ if (error != GL_NO_ERROR) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "simulating GL_FIXED attribs");
+ return false;
+ }
+ }
+
+ // Copy the elements and convert to float
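+ // GL_FIXED is 16.16 fixed point, so each element is converted to float by
+ // dividing by 65536.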
+ GLintptr offset = 0;
+ for (VertexAttribManager::VertexAttribList::const_iterator it =
+ enabled_attribs.begin(); it != enabled_attribs.end(); ++it) {
+ const VertexAttrib* attrib = *it;
+ const Program::VertexAttrib* attrib_info =
+ state_.current_program->GetAttribInfoByLocation(attrib->index());
+ GLuint max_accessed = attrib->MaxVertexAccessed(primcount,
+ max_vertex_accessed);
+ GLuint num_vertices = max_accessed + 1;
+ if (num_vertices == 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "Simulating attrib 0");
+ return false;
+ }
+ if (attrib_info &&
+ attrib->CanAccess(max_accessed) &&
+ attrib->type() == GL_FIXED) {
+ int num_elements = attrib->size() * kSizeOfFloat;
+ int size = num_elements * num_vertices;
+ scoped_ptr<float[]> data(new float[size]);
+ const int32* src = reinterpret_cast<const int32 *>(
+ attrib->buffer()->GetRange(attrib->offset(), size));
+ const int32* end = src + num_elements;
+ float* dst = data.get();
+ while (src != end) {
+ *dst++ = static_cast<float>(*src++) / 65536.0f;
+ }
+ glBufferSubData(GL_ARRAY_BUFFER, offset, size, data.get());
+ glVertexAttribPointer(
+ attrib->index(), attrib->size(), GL_FLOAT, false, 0,
+ reinterpret_cast<GLvoid*>(offset));
+ offset += size;
+ }
+ }
+ *simulated = true;
+ return true;
+}
+
+void GLES2DecoderImpl::RestoreStateForSimulatedFixedAttribs() {
+ // There's no need to call glVertexAttribPointer because we shadow all the
+ // settings and passing GL_FIXED to it will not work.
+ glBindBuffer(
+ GL_ARRAY_BUFFER,
+ state_.bound_array_buffer.get() ? state_.bound_array_buffer->service_id()
+ : 0);
+}
+
+error::Error GLES2DecoderImpl::DoDrawArrays(
+ const char* function_name,
+ bool instanced,
+ GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) {
+ error::Error error = WillAccessBoundFramebufferForDraw();
+ if (error != error::kNoError)
+ return error;
+ if (!validators_->draw_mode.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, mode, "mode");
+ return error::kNoError;
+ }
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "count < 0");
+ return error::kNoError;
+ }
+ if (primcount < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "primcount < 0");
+ return error::kNoError;
+ }
+ if (!CheckBoundFramebuffersValid(function_name)) {
+ return error::kNoError;
+ }
+ // We have to check this here because glDrawArrays takes 'first' as a GLint,
+ // not a GLsizei.
+ if (first < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "first < 0");
+ return error::kNoError;
+ }
+
+ if (count == 0 || primcount == 0) {
+ LOCAL_RENDER_WARNING("Render count or primcount is 0.");
+ return error::kNoError;
+ }
+
+ GLuint max_vertex_accessed = first + count - 1;
+ if (IsDrawValid(function_name, max_vertex_accessed, instanced, primcount)) {
+ if (!ClearUnclearedTextures()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "out of memory");
+ return error::kNoError;
+ }
+ bool simulated_attrib_0 = false;
+ if (!SimulateAttrib0(
+ function_name, max_vertex_accessed, &simulated_attrib_0)) {
+ return error::kNoError;
+ }
+ bool simulated_fixed_attribs = false;
+ if (SimulateFixedAttribs(
+ function_name, max_vertex_accessed, &simulated_fixed_attribs,
+ primcount)) {
+ bool textures_set = !PrepareTexturesForRender();
+ ApplyDirtyState();
+ ScopedRenderTo do_render(framebuffer_state_.bound_draw_framebuffer.get());
+ if (!instanced) {
+ glDrawArrays(mode, first, count);
+ } else {
+ glDrawArraysInstancedANGLE(mode, first, count, primcount);
+ }
+ if (textures_set) {
+ RestoreStateForTextures();
+ }
+ if (simulated_fixed_attribs) {
+ RestoreStateForSimulatedFixedAttribs();
+ }
+ }
+ if (simulated_attrib_0) {
+ // We don't have to restore attrib 0 generic data at the end of this
+ // function even if it is simulated. This is because we will simulate
+ // it in each draw call, and attrib 0 generic data queries use cached
+ // values instead of passing down to the underlying driver.
+ RestoreStateForAttrib(0, false);
+ }
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDrawArrays(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const cmds::DrawArrays& c = *static_cast<const cmds::DrawArrays*>(cmd_data);
+ return DoDrawArrays("glDrawArrays",
+ false,
+ static_cast<GLenum>(c.mode),
+ static_cast<GLint>(c.first),
+ static_cast<GLsizei>(c.count),
+ 1);
+}
+
+error::Error GLES2DecoderImpl::HandleDrawArraysInstancedANGLE(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DrawArraysInstancedANGLE& c =
+ *static_cast<const gles2::cmds::DrawArraysInstancedANGLE*>(cmd_data);
+ if (!features().angle_instanced_arrays) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glDrawArraysInstancedANGLE", "function not available");
+ return error::kNoError;
+ }
+ return DoDrawArrays("glDrawArraysIntancedANGLE",
+ true,
+ static_cast<GLenum>(c.mode),
+ static_cast<GLint>(c.first),
+ static_cast<GLsizei>(c.count),
+ static_cast<GLsizei>(c.primcount));
+}
+
+error::Error GLES2DecoderImpl::DoDrawElements(
+ const char* function_name,
+ bool instanced,
+ GLenum mode,
+ GLsizei count,
+ GLenum type,
+ int32 offset,
+ GLsizei primcount) {
+ error::Error error = WillAccessBoundFramebufferForDraw();
+ if (error != error::kNoError)
+ return error;
+ if (!state_.vertex_attrib_manager->element_array_buffer()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "No element array buffer bound");
+ return error::kNoError;
+ }
+
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "count < 0");
+ return error::kNoError;
+ }
+ if (offset < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "offset < 0");
+ return error::kNoError;
+ }
+ if (!validators_->draw_mode.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, mode, "mode");
+ return error::kNoError;
+ }
+ if (!validators_->index_type.IsValid(type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, type, "type");
+ return error::kNoError;
+ }
+ if (primcount < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "primcount < 0");
+ return error::kNoError;
+ }
+
+ if (!CheckBoundFramebuffersValid(function_name)) {
+ return error::kNoError;
+ }
+
+ if (count == 0 || primcount == 0) {
+ return error::kNoError;
+ }
+
+ GLuint max_vertex_accessed;
+ Buffer* element_array_buffer =
+ state_.vertex_attrib_manager->element_array_buffer();
+
+ if (!element_array_buffer->GetMaxValueForRange(
+ offset, count, type, &max_vertex_accessed)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "range out of bounds for buffer");
+ return error::kNoError;
+ }
+
+ if (IsDrawValid(function_name, max_vertex_accessed, instanced, primcount)) {
+ if (!ClearUnclearedTextures()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "out of memory");
+ return error::kNoError;
+ }
+ bool simulated_attrib_0 = false;
+ if (!SimulateAttrib0(
+ function_name, max_vertex_accessed, &simulated_attrib_0)) {
+ return error::kNoError;
+ }
+ bool simulated_fixed_attribs = false;
+ if (SimulateFixedAttribs(
+ function_name, max_vertex_accessed, &simulated_fixed_attribs,
+ primcount)) {
+ bool textures_set = !PrepareTexturesForRender();
+ ApplyDirtyState();
+ // TODO(gman): Refactor to hide these details in BufferManager or
+ // VertexAttribManager.
+ const GLvoid* indices = reinterpret_cast<const GLvoid*>(offset);
+ bool used_client_side_array = false;
+ if (element_array_buffer->IsClientSideArray()) {
+ used_client_side_array = true;
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+ indices = element_array_buffer->GetRange(offset, 0);
+ }
+
+ ScopedRenderTo do_render(framebuffer_state_.bound_draw_framebuffer.get());
+ if (!instanced) {
+ glDrawElements(mode, count, type, indices);
+ } else {
+ glDrawElementsInstancedANGLE(mode, count, type, indices, primcount);
+ }
+
+ if (used_client_side_array) {
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,
+ element_array_buffer->service_id());
+ }
+
+ if (textures_set) {
+ RestoreStateForTextures();
+ }
+ if (simulated_fixed_attribs) {
+ RestoreStateForSimulatedFixedAttribs();
+ }
+ }
+ if (simulated_attrib_0) {
+ // We don't have to restore attrib 0 generic data at the end of this
+ // function even if it is simulated. This is because we will simulate
+ // it in each draw call, and attrib 0 generic data queries use cached
+ // values instead of passing down to the underlying driver.
+ RestoreStateForAttrib(0, false);
+ }
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDrawElements(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DrawElements& c =
+ *static_cast<const gles2::cmds::DrawElements*>(cmd_data);
+ return DoDrawElements("glDrawElements",
+ false,
+ static_cast<GLenum>(c.mode),
+ static_cast<GLsizei>(c.count),
+ static_cast<GLenum>(c.type),
+ static_cast<int32>(c.index_offset),
+ 1);
+}
+
+error::Error GLES2DecoderImpl::HandleDrawElementsInstancedANGLE(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DrawElementsInstancedANGLE& c =
+ *static_cast<const gles2::cmds::DrawElementsInstancedANGLE*>(cmd_data);
+ if (!features().angle_instanced_arrays) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glDrawElementsInstancedANGLE", "function not available");
+ return error::kNoError;
+ }
+ return DoDrawElements("glDrawElementsInstancedANGLE",
+ true,
+ static_cast<GLenum>(c.mode),
+ static_cast<GLsizei>(c.count),
+ static_cast<GLenum>(c.type),
+ static_cast<int32>(c.index_offset),
+ static_cast<GLsizei>(c.primcount));
+}
+
+GLuint GLES2DecoderImpl::DoGetMaxValueInBufferCHROMIUM(
+ GLuint buffer_id, GLsizei count, GLenum type, GLuint offset) {
+ GLuint max_vertex_accessed = 0;
+ Buffer* buffer = GetBuffer(buffer_id);
+ if (!buffer) {
+ // TODO(gman): Should this be a GL error or a command buffer error?
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "GetMaxValueInBufferCHROMIUM", "unknown buffer");
+ } else {
+ if (!buffer->GetMaxValueForRange(
+ offset, count, type, &max_vertex_accessed)) {
+ // TODO(gman): Should this be a GL error or a command buffer error?
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "GetMaxValueInBufferCHROMIUM", "range out of bounds for buffer");
+ }
+ }
+ return max_vertex_accessed;
+}
+
+// Handles the various versions of the ShaderSource command. The actual
+// glShaderSource call is deferred until glCompileShader. Assumes that
+// data / data_size points to a piece of memory that is in range of whatever
+// context it came from (shared memory, immediate memory, bucket memory).
+error::Error GLES2DecoderImpl::ShaderSourceHelper(
+ GLuint client_id, const char* data, uint32 data_size) {
+ std::string str(data, data + data_size);
+ Shader* shader = GetShaderInfoNotProgram(client_id, "glShaderSource");
+ if (!shader) {
+ return error::kNoError;
+ }
+ // Note: We don't actually call glShaderSource here. We wait until
+ // the call to glCompileShader.
+ shader->set_source(str);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleShaderSourceBucket(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ShaderSourceBucket& c =
+ *static_cast<const gles2::cmds::ShaderSourceBucket*>(cmd_data);
+ Bucket* bucket = GetBucket(c.data_bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ return ShaderSourceHelper(
+ c.shader, bucket->GetDataAs<const char*>(0, bucket->size() - 1),
+ bucket->size() - 1);
+}
+
+void GLES2DecoderImpl::DoCompileShader(GLuint client_id) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoCompileShader");
+ Shader* shader = GetShaderInfoNotProgram(client_id, "glCompileShader");
+ if (!shader) {
+ return;
+ }
+ ShaderTranslator* translator = NULL;
+ if (use_shader_translator_) {
+ translator = shader->shader_type() == GL_VERTEX_SHADER ?
+ vertex_translator_.get() : fragment_translator_.get();
+ }
+
+ shader->DoCompile(
+ translator,
+ feature_info_->feature_flags().angle_translated_shader_source ?
+ Shader::kANGLE : Shader::kGL);
+
+ // CompileShader can be very slow. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
+void GLES2DecoderImpl::DoGetShaderiv(
+ GLuint shader_id, GLenum pname, GLint* params) {
+ Shader* shader = GetShaderInfoNotProgram(shader_id, "glGetShaderiv");
+ if (!shader) {
+ return;
+ }
+ switch (pname) {
+ case GL_SHADER_SOURCE_LENGTH:
+ *params = shader->source().size();
+ if (*params)
+ ++(*params);
+ return;
+ case GL_COMPILE_STATUS:
+ *params = compile_shader_always_succeeds_ ? true : shader->valid();
+ return;
+ case GL_INFO_LOG_LENGTH:
+ *params = shader->log_info().size();
+ if (*params)
+ ++(*params);
+ return;
+ case GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE:
+ *params = shader->translated_source().size();
+ if (*params)
+ ++(*params);
+ return;
+ default:
+ break;
+ }
+ glGetShaderiv(shader->service_id(), pname, params);
+}
+
+error::Error GLES2DecoderImpl::HandleGetShaderSource(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetShaderSource& c =
+ *static_cast<const gles2::cmds::GetShaderSource*>(cmd_data);
+ GLuint shader_id = c.shader;
+ uint32 bucket_id = static_cast<uint32>(c.bucket_id);
+ Bucket* bucket = CreateBucket(bucket_id);
+ Shader* shader = GetShaderInfoNotProgram(shader_id, "glGetShaderSource");
+ if (!shader || shader->source().empty()) {
+ bucket->SetSize(0);
+ return error::kNoError;
+ }
+ bucket->SetFromString(shader->source().c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetTranslatedShaderSourceANGLE(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetTranslatedShaderSourceANGLE& c =
+ *static_cast<const gles2::cmds::GetTranslatedShaderSourceANGLE*>(
+ cmd_data);
+ GLuint shader_id = c.shader;
+ uint32 bucket_id = static_cast<uint32>(c.bucket_id);
+ Bucket* bucket = CreateBucket(bucket_id);
+ Shader* shader = GetShaderInfoNotProgram(
+ shader_id, "glGetTranslatedShaderSourceANGLE");
+ if (!shader) {
+ bucket->SetSize(0);
+ return error::kNoError;
+ }
+
+ bucket->SetFromString(shader->translated_source().c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramInfoLog(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetProgramInfoLog& c =
+ *static_cast<const gles2::cmds::GetProgramInfoLog*>(cmd_data);
+ GLuint program_id = c.program;
+ uint32 bucket_id = static_cast<uint32>(c.bucket_id);
+ Bucket* bucket = CreateBucket(bucket_id);
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glGetProgramInfoLog");
+ if (!program || !program->log_info()) {
+ bucket->SetFromString("");
+ return error::kNoError;
+ }
+ bucket->SetFromString(program->log_info()->c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetShaderInfoLog(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetShaderInfoLog& c =
+ *static_cast<const gles2::cmds::GetShaderInfoLog*>(cmd_data);
+ GLuint shader_id = c.shader;
+ uint32 bucket_id = static_cast<uint32>(c.bucket_id);
+ Bucket* bucket = CreateBucket(bucket_id);
+ Shader* shader = GetShaderInfoNotProgram(shader_id, "glGetShaderInfoLog");
+ if (!shader) {
+ bucket->SetFromString("");
+ return error::kNoError;
+ }
+ bucket->SetFromString(shader->log_info().c_str());
+ return error::kNoError;
+}
+
+bool GLES2DecoderImpl::DoIsEnabled(GLenum cap) {
+ return state_.GetEnabled(cap);
+}
+
+bool GLES2DecoderImpl::DoIsBuffer(GLuint client_id) {
+ const Buffer* buffer = GetBuffer(client_id);
+ return buffer && buffer->IsValid() && !buffer->IsDeleted();
+}
+
+bool GLES2DecoderImpl::DoIsFramebuffer(GLuint client_id) {
+ const Framebuffer* framebuffer =
+ GetFramebuffer(client_id);
+ return framebuffer && framebuffer->IsValid() && !framebuffer->IsDeleted();
+}
+
+bool GLES2DecoderImpl::DoIsProgram(GLuint client_id) {
+ // IsProgram is true for programs as soon as they are created, until they are
+ // deleted and no longer in use.
+ const Program* program = GetProgram(client_id);
+ return program != NULL && !program->IsDeleted();
+}
+
+bool GLES2DecoderImpl::DoIsRenderbuffer(GLuint client_id) {
+ const Renderbuffer* renderbuffer =
+ GetRenderbuffer(client_id);
+ return renderbuffer && renderbuffer->IsValid() && !renderbuffer->IsDeleted();
+}
+
+bool GLES2DecoderImpl::DoIsShader(GLuint client_id) {
+ // IsShader is true for shaders as soon as they are created, until they
+ // are deleted and not attached to any programs.
+ const Shader* shader = GetShader(client_id);
+ return shader != NULL && !shader->IsDeleted();
+}
+
+bool GLES2DecoderImpl::DoIsTexture(GLuint client_id) {
+ const TextureRef* texture_ref = GetTexture(client_id);
+ return texture_ref && texture_ref->texture()->IsValid();
+}
+
+void GLES2DecoderImpl::DoAttachShader(
+ GLuint program_client_id, GLint shader_client_id) {
+ Program* program = GetProgramInfoNotShader(
+ program_client_id, "glAttachShader");
+ if (!program) {
+ return;
+ }
+ Shader* shader = GetShaderInfoNotProgram(shader_client_id, "glAttachShader");
+ if (!shader) {
+ return;
+ }
+ if (!program->AttachShader(shader_manager(), shader)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glAttachShader",
+ "can not attach more than one shader of the same type.");
+ return;
+ }
+ glAttachShader(program->service_id(), shader->service_id());
+}
+
+void GLES2DecoderImpl::DoDetachShader(
+ GLuint program_client_id, GLint shader_client_id) {
+ Program* program = GetProgramInfoNotShader(
+ program_client_id, "glDetachShader");
+ if (!program) {
+ return;
+ }
+ Shader* shader = GetShaderInfoNotProgram(shader_client_id, "glDetachShader");
+ if (!shader) {
+ return;
+ }
+ if (!program->DetachShader(shader_manager(), shader)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glDetachShader", "shader not attached to program");
+ return;
+ }
+ glDetachShader(program->service_id(), shader->service_id());
+}
+
+void GLES2DecoderImpl::DoValidateProgram(GLuint program_client_id) {
+ Program* program = GetProgramInfoNotShader(
+ program_client_id, "glValidateProgram");
+ if (!program) {
+ return;
+ }
+ program->Validate();
+}
+
+void GLES2DecoderImpl::GetVertexAttribHelper(
+ const VertexAttrib* attrib, GLenum pname, GLint* params) {
+ switch (pname) {
+ case GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING: {
+ Buffer* buffer = attrib->buffer();
+ if (buffer && !buffer->IsDeleted()) {
+ GLuint client_id;
+ buffer_manager()->GetClientId(buffer->service_id(), &client_id);
+ *params = client_id;
+ }
+ break;
+ }
+ case GL_VERTEX_ATTRIB_ARRAY_ENABLED:
+ *params = attrib->enabled();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_SIZE:
+ *params = attrib->size();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_STRIDE:
+ *params = attrib->gl_stride();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_TYPE:
+ *params = attrib->type();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_NORMALIZED:
+ *params = attrib->normalized();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE:
+ *params = attrib->divisor();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+void GLES2DecoderImpl::DoGetTexParameterfv(
+ GLenum target, GLenum pname, GLfloat* params) {
+ InitTextureMaxAnisotropyIfNeeded(target, pname);
+ glGetTexParameterfv(target, pname, params);
+}
+
+void GLES2DecoderImpl::DoGetTexParameteriv(
+ GLenum target, GLenum pname, GLint* params) {
+ InitTextureMaxAnisotropyIfNeeded(target, pname);
+ glGetTexParameteriv(target, pname, params);
+}
+
+void GLES2DecoderImpl::InitTextureMaxAnisotropyIfNeeded(
+ GLenum target, GLenum pname) {
+ if (!workarounds().init_texture_max_anisotropy)
+ return;
+ if (pname != GL_TEXTURE_MAX_ANISOTROPY_EXT ||
+ !validators_->texture_parameter.IsValid(pname)) {
+ return;
+ }
+
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glGetTexParamter{fi}v", "unknown texture for target");
+ return;
+ }
+ Texture* texture = texture_ref->texture();
+ texture->InitTextureMaxAnisotropyIfNeeded(target);
+}
+
+void GLES2DecoderImpl::DoGetVertexAttribfv(
+ GLuint index, GLenum pname, GLfloat* params) {
+ VertexAttrib* attrib = state_.vertex_attrib_manager->GetVertexAttrib(index);
+ if (!attrib) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetVertexAttribfv", "index out of range");
+ return;
+ }
+ switch (pname) {
+ case GL_CURRENT_VERTEX_ATTRIB: {
+ const Vec4& value = state_.attrib_values[index];
+ params[0] = value.v[0];
+ params[1] = value.v[1];
+ params[2] = value.v[2];
+ params[3] = value.v[3];
+ break;
+ }
+ default: {
+ GLint value = 0;
+ GetVertexAttribHelper(attrib, pname, &value);
+ *params = static_cast<GLfloat>(value);
+ break;
+ }
+ }
+}
+
+void GLES2DecoderImpl::DoGetVertexAttribiv(
+ GLuint index, GLenum pname, GLint* params) {
+ VertexAttrib* attrib = state_.vertex_attrib_manager->GetVertexAttrib(index);
+ if (!attrib) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetVertexAttribiv", "index out of range");
+ return;
+ }
+ switch (pname) {
+ case GL_CURRENT_VERTEX_ATTRIB: {
+ const Vec4& value = state_.attrib_values[index];
+ params[0] = static_cast<GLint>(value.v[0]);
+ params[1] = static_cast<GLint>(value.v[1]);
+ params[2] = static_cast<GLint>(value.v[2]);
+ params[3] = static_cast<GLint>(value.v[3]);
+ break;
+ }
+ default:
+ GetVertexAttribHelper(attrib, pname, params);
+ break;
+ }
+}
+
+bool GLES2DecoderImpl::SetVertexAttribValue(
+ const char* function_name, GLuint index, const GLfloat* value) {
+ if (index >= state_.attrib_values.size()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "index out of range");
+ return false;
+ }
+ Vec4& v = state_.attrib_values[index];
+ v.v[0] = value[0];
+ v.v[1] = value[1];
+ v.v[2] = value[2];
+ v.v[3] = value[3];
+ return true;
+}
+
+void GLES2DecoderImpl::DoVertexAttrib1f(GLuint index, GLfloat v0) {
+ GLfloat v[4] = { v0, 0.0f, 0.0f, 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib1f", index, v)) {
+ glVertexAttrib1f(index, v0);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib2f(GLuint index, GLfloat v0, GLfloat v1) {
+ GLfloat v[4] = { v0, v1, 0.0f, 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib2f", index, v)) {
+ glVertexAttrib2f(index, v0, v1);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib3f(
+ GLuint index, GLfloat v0, GLfloat v1, GLfloat v2) {
+ GLfloat v[4] = { v0, v1, v2, 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib3f", index, v)) {
+ glVertexAttrib3f(index, v0, v1, v2);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib4f(
+ GLuint index, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3) {
+ GLfloat v[4] = { v0, v1, v2, v3, };
+ if (SetVertexAttribValue("glVertexAttrib4f", index, v)) {
+ glVertexAttrib4f(index, v0, v1, v2, v3);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib1fv(GLuint index, const GLfloat* v) {
+ GLfloat t[4] = { v[0], 0.0f, 0.0f, 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib1fv", index, t)) {
+ glVertexAttrib1fv(index, v);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib2fv(GLuint index, const GLfloat* v) {
+ GLfloat t[4] = { v[0], v[1], 0.0f, 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib2fv", index, t)) {
+ glVertexAttrib2fv(index, v);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib3fv(GLuint index, const GLfloat* v) {
+ GLfloat t[4] = { v[0], v[1], v[2], 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib3fv", index, t)) {
+ glVertexAttrib3fv(index, v);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib4fv(GLuint index, const GLfloat* v) {
+ if (SetVertexAttribValue("glVertexAttrib4fv", index, v)) {
+ glVertexAttrib4fv(index, v);
+ }
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttribPointer(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttribPointer& c =
+ *static_cast<const gles2::cmds::VertexAttribPointer*>(cmd_data);
+
+ if (!state_.bound_array_buffer.get() ||
+ state_.bound_array_buffer->IsDeleted()) {
+ if (state_.vertex_attrib_manager.get() ==
+ state_.default_vertex_attrib_manager.get()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "no array buffer bound");
+ return error::kNoError;
+ } else if (c.offset != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glVertexAttribPointer", "client side arrays are not allowed");
+ return error::kNoError;
+ }
+ }
+
+ GLuint indx = c.indx;
+ GLint size = c.size;
+ GLenum type = c.type;
+ GLboolean normalized = c.normalized;
+ GLsizei stride = c.stride;
+ GLsizei offset = c.offset;
+ const void* ptr = reinterpret_cast<const void*>(offset);
+ if (!validators_->vertex_attrib_type.IsValid(type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glVertexAttribPointer", type, "type");
+ return error::kNoError;
+ }
+ if (!validators_->vertex_attrib_size.IsValid(size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "size GL_INVALID_VALUE");
+ return error::kNoError;
+ }
+ if (indx >= group_->max_vertex_attribs()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "index out of range");
+ return error::kNoError;
+ }
+ if (stride < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "stride < 0");
+ return error::kNoError;
+ }
+ if (stride > 255) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "stride > 255");
+ return error::kNoError;
+ }
+ if (offset < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "offset < 0");
+ return error::kNoError;
+ }
+ GLsizei component_size =
+ GLES2Util::GetGLTypeSizeForTexturesAndBuffers(type);
+ // component_size must be a power of two to use & as optimized modulo.
+ DCHECK(GLES2Util::IsPOT(component_size));
+ if (offset & (component_size - 1)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glVertexAttribPointer", "offset not valid for type");
+ return error::kNoError;
+ }
+ if (stride & (component_size - 1)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glVertexAttribPointer", "stride not valid for type");
+ return error::kNoError;
+ }
+ state_.vertex_attrib_manager
+ ->SetAttribInfo(indx,
+ state_.bound_array_buffer.get(),
+ size,
+ type,
+ normalized,
+ stride,
+ stride != 0 ? stride : component_size * size,
+ offset);
+ if (type != GL_FIXED) {
+ glVertexAttribPointer(indx, size, type, normalized, stride, ptr);
+ }
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoViewport(GLint x, GLint y, GLsizei width,
+ GLsizei height) {
+ state_.viewport_x = x;
+ state_.viewport_y = y;
+ state_.viewport_width = std::min(width, viewport_max_width_);
+ state_.viewport_height = std::min(height, viewport_max_height_);
+ glViewport(x, y, width, height);
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttribDivisorANGLE(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttribDivisorANGLE& c =
+ *static_cast<const gles2::cmds::VertexAttribDivisorANGLE*>(cmd_data);
+ if (!features().angle_instanced_arrays) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glVertexAttribDivisorANGLE", "function not available");
+ return error::kNoError;
+ }
+ GLuint index = c.index;
+ GLuint divisor = c.divisor;
+ if (index >= group_->max_vertex_attribs()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glVertexAttribDivisorANGLE", "index out of range");
+ return error::kNoError;
+ }
+
+ state_.vertex_attrib_manager->SetDivisor(
+ index,
+ divisor);
+ glVertexAttribDivisorANGLE(index, divisor);
+ return error::kNoError;
+}
+
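+// Overwrites the alpha channel of every pixel in |row_count| rows with
+// |alpha_value|, stepping |channel_count| elements per pixel and honoring the
+// padded row stride.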
+template <typename pixel_data_type>
+static void WriteAlphaData(
+ void *pixels, uint32 row_count, uint32 channel_count,
+ uint32 alpha_channel_index, uint32 unpadded_row_size,
+ uint32 padded_row_size, pixel_data_type alpha_value) {
+ DCHECK_GT(channel_count, 0U);
+ DCHECK_EQ(unpadded_row_size % sizeof(pixel_data_type), 0U);
+ uint32 unpadded_row_size_in_elements =
+ unpadded_row_size / sizeof(pixel_data_type);
+ DCHECK_EQ(padded_row_size % sizeof(pixel_data_type), 0U);
+ uint32 padded_row_size_in_elements =
+ padded_row_size / sizeof(pixel_data_type);
+ pixel_data_type* dst =
+ static_cast<pixel_data_type*>(pixels) + alpha_channel_index;
+ for (uint32 yy = 0; yy < row_count; ++yy) {
+ pixel_data_type* end = dst + unpadded_row_size_in_elements;
+ for (pixel_data_type* d = dst; d < end; d += channel_count) {
+ *d = alpha_value;
+ }
+ dst += padded_row_size_in_elements;
+ }
+}
+
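+// Completes a ReadPixels request. If |buffer| is non-zero, maps the pixel
+// pack buffer and copies its contents into the client's shared memory, then
+// applies the clear_alpha_in_readpixels workaround for formats without an
+// alpha channel.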
+void GLES2DecoderImpl::FinishReadPixels(
+ const cmds::ReadPixels& c,
+ GLuint buffer) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::FinishReadPixels");
+ GLsizei width = c.width;
+ GLsizei height = c.height;
+ GLenum format = c.format;
+ GLenum type = c.type;
+ typedef cmds::ReadPixels::Result Result;
+ uint32 pixels_size;
+ Result* result = NULL;
+ if (c.result_shm_id != 0) {
+ result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ if (buffer != 0) {
+ glDeleteBuffersARB(1, &buffer);
+ }
+ return;
+ }
+ }
+ GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.pack_alignment, &pixels_size,
+ NULL, NULL);
+ void* pixels = GetSharedMemoryAs<void*>(
+ c.pixels_shm_id, c.pixels_shm_offset, pixels_size);
+ if (!pixels) {
+ if (buffer != 0) {
+ glDeleteBuffersARB(1, &buffer);
+ }
+ return;
+ }
+
+ if (buffer != 0) {
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, buffer);
+ void* data;
+ if (features().map_buffer_range) {
+ data = glMapBufferRange(
+ GL_PIXEL_PACK_BUFFER_ARB, 0, pixels_size, GL_MAP_READ_BIT);
+ } else {
+ data = glMapBuffer(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY);
+ }
+ memcpy(pixels, data, pixels_size);
+ // The GL_PIXEL_PACK_BUFFER_ARB binding is not otherwise tracked by the
+ // decoder, so there is no previous binding to restore.
+ glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
+ glDeleteBuffersARB(1, &buffer);
+ }
+
+ if (result != NULL) {
+ *result = true;
+ }
+
+ GLenum read_format = GetBoundReadFrameBufferInternalFormat();
+ uint32 channels_exist = GLES2Util::GetChannelsForFormat(read_format);
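+ // The 0x0008 bit of the channel mask corresponds to the alpha channel.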
+ if ((channels_exist & 0x0008) == 0 &&
+ workarounds().clear_alpha_in_readpixels) {
+ // Set the alpha to 255 because some drivers are buggy in this regard.
+ uint32 temp_size;
+
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, 2, format, type, state_.pack_alignment, &temp_size,
+ &unpadded_row_size, &padded_row_size)) {
+ return;
+ }
+
+ uint32 channel_count = 0;
+ uint32 alpha_channel = 0;
+ switch (format) {
+ case GL_RGBA:
+ case GL_BGRA_EXT:
+ channel_count = 4;
+ alpha_channel = 3;
+ break;
+ case GL_ALPHA:
+ channel_count = 1;
+ alpha_channel = 0;
+ break;
+ }
+
+ if (channel_count > 0) {
+ switch (type) {
+ case GL_UNSIGNED_BYTE:
+ WriteAlphaData<uint8>(
+ pixels, height, channel_count, alpha_channel, unpadded_row_size,
+ padded_row_size, 0xFF);
+ break;
+ case GL_FLOAT:
+ WriteAlphaData<float>(
+ pixels, height, channel_count, alpha_channel, unpadded_row_size,
+ padded_row_size, 1.0f);
+ break;
+ case GL_HALF_FLOAT:
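+ // 0x3C00 is 1.0 in IEEE 754 half-float.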
+ WriteAlphaData<uint16>(
+ pixels, height, channel_count, alpha_channel, unpadded_row_size,
+ padded_row_size, 0x3C00);
+ break;
+ }
+ }
+ }
+}
+
+error::Error GLES2DecoderImpl::HandleReadPixels(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ReadPixels& c =
+ *static_cast<const gles2::cmds::ReadPixels*>(cmd_data);
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleReadPixels");
+ error::Error fbo_error = WillAccessBoundFramebufferForRead();
+ if (fbo_error != error::kNoError)
+ return fbo_error;
+ GLint x = c.x;
+ GLint y = c.y;
+ GLsizei width = c.width;
+ GLsizei height = c.height;
+ GLenum format = c.format;
+ GLenum type = c.type;
+ GLboolean async = c.async;
+ if (width < 0 || height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glReadPixels", "dimensions < 0");
+ return error::kNoError;
+ }
+ typedef cmds::ReadPixels::Result Result;
+ uint32 pixels_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.pack_alignment, &pixels_size,
+ NULL, NULL)) {
+ return error::kOutOfBounds;
+ }
+ void* pixels = GetSharedMemoryAs<void*>(
+ c.pixels_shm_id, c.pixels_shm_offset, pixels_size);
+ if (!pixels) {
+ return error::kOutOfBounds;
+ }
+ Result* result = NULL;
+ if (c.result_shm_id != 0) {
+ result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ }
+
+ if (!validators_->read_pixel_format.IsValid(format)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glReadPixels", format, "format");
+ return error::kNoError;
+ }
+ if (!validators_->read_pixel_type.IsValid(type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glReadPixels", type, "type");
+ return error::kNoError;
+ }
+ if ((format != GL_RGBA && format != GL_BGRA_EXT && format != GL_RGB &&
+ format != GL_ALPHA) || type != GL_UNSIGNED_BYTE) {
+ // format and type are acceptable enums but not guaranteed to be supported
+ // for this framebuffer. We have to ask GL whether they are valid.
+ GLint preferred_format = 0;
+ DoGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &preferred_format);
+ GLint preferred_type = 0;
+ DoGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &preferred_type);
+ if (format != static_cast<GLenum>(preferred_format) ||
+ type != static_cast<GLenum>(preferred_type)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glReadPixels", "format and type incompatible "
+ "with the current read framebuffer");
+ return error::kNoError;
+ }
+ }
+ if (width == 0 || height == 0) {
+ return error::kNoError;
+ }
+
+ // Get the size of the current fbo or backbuffer.
+ gfx::Size max_size = GetBoundReadFrameBufferSize();
+
+ int32 max_x;
+ int32 max_y;
+ if (!SafeAddInt32(x, width, &max_x) || !SafeAddInt32(y, height, &max_y)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
+ return error::kNoError;
+ }
+
+ if (!CheckBoundReadFramebufferColorAttachment("glReadPixels")) {
+ return error::kNoError;
+ }
+
+ if (!CheckBoundFramebuffersValid("glReadPixels")) {
+ return error::kNoError;
+ }
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glReadPixels");
+
+ ScopedResolvedFrameBufferBinder binder(this, false, true);
+
+ if (x < 0 || y < 0 || max_x > max_size.width() || max_y > max_size.height()) {
+ // The user requested an out of range area. Get the results 1 line
+ // at a time.
+ uint32 temp_size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, 2, format, type, state_.pack_alignment, &temp_size,
+ &unpadded_row_size, &padded_row_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
+ return error::kNoError;
+ }
+
+ GLint dest_x_offset = std::max(-x, 0);
+ uint32 dest_row_offset;
+ if (!GLES2Util::ComputeImageDataSizes(
+ dest_x_offset, 1, format, type, state_.pack_alignment, &dest_row_offset,
+ NULL, NULL)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
+ return error::kNoError;
+ }
+
+ // Copy each row into the larger dest rect.
+ int8* dst = static_cast<int8*>(pixels);
+ GLint read_x = std::max(0, x);
+ GLint read_end_x = std::max(0, std::min(max_size.width(), max_x));
+ GLint read_width = read_end_x - read_x;
+ for (GLint yy = 0; yy < height; ++yy) {
+ GLint ry = y + yy;
+
+ // Clear the row.
+ memset(dst, 0, unpadded_row_size);
+
+ // If the row is in range, copy it.
+ if (ry >= 0 && ry < max_size.height() && read_width > 0) {
+ glReadPixels(
+ read_x, ry, read_width, 1, format, type, dst + dest_row_offset);
+ }
+ dst += padded_row_size;
+ }
+ } else {
+ if (async && features().use_async_readpixels) {
+ GLuint buffer;
+ glGenBuffersARB(1, &buffer);
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, buffer);
+ glBufferData(GL_PIXEL_PACK_BUFFER_ARB, pixels_size, NULL, GL_STREAM_READ);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ glReadPixels(x, y, width, height, format, type, 0);
+ pending_readpixel_fences_.push(linked_ptr<FenceCallback>(
+ new FenceCallback()));
+ WaitForReadPixels(base::Bind(
+ &GLES2DecoderImpl::FinishReadPixels,
+ base::internal::SupportsWeakPtrBase::StaticAsWeakPtr
+ <GLES2DecoderImpl>(this),
+ c, buffer));
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
+ return error::kNoError;
+ } else {
+ // On error, unbind pack buffer and fall through to sync readpixels
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
+ }
+ }
+ glReadPixels(x, y, width, height, format, type, pixels);
+ }
+ GLenum error = LOCAL_PEEK_GL_ERROR("glReadPixels");
+ if (error == GL_NO_ERROR) {
+ if (result != NULL) {
+ *result = true;
+ }
+ FinishReadPixels(c, 0);
+ }
+
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandlePixelStorei(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::PixelStorei& c =
+ *static_cast<const gles2::cmds::PixelStorei*>(cmd_data);
+ GLenum pname = c.pname;
+ GLenum param = c.param;
+ if (!validators_->pixel_store.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glPixelStorei", pname, "pname");
+ return error::kNoError;
+ }
+ switch (pname) {
+ case GL_PACK_ALIGNMENT:
+ case GL_UNPACK_ALIGNMENT:
+ if (!validators_->pixel_store_alignment.IsValid(param)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glPixelStorei", "param GL_INVALID_VALUE");
+ return error::kNoError;
+ }
+ break;
+ case GL_UNPACK_FLIP_Y_CHROMIUM:
+ unpack_flip_y_ = (param != 0);
+ return error::kNoError;
+ case GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM:
+ unpack_premultiply_alpha_ = (param != 0);
+ return error::kNoError;
+ case GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM:
+ unpack_unpremultiply_alpha_ = (param != 0);
+ return error::kNoError;
+ default:
+ break;
+ }
+ glPixelStorei(pname, param);
+ switch (pname) {
+ case GL_PACK_ALIGNMENT:
+ state_.pack_alignment = param;
+ break;
+ case GL_PACK_REVERSE_ROW_ORDER_ANGLE:
+ state_.pack_reverse_row_order = (param != 0);
+ break;
+ case GL_UNPACK_ALIGNMENT:
+ state_.unpack_alignment = param;
+ break;
+ default:
+ // Validation should have prevented us from getting here.
+ NOTREACHED();
+ break;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandlePostSubBufferCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::PostSubBufferCHROMIUM& c =
+ *static_cast<const gles2::cmds::PostSubBufferCHROMIUM*>(cmd_data);
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandlePostSubBufferCHROMIUM");
+ {
+ TRACE_EVENT_SYNTHETIC_DELAY("gpu.PresentingFrame");
+ }
+ if (!supports_post_sub_buffer_) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glPostSubBufferCHROMIUM", "command not supported by surface");
+ return error::kNoError;
+ }
+ bool is_tracing;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("gpu.debug"),
+ &is_tracing);
+ if (is_tracing) {
+ bool is_offscreen = !!offscreen_target_frame_buffer_.get();
+ ScopedFrameBufferBinder binder(this, GetBackbufferServiceId());
+ gpu_state_tracer_->TakeSnapshotWithCurrentFramebuffer(
+ is_offscreen ? offscreen_size_ : surface_->GetSize());
+ }
+ if (surface_->PostSubBuffer(c.x, c.y, c.width, c.height)) {
+ return error::kNoError;
+ } else {
+ LOG(ERROR) << "Context lost because PostSubBuffer failed.";
+ return error::kLostContext;
+ }
+}
+
+error::Error GLES2DecoderImpl::HandleScheduleOverlayPlaneCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ScheduleOverlayPlaneCHROMIUM& c =
+ *static_cast<const gles2::cmds::ScheduleOverlayPlaneCHROMIUM*>(cmd_data);
+ TextureRef* ref = texture_manager()->GetTexture(c.overlay_texture_id);
+ if (!ref) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glScheduleOverlayPlaneCHROMIUM",
+ "unknown texture");
+ return error::kNoError;
+ }
+ gfx::GLImage* image =
+ ref->texture()->GetLevelImage(ref->texture()->target(), 0);
+ if (!image) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glScheduleOverlayPlaneCHROMIUM",
+ "unsupported texture format");
+ return error::kNoError;
+ }
+ gfx::OverlayTransform transform = GetGFXOverlayTransform(c.plane_transform);
+ if (transform == gfx::OVERLAY_TRANSFORM_INVALID) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_ENUM,
+ "glScheduleOverlayPlaneCHROMIUM",
+ "invalid transform enum");
+ return error::kNoError;
+ }
+ if (!surface_->ScheduleOverlayPlane(
+ c.plane_z_order,
+ transform,
+ image,
+ gfx::Rect(c.bounds_x, c.bounds_y, c.bounds_width, c.bounds_height),
+ gfx::RectF(c.uv_x, c.uv_y, c.uv_width, c.uv_height))) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glScheduleOverlayPlaneCHROMIUM",
+ "failed to schedule overlay");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::GetAttribLocationHelper(
+ GLuint client_id, uint32 location_shm_id, uint32 location_shm_offset,
+ const std::string& name_str) {
+ if (!StringIsValidForGLES(name_str.c_str())) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetAttribLocation", "Invalid character");
+ return error::kNoError;
+ }
+ Program* program = GetProgramInfoNotShader(
+ client_id, "glGetAttribLocation");
+ if (!program) {
+ return error::kNoError;
+ }
+ if (!program->IsValid()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glGetAttribLocation", "program not linked");
+ return error::kNoError;
+ }
+ GLint* location = GetSharedMemoryAs<GLint*>(
+ location_shm_id, location_shm_offset, sizeof(GLint));
+ if (!location) {
+ return error::kOutOfBounds;
+ }
+ // Require the client to init this in case the context is lost and we are no
+ // longer executing commands.
+ if (*location != -1) {
+ return error::kGenericError;
+ }
+ *location = program->GetAttribLocation(name_str);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetAttribLocation(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetAttribLocation& c =
+ *static_cast<const gles2::cmds::GetAttribLocation*>(cmd_data);
+ Bucket* bucket = GetBucket(c.name_bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ std::string name_str;
+ if (!bucket->GetAsString(&name_str)) {
+ return error::kInvalidArguments;
+ }
+ return GetAttribLocationHelper(
+ c.program, c.location_shm_id, c.location_shm_offset, name_str);
+}
+
+error::Error GLES2DecoderImpl::GetUniformLocationHelper(
+ GLuint client_id, uint32 location_shm_id, uint32 location_shm_offset,
+ const std::string& name_str) {
+ if (!StringIsValidForGLES(name_str.c_str())) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetUniformLocation", "Invalid character");
+ return error::kNoError;
+ }
+ Program* program = GetProgramInfoNotShader(
+ client_id, "glGetUniformLocation");
+ if (!program) {
+ return error::kNoError;
+ }
+ if (!program->IsValid()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glGetUniformLocation", "program not linked");
+ return error::kNoError;
+ }
+ GLint* location = GetSharedMemoryAs<GLint*>(
+ location_shm_id, location_shm_offset, sizeof(GLint));
+ if (!location) {
+ return error::kOutOfBounds;
+ }
+ // Require the client to init this in case the context is lost and we are no
+ // longer executing commands.
+ if (*location != -1) {
+ return error::kGenericError;
+ }
+ *location = program->GetUniformFakeLocation(name_str);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetUniformLocation(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetUniformLocation& c =
+ *static_cast<const gles2::cmds::GetUniformLocation*>(cmd_data);
+ Bucket* bucket = GetBucket(c.name_bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ std::string name_str;
+ if (!bucket->GetAsString(&name_str)) {
+ return error::kInvalidArguments;
+ }
+ return GetUniformLocationHelper(
+ c.program, c.location_shm_id, c.location_shm_offset, name_str);
+}
+
+error::Error GLES2DecoderImpl::HandleGetString(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetString& c =
+ *static_cast<const gles2::cmds::GetString*>(cmd_data);
+ GLenum name = static_cast<GLenum>(c.name);
+ if (!validators_->string_type.IsValid(name)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetString", name, "name");
+ return error::kNoError;
+ }
+ const char* str = reinterpret_cast<const char*>(glGetString(name));
+ std::string extensions;
+ switch (name) {
+ case GL_VERSION:
+ str = "OpenGL ES 2.0 Chromium";
+ break;
+ case GL_SHADING_LANGUAGE_VERSION:
+ str = "OpenGL ES GLSL ES 1.0 Chromium";
+ break;
+ case GL_RENDERER:
+ case GL_VENDOR:
+ // Return the unmasked VENDOR/RENDERER string for WebGL contexts.
+ // They are used by WEBGL_debug_renderer_info.
+ if (!force_webgl_glsl_validation_)
+ str = "Chromium";
+ break;
+ case GL_EXTENSIONS:
+ {
+ // For WebGL contexts, strip out extensions that have not been explicitly
+ // enabled (OES derivatives, EXT frag depth, EXT draw buffers, and
+ // EXT shader texture lod).
+ if (force_webgl_glsl_validation_) {
+ extensions = feature_info_->extensions();
+ if (!derivatives_explicitly_enabled_) {
+ size_t offset = extensions.find(kOESDerivativeExtension);
+ if (std::string::npos != offset) {
+ extensions.replace(offset, arraysize(kOESDerivativeExtension),
+ std::string());
+ }
+ }
+ if (!frag_depth_explicitly_enabled_) {
+ size_t offset = extensions.find(kEXTFragDepthExtension);
+ if (std::string::npos != offset) {
+ extensions.replace(offset, arraysize(kEXTFragDepthExtension),
+ std::string());
+ }
+ }
+ if (!draw_buffers_explicitly_enabled_) {
+ size_t offset = extensions.find(kEXTDrawBuffersExtension);
+ if (std::string::npos != offset) {
+ extensions.replace(offset, arraysize(kEXTDrawBuffersExtension),
+ std::string());
+ }
+ }
+ if (!shader_texture_lod_explicitly_enabled_) {
+ size_t offset = extensions.find(kEXTShaderTextureLodExtension);
+ if (std::string::npos != offset) {
+ extensions.replace(offset,
+ arraysize(kEXTShaderTextureLodExtension),
+ std::string());
+ }
+ }
+ } else {
+ extensions = feature_info_->extensions().c_str();
+ }
+ if (supports_post_sub_buffer_)
+ extensions += " GL_CHROMIUM_post_sub_buffer";
+ str = extensions.c_str();
+ }
+ break;
+ default:
+ break;
+ }
+ Bucket* bucket = CreateBucket(c.bucket_id);
+ bucket->SetFromString(str);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBufferData(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BufferData& c =
+ *static_cast<const gles2::cmds::BufferData*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizeiptr size = static_cast<GLsizeiptr>(c.size);
+ uint32 data_shm_id = static_cast<uint32>(c.data_shm_id);
+ uint32 data_shm_offset = static_cast<uint32>(c.data_shm_offset);
+ GLenum usage = static_cast<GLenum>(c.usage);
+ const void* data = NULL;
+ if (data_shm_id != 0 || data_shm_offset != 0) {
+ data = GetSharedMemoryAs<const void*>(data_shm_id, data_shm_offset, size);
+ if (!data) {
+ return error::kOutOfBounds;
+ }
+ }
+ buffer_manager()->ValidateAndDoBufferData(&state_, target, size, data, usage);
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoBufferSubData(
+ GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid * data) {
+ // Just delegate it. Some validation is actually done before this.
+ buffer_manager()->ValidateAndDoBufferSubData(
+ &state_, target, offset, size, data);
+}
+
+bool GLES2DecoderImpl::ClearLevel(
+ unsigned service_id,
+ unsigned bind_target,
+ unsigned target,
+ int level,
+ unsigned internal_format,
+ unsigned format,
+ unsigned type,
+ int width,
+ int height,
+ bool is_texture_immutable) {
+ uint32 channels = GLES2Util::GetChannelsForFormat(format);
+ if (feature_info_->feature_flags().angle_depth_texture &&
+ (channels & GLES2Util::kDepth) != 0) {
+ // It's a depth format and ANGLE doesn't allow texImage2D or texSubImage2D
+ // on depth formats.
+ GLuint fb = 0;
+ glGenFramebuffersEXT(1, &fb);
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, fb);
+
+ bool have_stencil = (channels & GLES2Util::kStencil) != 0;
+ GLenum attachment = have_stencil ? GL_DEPTH_STENCIL_ATTACHMENT :
+ GL_DEPTH_ATTACHMENT;
+
+ glFramebufferTexture2DEXT(
+ GL_DRAW_FRAMEBUFFER_EXT, attachment, target, service_id, level);
+ // ANGLE promises that a depth-only attachment is ok.
+ if (glCheckFramebufferStatusEXT(GL_DRAW_FRAMEBUFFER_EXT) !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ return false;
+ }
+ glClearStencil(0);
+ state_.SetDeviceStencilMaskSeparate(GL_FRONT, kDefaultStencilMask);
+ state_.SetDeviceStencilMaskSeparate(GL_BACK, kDefaultStencilMask);
+ glClearDepth(1.0f);
+ state_.SetDeviceDepthMask(GL_TRUE);
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ glClear(GL_DEPTH_BUFFER_BIT | (have_stencil ? GL_STENCIL_BUFFER_BIT : 0));
+
+ RestoreClearState();
+
+ glDeleteFramebuffersEXT(1, &fb);
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ GLuint fb_service_id =
+ framebuffer ? framebuffer->service_id() : GetBackbufferServiceId();
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, fb_service_id);
+ return true;
+ }
+
+ static const uint32 kMaxZeroSize = 1024 * 1024 * 4;
+
+ uint32 size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &size,
+ NULL, &padded_row_size)) {
+ return false;
+ }
+
+ TRACE_EVENT1("gpu", "GLES2DecoderImpl::ClearLevel", "size", size);
+
+ int tile_height;
+
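+ // Zero-fill in tiles of at most kMaxZeroSize (4 MiB) so the temporary
+ // buffer stays bounded. For example, a 4096x4096 RGBA/UNSIGNED_BYTE
+ // level has a padded row size of 16384 bytes, giving a tile_height of
+ // 4 MiB / 16 KiB = 256 rows per upload.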
+ if (size > kMaxZeroSize) {
+ if (kMaxZeroSize < padded_row_size) {
+ // That'd be an awfully large texture.
+ return false;
+ }
+ // We should never have a large total size with a zero row size.
+ DCHECK_GT(padded_row_size, 0U);
+ tile_height = kMaxZeroSize / padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, tile_height, format, type, state_.unpack_alignment, &size,
+ NULL, NULL)) {
+ return false;
+ }
+ } else {
+ tile_height = height;
+ }
+
+ // Assumes the size has already been checked.
+ scoped_ptr<char[]> zero(new char[size]);
+ memset(zero.get(), 0, size);
+ glBindTexture(bind_target, service_id);
+
+ GLint y = 0;
+ while (y < height) {
+ GLint h = y + tile_height > height ? height - y : tile_height;
+ if (is_texture_immutable || h != height) {
+ glTexSubImage2D(target, level, 0, y, width, h, format, type, zero.get());
+ } else {
+ glTexImage2D(
+ target, level, internal_format, width, h, 0, format, type,
+ zero.get());
+ }
+ y += tile_height;
+ }
+ TextureRef* texture = texture_manager()->GetTextureInfoForTarget(
+ &state_, bind_target);
+ glBindTexture(bind_target, texture ? texture->service_id() : 0);
+ return true;
+}
+
+namespace {
+
+const int kS3TCBlockWidth = 4;
+const int kS3TCBlockHeight = 4;
+const int kS3TCDXT1BlockSize = 8;
+const int kS3TCDXT3AndDXT5BlockSize = 16;
+
+bool IsValidDXTSize(GLint level, GLsizei size) {
+ return (size == 1) ||
+ (size == 2) || !(size % kS3TCBlockWidth);
+}
+
+bool IsValidPVRTCSize(GLint level, GLsizei size) {
+ return GLES2Util::IsPOT(size);
+}
+
+} // anonymous namespace.
+
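+ // Checks that |size| matches the bytes needed for a width x height image
+ // in the given compressed format. DXT1, ETC1 and ATC-RGB use 8-byte 4x4
+ // blocks; DXT3/DXT5 and the ATC alpha formats use 16-byte 4x4 blocks.
+ // For example, a 16x16 DXT1 image needs ceil(16/4) * ceil(16/4) = 16
+ // blocks, i.e. 128 bytes. The PVRTC cases multiply the clamped
+ // dimensions (minimum 8x8 for 4bpp, 16x8 for 2bpp) by the bits per
+ // pixel and round up to whole bytes.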
+bool GLES2DecoderImpl::ValidateCompressedTexFuncData(
+ const char* function_name,
+ GLsizei width, GLsizei height, GLenum format, size_t size) {
+ unsigned int bytes_required = 0;
+
+ switch (format) {
+ case GL_ATC_RGB_AMD:
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ case GL_ETC1_RGB8_OES: {
+ int num_blocks_across =
+ (width + kS3TCBlockWidth - 1) / kS3TCBlockWidth;
+ int num_blocks_down =
+ (height + kS3TCBlockHeight - 1) / kS3TCBlockHeight;
+ int num_blocks = num_blocks_across * num_blocks_down;
+ bytes_required = num_blocks * kS3TCDXT1BlockSize;
+ break;
+ }
+ case GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
+ case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: {
+ int num_blocks_across =
+ (width + kS3TCBlockWidth - 1) / kS3TCBlockWidth;
+ int num_blocks_down =
+ (height + kS3TCBlockHeight - 1) / kS3TCBlockHeight;
+ int num_blocks = num_blocks_across * num_blocks_down;
+ bytes_required = num_blocks * kS3TCDXT3AndDXT5BlockSize;
+ break;
+ }
+ case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG: {
+ bytes_required = (std::max(width, 8) * std::max(height, 8) * 4 + 7)/8;
+ break;
+ }
+ case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG: {
+ bytes_required = (std::max(width, 16) * std::max(height, 8) * 2 + 7)/8;
+ break;
+ }
+ default:
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, format, "format");
+ return false;
+ }
+
+ if (size != bytes_required) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, function_name, "size is not correct for dimensions");
+ return false;
+ }
+
+ return true;
+}
+
+bool GLES2DecoderImpl::ValidateCompressedTexDimensions(
+ const char* function_name,
+ GLint level, GLsizei width, GLsizei height, GLenum format) {
+ switch (format) {
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: {
+ if (!IsValidDXTSize(level, width) || !IsValidDXTSize(level, height)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "width or height invalid for level");
+ return false;
+ }
+ return true;
+ }
+ case GL_ATC_RGB_AMD:
+ case GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
+ case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD:
+ case GL_ETC1_RGB8_OES: {
+ if (width <= 0 || height <= 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "width or height invalid for level");
+ return false;
+ }
+ return true;
+ }
+ case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG: {
+ if (!IsValidPVRTCSize(level, width) ||
+ !IsValidPVRTCSize(level, height)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "width or height invalid for level");
+ return false;
+ }
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
+bool GLES2DecoderImpl::ValidateCompressedTexSubDimensions(
+ const char* function_name,
+ GLenum target, GLint level, GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height, GLenum format,
+ Texture* texture) {
+ if (xoffset < 0 || yoffset < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, function_name, "xoffset or yoffset < 0");
+ return false;
+ }
+
+ switch (format) {
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: {
+ const int kBlockWidth = 4;
+ const int kBlockHeight = 4;
+ if ((xoffset % kBlockWidth) || (yoffset % kBlockHeight)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "xoffset or yoffset not multiple of 4");
+ return false;
+ }
+ GLsizei tex_width = 0;
+ GLsizei tex_height = 0;
+ if (!texture->GetLevelSize(target, level, &tex_width, &tex_height) ||
+ width - xoffset > tex_width ||
+ height - yoffset > tex_height) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "dimensions out of range");
+ return false;
+ }
+ return ValidateCompressedTexDimensions(
+ function_name, level, width, height, format);
+ }
+ case GL_ATC_RGB_AMD:
+ case GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
+ case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD: {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "not supported for ATC textures");
+ return false;
+ }
+ case GL_ETC1_RGB8_OES: {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "not supported for ECT1_RGB8_OES textures");
+ return false;
+ }
+ case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG: {
+ if ((xoffset != 0) || (yoffset != 0)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "xoffset and yoffset must be zero");
+ return false;
+ }
+ GLsizei tex_width = 0;
+ GLsizei tex_height = 0;
+ if (!texture->GetLevelSize(target, level, &tex_width, &tex_height) ||
+ width != tex_width ||
+ height != tex_height) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "dimensions must match existing texture level dimensions");
+ return false;
+ }
+ return ValidateCompressedTexDimensions(
+ function_name, level, width, height, format);
+ }
+ default:
+ return false;
+ }
+}
+
+error::Error GLES2DecoderImpl::DoCompressedTexImage2D(
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei image_size,
+ const void* data) {
+ // TODO(gman): Validate image_size is correct for width, height and format.
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCompressedTexImage2D", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->compressed_texture_format.IsValid(
+ internal_format)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCompressedTexImage2D", internal_format, "internal_format");
+ return error::kNoError;
+ }
+ if (!texture_manager()->ValidForTarget(target, level, width, height, 1) ||
+ border != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glCompressedTexImage2D", "dimensions out of range");
+ return error::kNoError;
+ }
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glCompressedTexImage2D", "unknown texture target");
+ return error::kNoError;
+ }
+ Texture* texture = texture_ref->texture();
+ if (texture->IsImmutable()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCompressedTexImage2D", "texture is immutable");
+ return error::kNoError;
+ }
+
+ if (!ValidateCompressedTexDimensions(
+ "glCompressedTexImage2D", level, width, height, internal_format) ||
+ !ValidateCompressedTexFuncData(
+ "glCompressedTexImage2D", width, height, internal_format, image_size)) {
+ return error::kNoError;
+ }
+
+ if (!EnsureGPUMemoryAvailable(image_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glCompressedTexImage2D", "out of memory");
+ return error::kNoError;
+ }
+
+ if (texture->IsAttachedToFramebuffer()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+
+ scoped_ptr<int8[]> zero;
+ if (!data) {
+ zero.reset(new int8[image_size]);
+ memset(zero.get(), 0, image_size);
+ data = zero.get();
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCompressedTexImage2D");
+ glCompressedTexImage2D(
+ target, level, internal_format, width, height, border, image_size, data);
+ GLenum error = LOCAL_PEEK_GL_ERROR("glCompressedTexImage2D");
+ if (error == GL_NO_ERROR) {
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, level, internal_format,
+ width, height, 1, border, 0, 0, true);
+ }
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCompressedTexImage2D(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CompressedTexImage2D& c =
+ *static_cast<const gles2::cmds::CompressedTexImage2D*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLenum internal_format = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLint border = static_cast<GLint>(c.border);
+ GLsizei image_size = static_cast<GLsizei>(c.imageSize);
+ uint32 data_shm_id = static_cast<uint32>(c.data_shm_id);
+ uint32 data_shm_offset = static_cast<uint32>(c.data_shm_offset);
+ const void* data = NULL;
+ if (data_shm_id != 0 || data_shm_offset != 0) {
+ data = GetSharedMemoryAs<const void*>(
+ data_shm_id, data_shm_offset, image_size);
+ if (!data) {
+ return error::kOutOfBounds;
+ }
+ }
+ return DoCompressedTexImage2D(
+ target, level, internal_format, width, height, border, image_size, data);
+}
+
+error::Error GLES2DecoderImpl::HandleCompressedTexImage2DBucket(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CompressedTexImage2DBucket& c =
+ *static_cast<const gles2::cmds::CompressedTexImage2DBucket*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLenum internal_format = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLint border = static_cast<GLint>(c.border);
+ Bucket* bucket = GetBucket(c.bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ uint32 data_size = bucket->size();
+ GLsizei imageSize = data_size;
+ const void* data = bucket->GetData(0, data_size);
+ if (!data) {
+ return error::kInvalidArguments;
+ }
+ return DoCompressedTexImage2D(
+ target, level, internal_format, width, height, border,
+ imageSize, data);
+}
+
+error::Error GLES2DecoderImpl::HandleCompressedTexSubImage2DBucket(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CompressedTexSubImage2DBucket& c =
+ *static_cast<const gles2::cmds::CompressedTexSubImage2DBucket*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLenum format = static_cast<GLenum>(c.format);
+ Bucket* bucket = GetBucket(c.bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ uint32 data_size = bucket->size();
+ GLsizei imageSize = data_size;
+ const void* data = bucket->GetData(0, data_size);
+ if (!data) {
+ return error::kInvalidArguments;
+ }
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_ENUM, "glCompressedTexSubImage2D", "target");
+ return error::kNoError;
+ }
+ if (!validators_->compressed_texture_format.IsValid(format)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCompressedTexSubImage2D", format, "format");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "height < 0");
+ return error::kNoError;
+ }
+ if (imageSize < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "imageSize < 0");
+ return error::kNoError;
+ }
+ DoCompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, imageSize, data);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexImage2D(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexImage2D& c =
+ *static_cast<const gles2::cmds::TexImage2D*>(cmd_data);
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::HandleTexImage2D",
+ "width", c.width, "height", c.height);
+ // Set as failed for now; on success this will be reset to not failed.
+ texture_state_.tex_image_2d_failed = true;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ // TODO(kloveless): Change TexImage2D command to use unsigned integer
+ // for internalformat.
+ GLenum internal_format = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLint border = static_cast<GLint>(c.border);
+ GLenum format = static_cast<GLenum>(c.format);
+ GLenum type = static_cast<GLenum>(c.type);
+ uint32 pixels_shm_id = static_cast<uint32>(c.pixels_shm_id);
+ uint32 pixels_shm_offset = static_cast<uint32>(c.pixels_shm_offset);
+ uint32 pixels_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &pixels_size, NULL,
+ NULL)) {
+ return error::kOutOfBounds;
+ }
+ const void* pixels = NULL;
+ if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
+ pixels = GetSharedMemoryAs<const void*>(
+ pixels_shm_id, pixels_shm_offset, pixels_size);
+ if (!pixels) {
+ return error::kOutOfBounds;
+ }
+ }
+
+ TextureManager::DoTextImage2DArguments args = {
+ target, level, internal_format, width, height, border, format, type,
+ pixels, pixels_size};
+ texture_manager()->ValidateAndDoTexImage2D(
+ &texture_state_, &state_, &framebuffer_state_, args);
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoCompressedTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei image_size,
+ const void * data) {
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCompressedTexSubImage2D", "unknown texture for target");
+ return;
+ }
+ Texture* texture = texture_ref->texture();
+ GLenum type = 0;
+ GLenum internal_format = 0;
+ if (!texture->GetLevelType(target, level, &type, &internal_format)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCompressedTexSubImage2D", "level does not exist.");
+ return;
+ }
+ if (internal_format != format) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCompressedTexSubImage2D", "format does not match internal format.");
+ return;
+ }
+ if (!texture->ValidForTexture(
+ target, level, xoffset, yoffset, width, height, type)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "bad dimensions.");
+ return;
+ }
+
+ if (!ValidateCompressedTexFuncData(
+ "glCompressedTexSubImage2D", width, height, format, image_size) ||
+ !ValidateCompressedTexSubDimensions(
+ "glCompressedTexSubImage2D",
+ target, level, xoffset, yoffset, width, height, format, texture)) {
+ return;
+ }
+
+ // Note: There is no need to deal with texture cleared tracking here
+ // because the validation above means you can only get here if the level
+ // is already a matching compressed format and in that case
+ // CompressedTexImage2D already cleared the texture.
+ glCompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, image_size, data);
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
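+ // Clips the interval [start, start + range) against [0, sourceRange).
+ // For example, Clip(-2, 10, 8, ...) yields start 0 and range 8, while
+ // Clip(5, 10, 8, ...) yields start 5 and range 3. A request entirely
+ // outside the source produces a non-positive range, which callers treat
+ // as "nothing to copy".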
+static void Clip(
+ GLint start, GLint range, GLint sourceRange,
+ GLint* out_start, GLint* out_range) {
+ DCHECK(out_start);
+ DCHECK(out_range);
+ if (start < 0) {
+ range += start;
+ start = 0;
+ }
+ GLint end = start + range;
+ if (end > sourceRange) {
+ range -= end - sourceRange;
+ }
+ *out_start = start;
+ *out_range = range;
+}
+
+void GLES2DecoderImpl::DoCopyTexImage2D(
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) {
+ DCHECK(!ShouldDeferReads());
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCopyTexImage2D", "unknown texture for target");
+ return;
+ }
+ Texture* texture = texture_ref->texture();
+ if (texture->IsImmutable()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glCopyTexImage2D", "texture is immutable");
+ return;
+ }
+ if (!texture_manager()->ValidForTarget(target, level, width, height, 1) ||
+ border != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTexImage2D", "dimensions out of range");
+ return;
+ }
+ if (!texture_manager()->ValidateFormatAndTypeCombination(
+ state_.GetErrorState(), "glCopyTexImage2D", internal_format,
+ GL_UNSIGNED_BYTE)) {
+ return;
+ }
+
+ // Check we have compatible formats.
+ GLenum read_format = GetBoundReadFrameBufferInternalFormat();
+ uint32 channels_exist = GLES2Util::GetChannelsForFormat(read_format);
+ uint32 channels_needed = GLES2Util::GetChannelsForFormat(internal_format);
+
+ if ((channels_needed & channels_exist) != channels_needed) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glCopyTexImage2D", "incompatible format");
+ return;
+ }
+
+ if ((channels_needed & (GLES2Util::kDepth | GLES2Util::kStencil)) != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCopyTexImage2D", "can not be used with depth or stencil textures");
+ return;
+ }
+
+ uint32 estimated_size = 0;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, internal_format, GL_UNSIGNED_BYTE, state_.unpack_alignment,
+ &estimated_size, NULL, NULL)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glCopyTexImage2D", "dimensions too large");
+ return;
+ }
+
+ if (!EnsureGPUMemoryAvailable(estimated_size)) {
+ LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glCopyTexImage2D", "out of memory");
+ return;
+ }
+
+ if (!CheckBoundReadFramebufferColorAttachment("glCopyTexImage2D")) {
+ return;
+ }
+
+ if (!CheckBoundFramebuffersValid("glCopyTexImage2D")) {
+ return;
+ }
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopyTexImage2D");
+ ScopedResolvedFrameBufferBinder binder(this, false, true);
+ gfx::Size size = GetBoundReadFrameBufferSize();
+
+ if (texture->IsAttachedToFramebuffer()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+
+ // Clip the copy rectangle to the source dimensions.
+ GLint copyX = 0;
+ GLint copyY = 0;
+ GLint copyWidth = 0;
+ GLint copyHeight = 0;
+ Clip(x, width, size.width(), &copyX, &copyWidth);
+ Clip(y, height, size.height(), &copyY, &copyHeight);
+
+ if (copyX != x ||
+ copyY != y ||
+ copyWidth != width ||
+ copyHeight != height) {
+ // some part was clipped so clear the texture.
+ if (!ClearLevel(
+ texture->service_id(), texture->target(),
+ target, level, internal_format, internal_format, GL_UNSIGNED_BYTE,
+ width, height, texture->IsImmutable())) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glCopyTexImage2D", "dimensions too big");
+ return;
+ }
+ if (copyHeight > 0 && copyWidth > 0) {
+ GLint dx = copyX - x;
+ GLint dy = copyY - y;
+ GLint destX = dx;
+ GLint destY = dy;
+ ScopedModifyPixels modify(texture_ref);
+ glCopyTexSubImage2D(target, level,
+ destX, destY, copyX, copyY,
+ copyWidth, copyHeight);
+ }
+ } else {
+ ScopedModifyPixels modify(texture_ref);
+ glCopyTexImage2D(target, level, internal_format,
+ copyX, copyY, copyWidth, copyHeight, border);
+ }
+ GLenum error = LOCAL_PEEK_GL_ERROR("glCopyTexImage2D");
+ if (error == GL_NO_ERROR) {
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, level, internal_format, width, height, 1,
+ border, internal_format, GL_UNSIGNED_BYTE, true);
+ }
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
+void GLES2DecoderImpl::DoCopyTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ DCHECK(!ShouldDeferReads());
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCopyTexSubImage2D", "unknown texture for target");
+ return;
+ }
+ Texture* texture = texture_ref->texture();
+ GLenum type = 0;
+ GLenum format = 0;
+ if (!texture->GetLevelType(target, level, &type, &format) ||
+ !texture->ValidForTexture(
+ target, level, xoffset, yoffset, width, height, type)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTexSubImage2D", "bad dimensions.");
+ return;
+ }
+ if (async_pixel_transfer_manager_->AsyncTransferIsInProgress(texture_ref)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCopyTexSubImage2D", "async upload pending for texture");
+ return;
+ }
+
+ // Check we have compatible formats.
+ GLenum read_format = GetBoundReadFrameBufferInternalFormat();
+ uint32 channels_exist = GLES2Util::GetChannelsForFormat(read_format);
+ uint32 channels_needed = GLES2Util::GetChannelsForFormat(format);
+
+ if (!channels_needed ||
+ (channels_needed & channels_exist) != channels_needed) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glCopyTexSubImage2D", "incompatible format");
+ return;
+ }
+
+ if ((channels_needed & (GLES2Util::kDepth | GLES2Util::kStencil)) != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCopySubImage2D", "can not be used with depth or stencil textures");
+ return;
+ }
+
+ if (!CheckBoundReadFramebufferColorAttachment("glCopyTexSubImage2D")) {
+ return;
+ }
+
+ if (!CheckBoundFramebuffersValid("glCopyTexSubImage2D")) {
+ return;
+ }
+
+ ScopedResolvedFrameBufferBinder binder(this, false, true);
+ gfx::Size size = GetBoundReadFrameBufferSize();
+ GLint copyX = 0;
+ GLint copyY = 0;
+ GLint copyWidth = 0;
+ GLint copyHeight = 0;
+ Clip(x, width, size.width(), &copyX, &copyWidth);
+ Clip(y, height, size.height(), &copyY, &copyHeight);
+
+ if (!texture_manager()->ClearTextureLevel(this, texture_ref, target, level)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glCopyTexSubImage2D", "dimensions too big");
+ return;
+ }
+
+ if (copyX != x ||
+ copyY != y ||
+ copyWidth != width ||
+ copyHeight != height) {
+ // some part was clipped so clear the sub rect.
+ uint32 pixels_size = 0;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &pixels_size,
+ NULL, NULL)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTexSubImage2D", "dimensions too large");
+ return;
+ }
+ scoped_ptr<char[]> zero(new char[pixels_size]);
+ memset(zero.get(), 0, pixels_size);
+ ScopedModifyPixels modify(texture_ref);
+ glTexSubImage2D(
+ target, level, xoffset, yoffset, width, height,
+ format, type, zero.get());
+ }
+
+ if (copyHeight > 0 && copyWidth > 0) {
+ GLint dx = copyX - x;
+ GLint dy = copyY - y;
+ GLint destX = xoffset + dx;
+ GLint destY = yoffset + dy;
+ ScopedModifyPixels modify(texture_ref);
+ glCopyTexSubImage2D(target, level,
+ destX, destY, copyX, copyY,
+ copyWidth, copyHeight);
+ }
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
+bool GLES2DecoderImpl::ValidateTexSubImage2D(
+ error::Error* error,
+ const char* function_name,
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void * data) {
+ (*error) = error::kNoError;
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, target, "target");
+ return false;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "width < 0");
+ return false;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "height < 0");
+ return false;
+ }
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ function_name, "unknown texture for target");
+ return false;
+ }
+ Texture* texture = texture_ref->texture();
+ GLenum current_type = 0;
+ GLenum internal_format = 0;
+ if (!texture->GetLevelType(target, level, &current_type, &internal_format)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "level does not exist.");
+ return false;
+ }
+ if (!texture_manager()->ValidateTextureParameters(state_.GetErrorState(),
+ function_name, format, type, internal_format, level)) {
+ return false;
+ }
+ if (type != current_type) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ function_name, "type does not match type of texture.");
+ return false;
+ }
+ if (async_pixel_transfer_manager_->AsyncTransferIsInProgress(texture_ref)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ function_name, "async upload pending for texture");
+ return false;
+ }
+ if (!texture->ValidForTexture(
+ target, level, xoffset, yoffset, width, height, type)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "bad dimensions.");
+ return false;
+ }
+ if ((GLES2Util::GetChannelsForFormat(format) &
+ (GLES2Util::kDepth | GLES2Util::kStencil)) != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ function_name, "can not supply data for depth or stencil textures");
+ return false;
+ }
+ if (data == NULL) {
+ (*error) = error::kOutOfBounds;
+ return false;
+ }
+ return true;
+}
+
+error::Error GLES2DecoderImpl::DoTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void * data) {
+ error::Error error = error::kNoError;
+ if (!ValidateTexSubImage2D(&error, "glTexSubImage2D", target, level,
+ xoffset, yoffset, width, height, format, type, data)) {
+ return error;
+ }
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ Texture* texture = texture_ref->texture();
+ GLsizei tex_width = 0;
+ GLsizei tex_height = 0;
+ bool ok = texture->GetLevelSize(target, level, &tex_width, &tex_height);
+ DCHECK(ok);
+ if (xoffset != 0 || yoffset != 0 ||
+ width != tex_width || height != tex_height) {
+ if (!texture_manager()->ClearTextureLevel(this, texture_ref,
+ target, level)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glTexSubImage2D", "dimensions too big");
+ return error::kNoError;
+ }
+ ScopedTextureUploadTimer timer(&texture_state_);
+ glTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type, data);
+ return error::kNoError;
+ }
+
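+ // The whole level is being replaced. When TexSubImage2D is not known to
+ // be faster than TexImage2D and the texture is not immutable, respecify
+ // the level with glTexImage2D instead.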
+ if (!texture_state_.texsubimage2d_faster_than_teximage2d &&
+ !texture->IsImmutable()) {
+ ScopedTextureUploadTimer timer(&texture_state_);
+ GLenum internal_format;
+ GLenum tex_type;
+ texture->GetLevelType(target, level, &tex_type, &internal_format);
+ // NOTE: In OpenGL ES 2.0 border is always zero. If that changes we'll need
+ // to look it up.
+ glTexImage2D(
+ target, level, internal_format, width, height, 0, format, type, data);
+ } else {
+ ScopedTextureUploadTimer timer(&texture_state_);
+ glTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type, data);
+ }
+ texture_manager()->SetLevelCleared(texture_ref, target, level, true);
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexSubImage2D(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexSubImage2D& c =
+ *static_cast<const gles2::cmds::TexSubImage2D*>(cmd_data);
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::HandleTexSubImage2D",
+ "width", c.width, "height", c.height);
+ GLboolean internal = static_cast<GLboolean>(c.internal);
+ if (internal == GL_TRUE && texture_state_.tex_image_2d_failed)
+ return error::kNoError;
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLenum format = static_cast<GLenum>(c.format);
+ GLenum type = static_cast<GLenum>(c.type);
+ uint32 data_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &data_size,
+ NULL, NULL)) {
+ return error::kOutOfBounds;
+ }
+ const void* pixels = GetSharedMemoryAs<const void*>(
+ c.pixels_shm_id, c.pixels_shm_offset, data_size);
+ return DoTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type, pixels);
+}
+
+error::Error GLES2DecoderImpl::HandleGetVertexAttribPointerv(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetVertexAttribPointerv& c =
+ *static_cast<const gles2::cmds::GetVertexAttribPointerv*>(cmd_data);
+ GLuint index = static_cast<GLuint>(c.index);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetVertexAttribPointerv::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.pointer_shm_id, c.pointer_shm_offset, Result::ComputeSize(1));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ if (!validators_->vertex_pointer.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetVertexAttribPointerv", pname, "pname");
+ return error::kNoError;
+ }
+ if (index >= group_->max_vertex_attribs()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetVertexAttribPointerv", "index out of range.");
+ return error::kNoError;
+ }
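+ // The client-side attrib pointer is not available on the service side;
+ // return the buffer offset recorded for this attrib instead.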
+ result->SetNumResults(1);
+ *result->GetData() =
+ state_.vertex_attrib_manager->GetVertexAttrib(index)->offset();
+ return error::kNoError;
+}
+
+bool GLES2DecoderImpl::GetUniformSetup(
+ GLuint program_id, GLint fake_location,
+ uint32 shm_id, uint32 shm_offset,
+ error::Error* error, GLint* real_location,
+ GLuint* service_id, void** result_pointer, GLenum* result_type) {
+ DCHECK(error);
+ DCHECK(service_id);
+ DCHECK(result_pointer);
+ DCHECK(result_type);
+ DCHECK(real_location);
+ *error = error::kNoError;
+ // Make sure we have enough room for the result on failure.
+ SizedResult<GLint>* result;
+ result = GetSharedMemoryAs<SizedResult<GLint>*>(
+ shm_id, shm_offset, SizedResult<GLint>::ComputeSize(0));
+ if (!result) {
+ *error = error::kOutOfBounds;
+ return false;
+ }
+ *result_pointer = result;
+ // Set the result size to 0 so the client does not have to check for success.
+ result->SetNumResults(0);
+ Program* program = GetProgramInfoNotShader(program_id, "glGetUniform");
+ if (!program) {
+ return false;
+ }
+ if (!program->IsValid()) {
+ // Program was not linked successfully (i.e., glLinkProgram did not succeed).
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glGetUniform", "program not linked");
+ return false;
+ }
+ *service_id = program->service_id();
+ GLint array_index = -1;
+ const Program::UniformInfo* uniform_info =
+ program->GetUniformInfoByFakeLocation(
+ fake_location, real_location, &array_index);
+ if (!uniform_info) {
+ // No such location.
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glGetUniform", "unknown location");
+ return false;
+ }
+ GLenum type = uniform_info->type;
+ GLsizei size = GLES2Util::GetGLDataTypeSizeForUniforms(type);
+ if (size == 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glGetUniform", "unknown type");
+ return false;
+ }
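+ // Remap the shared memory with the real result size now that the
+ // uniform's type is known.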
+ result = GetSharedMemoryAs<SizedResult<GLint>*>(
+ shm_id, shm_offset, SizedResult<GLint>::ComputeSizeFromBytes(size));
+ if (!result) {
+ *error = error::kOutOfBounds;
+ return false;
+ }
+ result->size = size;
+ *result_type = type;
+ return true;
+}
+
+error::Error GLES2DecoderImpl::HandleGetUniformiv(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetUniformiv& c =
+ *static_cast<const gles2::cmds::GetUniformiv*>(cmd_data);
+ GLuint program = c.program;
+ GLint fake_location = c.location;
+ GLuint service_id;
+ GLenum result_type;
+ GLint real_location = -1;
+ Error error;
+ void* result;
+ if (GetUniformSetup(
+ program, fake_location, c.params_shm_id, c.params_shm_offset,
+ &error, &real_location, &service_id, &result, &result_type)) {
+ glGetUniformiv(
+ service_id, real_location,
+ static_cast<cmds::GetUniformiv::Result*>(result)->GetData());
+ }
+ return error;
+}
+
+error::Error GLES2DecoderImpl::HandleGetUniformfv(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetUniformfv& c =
+ *static_cast<const gles2::cmds::GetUniformfv*>(cmd_data);
+ GLuint program = c.program;
+ GLint fake_location = c.location;
+ GLuint service_id;
+ GLint real_location = -1;
+ Error error;
+ typedef cmds::GetUniformfv::Result Result;
+ Result* result;
+ GLenum result_type;
+ if (GetUniformSetup(
+ program, fake_location, c.params_shm_id, c.params_shm_offset,
+ &error, &real_location, &service_id,
+ reinterpret_cast<void**>(&result), &result_type)) {
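+ // Boolean uniforms are fetched as integers and converted to
+ // 0.0f / 1.0f floats.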
+ if (result_type == GL_BOOL || result_type == GL_BOOL_VEC2 ||
+ result_type == GL_BOOL_VEC3 || result_type == GL_BOOL_VEC4) {
+ GLsizei num_values = result->GetNumResults();
+ scoped_ptr<GLint[]> temp(new GLint[num_values]);
+ glGetUniformiv(service_id, real_location, temp.get());
+ GLfloat* dst = result->GetData();
+ for (GLsizei ii = 0; ii < num_values; ++ii) {
+ dst[ii] = (temp[ii] != 0);
+ }
+ } else {
+ glGetUniformfv(service_id, real_location, result->GetData());
+ }
+ }
+ return error;
+}
+
+error::Error GLES2DecoderImpl::HandleGetShaderPrecisionFormat(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetShaderPrecisionFormat& c =
+ *static_cast<const gles2::cmds::GetShaderPrecisionFormat*>(cmd_data);
+ GLenum shader_type = static_cast<GLenum>(c.shadertype);
+ GLenum precision_type = static_cast<GLenum>(c.precisiontype);
+ typedef cmds::GetShaderPrecisionFormat::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->success != 0) {
+ return error::kInvalidArguments;
+ }
+ if (!validators_->shader_type.IsValid(shader_type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetShaderPrecisionFormat", shader_type, "shader_type");
+ return error::kNoError;
+ }
+ if (!validators_->shader_precision.IsValid(precision_type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetShaderPrecisionFormat", precision_type, "precision_type");
+ return error::kNoError;
+ }
+
+ result->success = 1; // true
+
+ GLint range[2] = { 0, 0 };
+ GLint precision = 0;
+ GetShaderPrecisionFormatImpl(shader_type, precision_type, range, &precision);
+
+ result->min_range = range[0];
+ result->max_range = range[1];
+ result->precision = precision;
+
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetAttachedShaders(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetAttachedShaders& c =
+ *static_cast<const gles2::cmds::GetAttachedShaders*>(cmd_data);
+ uint32 result_size = c.result_size;
+ GLuint program_id = static_cast<GLuint>(c.program);
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glGetAttachedShaders");
+ if (!program) {
+ return error::kNoError;
+ }
+ typedef cmds::GetAttachedShaders::Result Result;
+ uint32 max_count = Result::ComputeMaxResults(result_size);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, Result::ComputeSize(max_count));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ GLsizei count = 0;
+ glGetAttachedShaders(
+ program->service_id(), max_count, &count, result->GetData());
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (!shader_manager()->GetClientId(result->GetData()[ii],
+ &result->GetData()[ii])) {
+ NOTREACHED();
+ return error::kGenericError;
+ }
+ }
+ result->SetNumResults(count);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetActiveUniform(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetActiveUniform& c =
+ *static_cast<const gles2::cmds::GetActiveUniform*>(cmd_data);
+ GLuint program_id = c.program;
+ GLuint index = c.index;
+ uint32 name_bucket_id = c.name_bucket_id;
+ typedef cmds::GetActiveUniform::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->success != 0) {
+ return error::kInvalidArguments;
+ }
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glGetActiveUniform");
+ if (!program) {
+ return error::kNoError;
+ }
+ const Program::UniformInfo* uniform_info =
+ program->GetUniformInfo(index);
+ if (!uniform_info) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetActiveUniform", "index out of range");
+ return error::kNoError;
+ }
+ result->success = 1; // true.
+ result->size = uniform_info->size;
+ result->type = uniform_info->type;
+ Bucket* bucket = CreateBucket(name_bucket_id);
+ bucket->SetFromString(uniform_info->name.c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetActiveAttrib(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetActiveAttrib& c =
+ *static_cast<const gles2::cmds::GetActiveAttrib*>(cmd_data);
+ GLuint program_id = c.program;
+ GLuint index = c.index;
+ uint32 name_bucket_id = c.name_bucket_id;
+ typedef cmds::GetActiveAttrib::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->success != 0) {
+ return error::kInvalidArguments;
+ }
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glGetActiveAttrib");
+ if (!program) {
+ return error::kNoError;
+ }
+ const Program::VertexAttrib* attrib_info =
+ program->GetAttribInfo(index);
+ if (!attrib_info) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetActiveAttrib", "index out of range");
+ return error::kNoError;
+ }
+ result->success = 1; // true.
+ result->size = attrib_info->size;
+ result->type = attrib_info->type;
+ Bucket* bucket = CreateBucket(name_bucket_id);
+ bucket->SetFromString(attrib_info->name.c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleShaderBinary(uint32 immediate_data_size,
+ const void* cmd_data) {
+#if 1 // No binary shader support.
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glShaderBinary", "not supported");
+ return error::kNoError;
+#else
+ GLsizei n = static_cast<GLsizei>(c.n);
+ if (n < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "n < 0");
+ return error::kNoError;
+ }
+ GLsizei length = static_cast<GLsizei>(c.length);
+ if (length < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "length < 0");
+ return error::kNoError;
+ }
+ uint32 data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* shaders = GetSharedMemoryAs<const GLuint*>(
+ c.shaders_shm_id, c.shaders_shm_offset, data_size);
+ GLenum binaryformat = static_cast<GLenum>(c.binaryformat);
+ const void* binary = GetSharedMemoryAs<const void*>(
+ c.binary_shm_id, c.binary_shm_offset, length);
+ if (shaders == NULL || binary == NULL) {
+ return error::kOutOfBounds;
+ }
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ Shader* shader = GetShader(shaders[ii]);
+ if (!shader) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "unknown shader");
+ return error::kNoError;
+ }
+ service_ids[ii] = shader->service_id();
+ }
+ // TODO(gman): call glShaderBinary
+ return error::kNoError;
+#endif
+}
+
+void GLES2DecoderImpl::DoSwapBuffers() {
+ bool is_offscreen = !!offscreen_target_frame_buffer_.get();
+
+ int this_frame_number = frame_number_++;
+ // TRACE_EVENT for gpu tests:
+ TRACE_EVENT_INSTANT2("test_gpu", "SwapBuffersLatency",
+ TRACE_EVENT_SCOPE_THREAD,
+ "GLImpl", static_cast<int>(gfx::GetGLImplementation()),
+ "width", (is_offscreen ? offscreen_size_.width() :
+ surface_->GetSize().width()));
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoSwapBuffers",
+ "offscreen", is_offscreen,
+ "frame", this_frame_number);
+ {
+ TRACE_EVENT_SYNTHETIC_DELAY("gpu.PresentingFrame");
+ }
+
+ bool is_tracing;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("gpu.debug"),
+ &is_tracing);
+ if (is_tracing) {
+ ScopedFrameBufferBinder binder(this, GetBackbufferServiceId());
+ gpu_state_tracer_->TakeSnapshotWithCurrentFramebuffer(
+ is_offscreen ? offscreen_size_ : surface_->GetSize());
+ }
+
+ // If offscreen then don't actually SwapBuffers to the display. Just copy
+ // the rendered frame to another frame buffer.
+ if (is_offscreen) {
+ TRACE_EVENT2("gpu", "Offscreen",
+ "width", offscreen_size_.width(), "height", offscreen_size_.height());
+ if (offscreen_size_ != offscreen_saved_color_texture_->size()) {
+ // Workaround for NVIDIA driver bug on OS X; crbug.com/89557,
+ // crbug.com/94163. TODO(kbr): figure out reproduction so Apple will
+ // fix this.
+ if (workarounds().needs_offscreen_buffer_workaround) {
+ offscreen_saved_frame_buffer_->Create();
+ glFinish();
+ }
+
+ // Allocate the offscreen saved color texture.
+ DCHECK(offscreen_saved_color_format_);
+ offscreen_saved_color_texture_->AllocateStorage(
+ offscreen_size_, offscreen_saved_color_format_, false);
+
+ offscreen_saved_frame_buffer_->AttachRenderTexture(
+ offscreen_saved_color_texture_.get());
+ if (offscreen_size_.width() != 0 && offscreen_size_.height() != 0) {
+ if (offscreen_saved_frame_buffer_->CheckStatus() !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "because offscreen saved FBO was incomplete.";
+ LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
+ return;
+ }
+
+ // Clear the offscreen color texture.
+ // TODO(piman): Is this still necessary?
+ {
+ ScopedFrameBufferBinder binder(this,
+ offscreen_saved_frame_buffer_->id());
+ glClearColor(0, 0, 0, 0);
+ state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ glClear(GL_COLOR_BUFFER_BIT);
+ RestoreClearState();
+ }
+ }
+
+ UpdateParentTextureInfo();
+ }
+
+ if (offscreen_size_.width() == 0 || offscreen_size_.height() == 0)
+ return;
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::DoSwapBuffers", GetErrorState());
+
+ if (IsOffscreenBufferMultisampled()) {
+ // For multisampled buffers, resolve the frame buffer.
+ ScopedResolvedFrameBufferBinder binder(this, true, false);
+ } else {
+ ScopedFrameBufferBinder binder(this,
+ offscreen_target_frame_buffer_->id());
+
+ if (offscreen_target_buffer_preserved_) {
+ // Copy the target frame buffer to the saved offscreen texture.
+ offscreen_saved_color_texture_->Copy(
+ offscreen_saved_color_texture_->size(),
+ offscreen_saved_color_format_);
+ } else {
+ // Flip the textures in the parent context via the texture manager.
+ if (!!offscreen_saved_color_texture_info_.get())
+ offscreen_saved_color_texture_info_->texture()->
+ SetServiceId(offscreen_target_color_texture_->id());
+
+ offscreen_saved_color_texture_.swap(offscreen_target_color_texture_);
+ offscreen_target_frame_buffer_->AttachRenderTexture(
+ offscreen_target_color_texture_.get());
+ }
+
+ // Ensure the side effects of the copy are visible to the parent
+ // context. There is no need to do this for ANGLE because it uses a
+ // single D3D device for all contexts.
+ if (!feature_info_->feature_flags().is_angle)
+ glFlush();
+ }
+ } else {
+ if (!surface_->SwapBuffers()) {
+ LOG(ERROR) << "Context lost because SwapBuffers failed.";
+ LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
+ }
+ }
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
+error::Error GLES2DecoderImpl::HandleEnableFeatureCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::EnableFeatureCHROMIUM& c =
+ *static_cast<const gles2::cmds::EnableFeatureCHROMIUM*>(cmd_data);
+ Bucket* bucket = GetBucket(c.bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ typedef cmds::EnableFeatureCHROMIUM::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (*result != 0) {
+ return error::kInvalidArguments;
+ }
+ std::string feature_str;
+ if (!bucket->GetAsString(&feature_str)) {
+ return error::kInvalidArguments;
+ }
+
+ // TODO(gman): make this some kind of table to function pointer thingy.
+ if (feature_str.compare("pepper3d_allow_buffers_on_multiple_targets") == 0) {
+ buffer_manager()->set_allow_buffers_on_multiple_targets(true);
+ } else if (feature_str.compare("pepper3d_support_fixed_attribs") == 0) {
+ buffer_manager()->set_allow_buffers_on_multiple_targets(true);
+ // TODO(gman): decide how to remove the need for this const_cast.
+ // I could make validators_ non const but that seems bad as this is the only
+ // place it is needed. I could make some special friend class of validators
+ // just to allow this to set them. That seems silly. I could refactor this
+ // code to use the extension mechanism or the initialization attributes to
+ // turn this feature on. Given that the only real point of this is to make
+ // the conformance tests pass and given that there is lots of real work that
+ // needs to be done it seems like refactoring for one to one of those
+ // methods is a very low priority.
+ const_cast<Validators*>(validators_)->vertex_attrib_type.AddValue(GL_FIXED);
+ } else if (feature_str.compare("webgl_enable_glsl_webgl_validation") == 0) {
+ force_webgl_glsl_validation_ = true;
+ InitializeShaderTranslator();
+ } else {
+ return error::kNoError;
+ }
+
+ *result = 1; // true.
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetRequestableExtensionsCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetRequestableExtensionsCHROMIUM& c =
+ *static_cast<const gles2::cmds::GetRequestableExtensionsCHROMIUM*>(
+ cmd_data);
+ Bucket* bucket = CreateBucket(c.bucket_id);
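+ // Use a freshly initialized FeatureInfo so the reply lists everything
+ // this GPU could expose, not just what this context currently has
+ // enabled.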
+ scoped_refptr<FeatureInfo> info(new FeatureInfo());
+ info->Initialize(disallowed_features_);
+ bucket->SetFromString(info->extensions().c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleRequestExtensionCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::RequestExtensionCHROMIUM& c =
+ *static_cast<const gles2::cmds::RequestExtensionCHROMIUM*>(cmd_data);
+ Bucket* bucket = GetBucket(c.bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string feature_str;
+ if (!bucket->GetAsString(&feature_str)) {
+ return error::kInvalidArguments;
+ }
+
+ bool desire_webgl_glsl_validation =
+ feature_str.find("GL_CHROMIUM_webglsl") != std::string::npos;
+ bool desire_standard_derivatives = false;
+ bool desire_frag_depth = false;
+ bool desire_draw_buffers = false;
+ bool desire_shader_texture_lod = false;
+ if (force_webgl_glsl_validation_) {
+ desire_standard_derivatives =
+ feature_str.find("GL_OES_standard_derivatives") != std::string::npos;
+ desire_frag_depth =
+ feature_str.find("GL_EXT_frag_depth") != std::string::npos;
+ desire_draw_buffers =
+ feature_str.find("GL_EXT_draw_buffers") != std::string::npos;
+ desire_shader_texture_lod =
+ feature_str.find("GL_EXT_shader_texture_lod") != std::string::npos;
+ }
+
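+ // Requested WebGL-related features are sticky: the flags below are only
+ // ever OR-ed in, and the shader translator is reinitialized whenever one
+ // of the tracked flags changes.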
+ if (desire_webgl_glsl_validation != force_webgl_glsl_validation_ ||
+ desire_standard_derivatives != derivatives_explicitly_enabled_ ||
+ desire_frag_depth != frag_depth_explicitly_enabled_ ||
+ desire_draw_buffers != draw_buffers_explicitly_enabled_) {
+ force_webgl_glsl_validation_ |= desire_webgl_glsl_validation;
+ derivatives_explicitly_enabled_ |= desire_standard_derivatives;
+ frag_depth_explicitly_enabled_ |= desire_frag_depth;
+ draw_buffers_explicitly_enabled_ |= desire_draw_buffers;
+ shader_texture_lod_explicitly_enabled_ |= desire_shader_texture_lod;
+ InitializeShaderTranslator();
+ }
+
+ UpdateCapabilities();
+
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetMultipleIntegervCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetMultipleIntegervCHROMIUM& c =
+ *static_cast<const gles2::cmds::GetMultipleIntegervCHROMIUM*>(cmd_data);
+ GLuint count = c.count;
+ uint32 pnames_size;
+ if (!SafeMultiplyUint32(count, sizeof(GLenum), &pnames_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLenum* pnames = GetSharedMemoryAs<const GLenum*>(
+ c.pnames_shm_id, c.pnames_shm_offset, pnames_size);
+ if (pnames == NULL) {
+ return error::kOutOfBounds;
+ }
+
+ // We have to copy them since we use them twice so the client
+ // can't change them between the time we validate them and the time we use
+ // them.
+ scoped_ptr<GLenum[]> enums(new GLenum[count]);
+ memcpy(enums.get(), pnames, pnames_size);
+
+ // Count up the space needed for the result.
+ uint32 num_results = 0;
+ for (GLuint ii = 0; ii < count; ++ii) {
+ uint32 num = util_.GLGetNumValuesReturned(enums[ii]);
+ if (num == 0) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetMultipleCHROMIUM", enums[ii], "pname");
+ return error::kNoError;
+ }
+ // Num will never be more than 4.
+ DCHECK_LE(num, 4u);
+ if (!SafeAddUint32(num_results, num, &num_results)) {
+ return error::kOutOfBounds;
+ }
+ }
+
+ uint32 result_size = 0;
+ if (!SafeMultiplyUint32(num_results, sizeof(GLint), &result_size)) {
+ return error::kOutOfBounds;
+ }
+
+ if (result_size != static_cast<uint32>(c.size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glGetMultipleCHROMIUM", "bad size GL_INVALID_VALUE");
+ return error::kNoError;
+ }
+
+ GLint* results = GetSharedMemoryAs<GLint*>(
+ c.results_shm_id, c.results_shm_offset, result_size);
+ if (results == NULL) {
+ return error::kOutOfBounds;
+ }
+
+ // Check the results have been cleared in case the context was lost.
+ for (uint32 ii = 0; ii < num_results; ++ii) {
+ if (results[ii]) {
+ return error::kInvalidArguments;
+ }
+ }
+
+ // Get each result.
+ GLint* start = results;
+ for (GLuint ii = 0; ii < count; ++ii) {
+ GLsizei num_written = 0;
+ if (!state_.GetStateAsGLint(enums[ii], results, &num_written) &&
+ !GetHelper(enums[ii], results, &num_written)) {
+ DoGetIntegerv(enums[ii], results);
+ }
+ results += num_written;
+ }
+
+ // Just to verify. Should this be a DCHECK?
+ if (static_cast<uint32>(results - start) != num_results) {
+ return error::kOutOfBounds;
+ }
+
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramInfoCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetProgramInfoCHROMIUM& c =
+ *static_cast<const gles2::cmds::GetProgramInfoCHROMIUM*>(cmd_data);
+ GLuint program_id = static_cast<GLuint>(c.program);
+ uint32 bucket_id = c.bucket_id;
+ Bucket* bucket = CreateBucket(bucket_id);
+ bucket->SetSize(sizeof(ProgramInfoHeader)); // in case we fail.
+ Program* program = NULL;
+ program = GetProgram(program_id);
+ if (!program || !program->IsValid()) {
+ return error::kNoError;
+ }
+ program->GetProgramInfo(program_manager(), bucket);
+ return error::kNoError;
+}
+
+error::ContextLostReason GLES2DecoderImpl::GetContextLostReason() {
+ switch (reset_status_) {
+ case GL_NO_ERROR:
+ // TODO(kbr): improve the precision of the error code in this case.
+ // Consider delegating to context for error code if MakeCurrent fails.
+ return error::kUnknown;
+ case GL_GUILTY_CONTEXT_RESET_ARB:
+ return error::kGuilty;
+ case GL_INNOCENT_CONTEXT_RESET_ARB:
+ return error::kInnocent;
+ case GL_UNKNOWN_CONTEXT_RESET_ARB:
+ return error::kUnknown;
+ }
+
+ NOTREACHED();
+ return error::kUnknown;
+}
+
+bool GLES2DecoderImpl::WasContextLost() {
+ if (reset_status_ != GL_NO_ERROR) {
+ return true;
+ }
+ if (context_->WasAllocatedUsingRobustnessExtension()) {
+ GLenum status = GL_NO_ERROR;
+ if (has_robustness_extension_)
+ status = glGetGraphicsResetStatusARB();
+ if (status != GL_NO_ERROR) {
+ // The graphics card was reset. Signal a lost context to the application.
+ reset_status_ = status;
+ reset_by_robustness_extension_ = true;
+ LOG(ERROR) << (surface_->IsOffscreen() ? "Offscreen" : "Onscreen")
+ << " context lost via ARB/EXT_robustness. Reset status = "
+ << GLES2Util::GetStringEnum(status);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool GLES2DecoderImpl::WasContextLostByRobustnessExtension() {
+ return WasContextLost() && reset_by_robustness_extension_;
+}
+
+void GLES2DecoderImpl::LoseContext(uint32 reset_status) {
+ // Only loses the context once.
+ if (reset_status_ != GL_NO_ERROR) {
+ return;
+ }
+
+ // Marks this context as lost.
+ reset_status_ = reset_status;
+ current_decoder_error_ = error::kLostContext;
+}
+
+error::Error GLES2DecoderImpl::HandleInsertSyncPointCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ return error::kUnknownCommand;
+}
+
+error::Error GLES2DecoderImpl::HandleWaitSyncPointCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::WaitSyncPointCHROMIUM& c =
+ *static_cast<const gles2::cmds::WaitSyncPointCHROMIUM*>(cmd_data);
+ group_->mailbox_manager()->PullTextureUpdates();
+ if (wait_sync_point_callback_.is_null())
+ return error::kNoError;
+
+ return wait_sync_point_callback_.Run(c.sync_point) ?
+ error::kNoError : error::kDeferCommandUntilLater;
+}
+
+error::Error GLES2DecoderImpl::HandleDiscardBackbufferCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ if (surface_->DeferDraws())
+ return error::kDeferCommandUntilLater;
+ if (!surface_->SetBackbufferAllocation(false))
+ return error::kLostContext;
+ backbuffer_needs_clear_bits_ |= GL_COLOR_BUFFER_BIT;
+ backbuffer_needs_clear_bits_ |= GL_DEPTH_BUFFER_BIT;
+ backbuffer_needs_clear_bits_ |= GL_STENCIL_BUFFER_BIT;
+ return error::kNoError;
+}
+
+bool GLES2DecoderImpl::GenQueriesEXTHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (query_manager_->GetQuery(client_ids[ii])) {
+ return false;
+ }
+ }
+ query_manager_->GenQueries(n, client_ids);
+ return true;
+}
+
+void GLES2DecoderImpl::DeleteQueriesEXTHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ QueryManager::Query* query = query_manager_->GetQuery(client_ids[ii]);
+ if (query && !query->IsDeleted()) {
+ ContextState::QueryMap::iterator it =
+ state_.current_queries.find(query->target());
+ if (it != state_.current_queries.end())
+ state_.current_queries.erase(it);
+
+ query->Destroy(true);
+ }
+ query_manager_->RemoveQuery(client_ids[ii]);
+ }
+}
+
+bool GLES2DecoderImpl::ProcessPendingQueries() {
+ if (query_manager_.get() == NULL) {
+ return false;
+ }
+ if (!query_manager_->ProcessPendingQueries()) {
+ current_decoder_error_ = error::kOutOfBounds;
+ }
+ return query_manager_->HavePendingQueries();
+}
+
+// Note that if there are no pending readpixels right now,
+// this function will call the callback immediately.
+void GLES2DecoderImpl::WaitForReadPixels(base::Closure callback) {
+ if (features().use_async_readpixels && !pending_readpixel_fences_.empty()) {
+ pending_readpixel_fences_.back()->callbacks.push_back(callback);
+ } else {
+ callback.Run();
+ }
+}
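+
+// A hedged usage sketch (hypothetical caller; "client" and
+// DidFinishReadPixels are illustrative names, not part of this file):
+//
+//   decoder->WaitForReadPixels(
+//       base::Bind(&Client::DidFinishReadPixels, base::Unretained(client)));
+//
+// The closure runs immediately when no read-pixels fence is pending;
+// otherwise it runs once the most recent fence completes in
+// ProcessPendingReadPixels().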
+
+void GLES2DecoderImpl::ProcessPendingReadPixels() {
+ while (!pending_readpixel_fences_.empty() &&
+ pending_readpixel_fences_.front()->fence->HasCompleted()) {
+ std::vector<base::Closure> callbacks =
+ pending_readpixel_fences_.front()->callbacks;
+ pending_readpixel_fences_.pop();
+ for (size_t i = 0; i < callbacks.size(); i++) {
+ callbacks[i].Run();
+ }
+ }
+}
+
+bool GLES2DecoderImpl::HasMoreIdleWork() {
+ return !pending_readpixel_fences_.empty() ||
+ async_pixel_transfer_manager_->NeedsProcessMorePendingTransfers();
+}
+
+void GLES2DecoderImpl::PerformIdleWork() {
+ ProcessPendingReadPixels();
+ if (!async_pixel_transfer_manager_->NeedsProcessMorePendingTransfers())
+ return;
+ async_pixel_transfer_manager_->ProcessMorePendingTransfers();
+ ProcessFinishedAsyncTransfers();
+}
+
+error::Error GLES2DecoderImpl::HandleBeginQueryEXT(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BeginQueryEXT& c =
+ *static_cast<const gles2::cmds::BeginQueryEXT*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint client_id = static_cast<GLuint>(c.id);
+ int32 sync_shm_id = static_cast<int32>(c.sync_data_shm_id);
+ uint32 sync_shm_offset = static_cast<uint32>(c.sync_data_shm_offset);
+
+ switch (target) {
+ case GL_COMMANDS_ISSUED_CHROMIUM:
+ case GL_LATENCY_QUERY_CHROMIUM:
+ case GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM:
+ case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
+ case GL_GET_ERROR_QUERY_CHROMIUM:
+ break;
+ case GL_COMMANDS_COMPLETED_CHROMIUM:
+ if (!features().chromium_sync_query) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glBeginQueryEXT",
+ "not enabled for commands completed queries");
+ return error::kNoError;
+ }
+ break;
+ default:
+ if (!features().occlusion_query_boolean) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glBeginQueryEXT",
+ "not enabled for occlusion queries");
+ return error::kNoError;
+ }
+ break;
+ }
+
+ if (state_.current_queries.find(target) != state_.current_queries.end()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glBeginQueryEXT", "query already in progress");
+ return error::kNoError;
+ }
+
+ if (client_id == 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT", "id is 0");
+ return error::kNoError;
+ }
+
+ QueryManager::Query* query = query_manager_->GetQuery(client_id);
+ if (!query) {
+ if (!query_manager_->IsValidQuery(client_id)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBeginQueryEXT",
+ "id not made by glGenQueriesEXT");
+ return error::kNoError;
+ }
+ query = query_manager_->CreateQuery(
+ target, client_id, sync_shm_id, sync_shm_offset);
+ }
+
+ if (query->target() != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glBeginQueryEXT", "target does not match");
+ return error::kNoError;
+ } else if (query->shm_id() != sync_shm_id ||
+ query->shm_offset() != sync_shm_offset) {
+ DLOG(ERROR) << "Shared memory used by query not the same as before";
+ return error::kInvalidArguments;
+ }
+
+ if (!query_manager_->BeginQuery(query)) {
+ return error::kOutOfBounds;
+ }
+
+ state_.current_queries[target] = query;
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleEndQueryEXT(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::EndQueryEXT& c =
+ *static_cast<const gles2::cmds::EndQueryEXT*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ uint32 submit_count = static_cast<GLuint>(c.submit_count);
+ ContextState::QueryMap::iterator it = state_.current_queries.find(target);
+
+ if (it == state_.current_queries.end()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glEndQueryEXT", "No active query");
+ return error::kNoError;
+ }
+
+ QueryManager::Query* query = it->second.get();
+ if (!query_manager_->EndQuery(query, submit_count)) {
+ return error::kOutOfBounds;
+ }
+
+ query_manager_->ProcessPendingTransferQueries();
+
+ state_.current_queries.erase(it);
+ return error::kNoError;
+}
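+
+// Illustrative client-side sequence that drives the two handlers above
+// (a sketch only; assumes the usual GLES2 client bindings):
+//
+//   GLuint query = 0;
+//   glGenQueriesEXT(1, &query);
+//   glBeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, query);
+//   // ... issue commands ...
+//   glEndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
+//
+// The result becomes visible to the client once the decoder marks the query
+// completed in ProcessPendingQueries().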
+
+bool GLES2DecoderImpl::GenVertexArraysOESHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetVertexAttribManager(client_ids[ii])) {
+ return false;
+ }
+ }
+
+ if (!features().native_vertex_array_object) {
+ // Emulated VAO
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateVertexAttribManager(client_ids[ii], 0, true);
+ }
+ } else {
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+
+ glGenVertexArraysOES(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateVertexAttribManager(client_ids[ii], service_ids[ii], true);
+ }
+ }
+
+ return true;
+}
+
+void GLES2DecoderImpl::DeleteVertexArraysOESHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ VertexAttribManager* vao =
+ GetVertexAttribManager(client_ids[ii]);
+ if (vao && !vao->IsDeleted()) {
+ if (state_.vertex_attrib_manager.get() == vao) {
+ DoBindVertexArrayOES(0);
+ }
+ RemoveVertexAttribManager(client_ids[ii]);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DoBindVertexArrayOES(GLuint client_id) {
+ VertexAttribManager* vao = NULL;
+ if (client_id != 0) {
+ vao = GetVertexAttribManager(client_id);
+ if (!vao) {
+ // Unlike most Bind* methods, the spec explicitly states that VertexArray
+ // only allows names that have been previously generated. As such, we do
+ // not generate new names here.
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindVertexArrayOES", "bad vertex array id.");
+ current_decoder_error_ = error::kNoError;
+ return;
+ }
+ } else {
+ vao = state_.default_vertex_attrib_manager.get();
+ }
+
+ // Only set the VAO state if it's changed
+ if (state_.vertex_attrib_manager.get() != vao) {
+ state_.vertex_attrib_manager = vao;
+ if (!features().native_vertex_array_object) {
+ EmulateVertexArrayState();
+ } else {
+ GLuint service_id = vao->service_id();
+ glBindVertexArrayOES(service_id);
+ }
+ }
+}
+
+// Used when OES_vertex_array_object isn't natively supported
+void GLES2DecoderImpl::EmulateVertexArrayState() {
+ // Set up the vertex attribute state.
+ for (uint32 vv = 0; vv < group_->max_vertex_attribs(); ++vv) {
+ RestoreStateForAttrib(vv, true);
+ }
+
+ // Set up the element buffer.
+ Buffer* element_array_buffer =
+ state_.vertex_attrib_manager->element_array_buffer();
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,
+ element_array_buffer ? element_array_buffer->service_id() : 0);
+}
+
+bool GLES2DecoderImpl::DoIsVertexArrayOES(GLuint client_id) {
+ const VertexAttribManager* vao =
+ GetVertexAttribManager(client_id);
+ return vao && vao->IsValid() && !vao->IsDeleted();
+}
+
+#if defined(OS_MACOSX)
+void GLES2DecoderImpl::ReleaseIOSurfaceForTexture(GLuint texture_id) {
+ TextureToIOSurfaceMap::iterator it = texture_to_io_surface_map_.find(
+ texture_id);
+ if (it != texture_to_io_surface_map_.end()) {
+ // Found a previous IOSurface bound to this texture; release it.
+ IOSurfaceRef surface = it->second;
+ CFRelease(surface);
+ texture_to_io_surface_map_.erase(it);
+ }
+}
+#endif
+
+void GLES2DecoderImpl::DoTexImageIOSurface2DCHROMIUM(
+ GLenum target, GLsizei width, GLsizei height,
+ GLuint io_surface_id, GLuint plane) {
+#if defined(OS_MACOSX)
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM", "only supported on desktop GL.");
+ return;
+ }
+
+ if (target != GL_TEXTURE_RECTANGLE_ARB) {
+ // This might be supported in the future, and if we could require
+ // support for binding an IOSurface to an NPOT TEXTURE_2D texture, we
+ // could delete a lot of code. For now, perform strict validation so we
+ // know what's going on.
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM",
+ "requires TEXTURE_RECTANGLE_ARB target");
+ return;
+ }
+
+ // Default target might be conceptually valid, but disallow it to avoid
+ // accidents.
+ TextureRef* texture_ref =
+ texture_manager()->GetTextureInfoForTargetUnlessDefault(&state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM", "no rectangle texture bound");
+ return;
+ }
+
+ // Look up the new IOSurface. Note that because of asynchrony
+ // between processes this might fail; during live resizing the
+ // plugin process might allocate and release an IOSurface before
+ // this process gets a chance to look it up. Hold on to any old
+ // IOSurface in this case.
+ IOSurfaceRef surface = IOSurfaceLookup(io_surface_id);
+ if (!surface) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM", "no IOSurface with the given ID");
+ return;
+ }
+
+ // Release any IOSurface previously bound to this texture.
+ ReleaseIOSurfaceForTexture(texture_ref->service_id());
+
+ // Make sure we release the IOSurface even if CGLTexImageIOSurface2D fails.
+ texture_to_io_surface_map_.insert(
+ std::make_pair(texture_ref->service_id(), surface));
+
+ CGLContextObj context =
+ static_cast<CGLContextObj>(context_->GetHandle());
+
+ CGLError err = CGLTexImageIOSurface2D(
+ context,
+ target,
+ GL_RGBA,
+ width,
+ height,
+ GL_BGRA,
+ GL_UNSIGNED_INT_8_8_8_8_REV,
+ surface,
+ plane);
+
+ if (err != kCGLNoError) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM", "error in CGLTexImageIOSurface2D");
+ return;
+ }
+
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, 0, GL_RGBA, width, height, 1, 0,
+ GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, true);
+
+#else
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM", "not supported.");
+#endif
+}
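+
+// Illustrative client-side call that reaches the handler above on Mac
+// (a sketch; "tex" and "io_surface_id" are placeholder names):
+//
+//   glBindTexture(GL_TEXTURE_RECTANGLE_ARB, tex);
+//   glTexImageIOSurface2DCHROMIUM(GL_TEXTURE_RECTANGLE_ARB,
+//                                 width, height, io_surface_id, 0 /* plane */);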
+
+static GLenum ExtractFormatFromStorageFormat(GLenum internalformat) {
+ switch (internalformat) {
+ case GL_RGB565:
+ return GL_RGB;
+ case GL_RGBA4:
+ return GL_RGBA;
+ case GL_RGB5_A1:
+ return GL_RGBA;
+ case GL_RGB8_OES:
+ return GL_RGB;
+ case GL_RGBA8_OES:
+ return GL_RGBA;
+ case GL_LUMINANCE8_ALPHA8_EXT:
+ return GL_LUMINANCE_ALPHA;
+ case GL_LUMINANCE8_EXT:
+ return GL_LUMINANCE;
+ case GL_ALPHA8_EXT:
+ return GL_ALPHA;
+ case GL_RGBA32F_EXT:
+ return GL_RGBA;
+ case GL_RGB32F_EXT:
+ return GL_RGB;
+ case GL_ALPHA32F_EXT:
+ return GL_ALPHA;
+ case GL_LUMINANCE32F_EXT:
+ return GL_LUMINANCE;
+ case GL_LUMINANCE_ALPHA32F_EXT:
+ return GL_LUMINANCE_ALPHA;
+ case GL_RGBA16F_EXT:
+ return GL_RGBA;
+ case GL_RGB16F_EXT:
+ return GL_RGB;
+ case GL_ALPHA16F_EXT:
+ return GL_ALPHA;
+ case GL_LUMINANCE16F_EXT:
+ return GL_LUMINANCE;
+ case GL_LUMINANCE_ALPHA16F_EXT:
+ return GL_LUMINANCE_ALPHA;
+ case GL_BGRA8_EXT:
+ return GL_BGRA_EXT;
+ default:
+ return GL_NONE;
+ }
+}
+
+void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
+ GLenum target, GLuint source_id, GLuint dest_id, GLint level,
+ GLenum internal_format, GLenum dest_type) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoCopyTextureCHROMIUM");
+
+ TextureRef* dest_texture_ref = GetTexture(dest_id);
+ TextureRef* source_texture_ref = GetTexture(source_id);
+
+ if (!source_texture_ref || !dest_texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTextureCHROMIUM", "unknown texture id");
+ return;
+ }
+
+ if (GL_TEXTURE_2D != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTextureCHROMIUM", "invalid texture target");
+ return;
+ }
+
+ Texture* source_texture = source_texture_ref->texture();
+ Texture* dest_texture = dest_texture_ref->texture();
+ if (dest_texture->target() != GL_TEXTURE_2D ||
+ (source_texture->target() != GL_TEXTURE_2D &&
+ source_texture->target() != GL_TEXTURE_RECTANGLE_ARB &&
+ source_texture->target() != GL_TEXTURE_EXTERNAL_OES)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glCopyTextureCHROMIUM",
+ "invalid texture target binding");
+ return;
+ }
+
+ int source_width, source_height, dest_width, dest_height;
+
+ gfx::GLImage* image =
+ source_texture->GetLevelImage(source_texture->target(), 0);
+ if (image) {
+ gfx::Size size = image->GetSize();
+ source_width = size.width();
+ source_height = size.height();
+ if (source_width <= 0 || source_height <= 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glCopyTextureChromium", "invalid image size");
+ return;
+ }
+ } else {
+ if (!source_texture->GetLevelSize(
+ source_texture->target(), 0, &source_width, &source_height)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glCopyTextureChromium",
+ "source texture has no level 0");
+ return;
+ }
+
+ // Check that this type of texture is allowed.
+ if (!texture_manager()->ValidForTarget(
+ source_texture->target(), level, source_width, source_height, 1)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTextureCHROMIUM", "Bad dimensions");
+ return;
+ }
+ }
+
+ // Clear the source texture if necessary.
+ if (!texture_manager()->ClearTextureLevel(
+ this, source_texture_ref, source_texture->target(), 0)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glCopyTextureCHROMIUM", "dimensions too big");
+ return;
+ }
+
+ GLenum source_type = 0;
+ GLenum source_internal_format = 0;
+ source_texture->GetLevelType(
+ source_texture->target(), 0, &source_type, &source_internal_format);
+
+ // The destination format should be GL_RGB or GL_RGBA. GL_ALPHA,
+ // GL_LUMINANCE, and GL_LUMINANCE_ALPHA are not supported because they are not
+ // renderable on some platforms.
+ bool valid_dest_format = internal_format == GL_RGB ||
+ internal_format == GL_RGBA ||
+ internal_format == GL_BGRA_EXT;
+ bool valid_source_format = source_internal_format == GL_ALPHA ||
+ source_internal_format == GL_RGB ||
+ source_internal_format == GL_RGBA ||
+ source_internal_format == GL_LUMINANCE ||
+ source_internal_format == GL_LUMINANCE_ALPHA ||
+ source_internal_format == GL_BGRA_EXT;
+ if (!valid_source_format || !valid_dest_format) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glCopyTextureCHROMIUM",
+ "invalid internal format");
+ return;
+ }
+
+ // Defer initializing the CopyTextureCHROMIUMResourceManager until it is
+ // needed because it takes 10s of milliseconds to initialize.
+ if (!copy_texture_CHROMIUM_.get()) {
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopyTextureCHROMIUM");
+ copy_texture_CHROMIUM_.reset(new CopyTextureCHROMIUMResourceManager());
+ copy_texture_CHROMIUM_->Initialize(this);
+ RestoreCurrentFramebufferBindings();
+ if (LOCAL_PEEK_GL_ERROR("glCopyTextureCHROMIUM") != GL_NO_ERROR)
+ return;
+ }
+
+ GLenum dest_type_previous = dest_type;
+ GLenum dest_internal_format = internal_format;
+ bool dest_level_defined = dest_texture->GetLevelSize(
+ GL_TEXTURE_2D, level, &dest_width, &dest_height);
+
+ if (dest_level_defined) {
+ dest_texture->GetLevelType(GL_TEXTURE_2D, level, &dest_type_previous,
+ &dest_internal_format);
+ }
+
+ // Resize the destination texture to the dimensions of the source texture.
+ if (!dest_level_defined || dest_width != source_width ||
+ dest_height != source_height ||
+ dest_internal_format != internal_format ||
+ dest_type_previous != dest_type) {
+ // Ensure that the glTexImage2D succeeds.
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopyTextureCHROMIUM");
+ glBindTexture(GL_TEXTURE_2D, dest_texture->service_id());
+ glTexImage2D(
+ GL_TEXTURE_2D, level, internal_format, source_width, source_height,
+ 0, internal_format, dest_type, NULL);
+ GLenum error = LOCAL_PEEK_GL_ERROR("glCopyTextureCHROMIUM");
+ if (error != GL_NO_ERROR) {
+ RestoreCurrentTextureBindings(&state_, GL_TEXTURE_2D);
+ return;
+ }
+
+ texture_manager()->SetLevelInfo(
+ dest_texture_ref, GL_TEXTURE_2D, level, internal_format, source_width,
+ source_height, 1, 0, internal_format, dest_type, true);
+ } else {
+ texture_manager()->SetLevelCleared(
+ dest_texture_ref, GL_TEXTURE_2D, level, true);
+ }
+
+ ScopedModifyPixels modify(dest_texture_ref);
+
+ // Try using GLImage::CopyTexImage when possible.
+ bool unpack_premultiply_alpha_change =
+ unpack_premultiply_alpha_ ^ unpack_unpremultiply_alpha_;
+ if (image && !unpack_flip_y_ && !unpack_premultiply_alpha_change && !level) {
+ glBindTexture(GL_TEXTURE_2D, dest_texture->service_id());
+ if (image->CopyTexImage(GL_TEXTURE_2D))
+ return;
+ }
+
+ DoWillUseTexImageIfNeeded(source_texture, source_texture->target());
+
+ // A GL_TEXTURE_EXTERNAL_OES texture requires applying a transform matrix
+ // before presenting.
+ if (source_texture->target() == GL_TEXTURE_EXTERNAL_OES) {
+ // TODO(hkuang): get the StreamTexture transform matrix in GPU process
+ // instead of using default matrix crbug.com/226218.
+ static const GLfloat default_matrix[16] = {1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f};
+ copy_texture_CHROMIUM_->DoCopyTextureWithTransform(
+ this,
+ source_texture->target(),
+ source_texture->service_id(),
+ dest_texture->service_id(),
+ level,
+ source_width,
+ source_height,
+ unpack_flip_y_,
+ unpack_premultiply_alpha_,
+ unpack_unpremultiply_alpha_,
+ default_matrix);
+ } else {
+ copy_texture_CHROMIUM_->DoCopyTexture(this,
+ source_texture->target(),
+ source_texture->service_id(),
+ source_internal_format,
+ dest_texture->service_id(),
+ level,
+ internal_format,
+ source_width,
+ source_height,
+ unpack_flip_y_,
+ unpack_premultiply_alpha_,
+ unpack_unpremultiply_alpha_);
+ }
+
+ DoDidUseTexImageIfNeeded(source_texture, source_texture->target());
+}
+
+static GLenum ExtractTypeFromStorageFormat(GLenum internalformat) {
+ switch (internalformat) {
+ case GL_RGB565:
+ return GL_UNSIGNED_SHORT_5_6_5;
+ case GL_RGBA4:
+ return GL_UNSIGNED_SHORT_4_4_4_4;
+ case GL_RGB5_A1:
+ return GL_UNSIGNED_SHORT_5_5_5_1;
+ case GL_RGB8_OES:
+ return GL_UNSIGNED_BYTE;
+ case GL_RGBA8_OES:
+ return GL_UNSIGNED_BYTE;
+ case GL_LUMINANCE8_ALPHA8_EXT:
+ return GL_UNSIGNED_BYTE;
+ case GL_LUMINANCE8_EXT:
+ return GL_UNSIGNED_BYTE;
+ case GL_ALPHA8_EXT:
+ return GL_UNSIGNED_BYTE;
+ case GL_RGBA32F_EXT:
+ return GL_FLOAT;
+ case GL_RGB32F_EXT:
+ return GL_FLOAT;
+ case GL_ALPHA32F_EXT:
+ return GL_FLOAT;
+ case GL_LUMINANCE32F_EXT:
+ return GL_FLOAT;
+ case GL_LUMINANCE_ALPHA32F_EXT:
+ return GL_FLOAT;
+ case GL_RGBA16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_RGB16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_ALPHA16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_LUMINANCE16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_LUMINANCE_ALPHA16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_BGRA8_EXT:
+ return GL_UNSIGNED_BYTE;
+ default:
+ return GL_NONE;
+ }
+}
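+
+// Taken together with ExtractFormatFromStorageFormat above, each sized
+// storage format maps to an unsized <format, type> pair, for example:
+//   GL_RGBA8_OES   -> (GL_RGBA, GL_UNSIGNED_BYTE)
+//   GL_RGB565      -> (GL_RGB,  GL_UNSIGNED_SHORT_5_6_5)
+//   GL_RGBA16F_EXT -> (GL_RGBA, GL_HALF_FLOAT_OES)
+// Unknown formats map to GL_NONE in both helpers.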
+
+void GLES2DecoderImpl::DoTexStorage2DEXT(
+ GLenum target,
+ GLint levels,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoTexStorage2DEXT",
+ "width", width, "height", height);
+ if (!texture_manager()->ValidForTarget(target, 0, width, height, 1) ||
+ TextureManager::ComputeMipMapCount(target, width, height, 1) < levels) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glTexStorage2DEXT", "dimensions out of range");
+ return;
+ }
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexStorage2DEXT", "unknown texture for target");
+ return;
+ }
+ Texture* texture = texture_ref->texture();
+ if (texture->IsAttachedToFramebuffer()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ if (texture->IsImmutable()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexStorage2DEXT", "texture is immutable");
+ return;
+ }
+
+ GLenum format = ExtractFormatFromStorageFormat(internal_format);
+ GLenum type = ExtractTypeFromStorageFormat(internal_format);
+
+ {
+ GLsizei level_width = width;
+ GLsizei level_height = height;
+ uint32 estimated_size = 0;
+ for (int ii = 0; ii < levels; ++ii) {
+ uint32 level_size = 0;
+ if (!GLES2Util::ComputeImageDataSizes(
+ level_width, level_height, format, type, state_.unpack_alignment,
+ &level_size, NULL, NULL) ||
+ !SafeAddUint32(estimated_size, level_size, &estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glTexStorage2DEXT", "dimensions too large");
+ return;
+ }
+ level_width = std::max(1, level_width >> 1);
+ level_height = std::max(1, level_height >> 1);
+ }
+ if (!EnsureGPUMemoryAvailable(estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glTexStorage2DEXT", "out of memory");
+ return;
+ }
+ }
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glTexStorage2DEXT");
+ glTexStorage2DEXT(target, levels, internal_format, width, height);
+ GLenum error = LOCAL_PEEK_GL_ERROR("glTexStorage2DEXT");
+ if (error == GL_NO_ERROR) {
+ GLsizei level_width = width;
+ GLsizei level_height = height;
+ for (int ii = 0; ii < levels; ++ii) {
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, ii, format,
+ level_width, level_height, 1, 0, format, type, false);
+ level_width = std::max(1, level_width >> 1);
+ level_height = std::max(1, level_height >> 1);
+ }
+ texture->SetImmutable(true);
+ }
+}
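+
+// Worked example for the size estimate above (assuming a 4-byte unpack
+// alignment): glTexStorage2DEXT(GL_TEXTURE_2D, 3, GL_RGBA8_OES, 8, 8)
+// defines levels of 8x8, 4x4 and 2x2, so the estimated upload size is
+// (64 + 16 + 4) texels * 4 bytes per texel = 336 bytes.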
+
+error::Error GLES2DecoderImpl::HandleGenMailboxCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ return error::kUnknownCommand;
+}
+
+void GLES2DecoderImpl::DoProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* data) {
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoProduceTextureCHROMIUM",
+ "context", logger_.GetLogPrefix(),
+ "mailbox[0]", static_cast<unsigned char>(data[0]));
+
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ ProduceTextureRef("glProduceTextureCHROMIUM", texture_ref, target, data);
+}
+
+void GLES2DecoderImpl::DoProduceTextureDirectCHROMIUM(GLuint client_id,
+ GLenum target, const GLbyte* data) {
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoProduceTextureDirectCHROMIUM",
+ "context", logger_.GetLogPrefix(),
+ "mailbox[0]", static_cast<unsigned char>(data[0]));
+
+ ProduceTextureRef("glProduceTextureDirectCHROMIUM", GetTexture(client_id),
+ target, data);
+}
+
+void GLES2DecoderImpl::ProduceTextureRef(std::string func_name,
+ TextureRef* texture_ref, GLenum target, const GLbyte* data) {
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DLOG_IF(ERROR, !mailbox.Verify()) << func_name << " was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, func_name.c_str(), "unknown texture for target");
+ return;
+ }
+
+ Texture* produced = texture_manager()->Produce(texture_ref);
+ if (!produced) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, func_name.c_str(), "invalid texture");
+ return;
+ }
+
+ if (produced->target() != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, func_name.c_str(), "invalid target");
+ return;
+ }
+
+ group_->mailbox_manager()->ProduceTexture(target, mailbox, produced);
+}
+
+void GLES2DecoderImpl::DoConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* data) {
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoConsumeTextureCHROMIUM",
+ "context", logger_.GetLogPrefix(),
+ "mailbox[0]", static_cast<unsigned char>(data[0]));
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DLOG_IF(ERROR, !mailbox.Verify()) << "ConsumeTextureCHROMIUM was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+
+ scoped_refptr<TextureRef> texture_ref =
+ texture_manager()->GetTextureInfoForTargetUnlessDefault(&state_, target);
+ if (!texture_ref.get()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glConsumeTextureCHROMIUM",
+ "unknown texture for target");
+ return;
+ }
+ GLuint client_id = texture_ref->client_id();
+ if (!client_id) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glConsumeTextureCHROMIUM", "unknown texture for target");
+ return;
+ }
+ Texture* texture = group_->mailbox_manager()->ConsumeTexture(target, mailbox);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glConsumeTextureCHROMIUM", "invalid mailbox name");
+ return;
+ }
+ if (texture->target() != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glConsumeTextureCHROMIUM", "invalid target");
+ return;
+ }
+
+ DeleteTexturesHelper(1, &client_id);
+ texture_ref = texture_manager()->Consume(client_id, texture);
+ glBindTexture(target, texture_ref->service_id());
+
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ unit.bind_target = target;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ unit.bound_texture_2d = texture_ref;
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ unit.bound_texture_cube_map = texture_ref;
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ unit.bound_texture_external_oes = texture_ref;
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ unit.bound_texture_rectangle_arb = texture_ref;
+ break;
+ default:
+ NOTREACHED(); // Validation should prevent us getting here.
+ break;
+ }
+}
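+
+// Illustrative mailbox round trip between two contexts sharing a mailbox
+// manager (client-side calls; a sketch, not part of this file):
+//
+//   // Producing context:
+//   GLbyte mailbox[GL_MAILBOX_SIZE_CHROMIUM];
+//   glGenMailboxCHROMIUM(mailbox);
+//   glProduceTextureCHROMIUM(GL_TEXTURE_2D, mailbox);
+//
+//   // Consuming context:
+//   glBindTexture(GL_TEXTURE_2D, tex);
+//   glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox);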
+
+error::Error GLES2DecoderImpl::HandleCreateAndConsumeTextureCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CreateAndConsumeTextureCHROMIUMImmediate& c =
+ *static_cast<
+ const gles2::cmds::CreateAndConsumeTextureCHROMIUMImmediate*>(
+ cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLbyte), 64, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLbyte* mailbox =
+ GetImmediateDataAs<const GLbyte*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCreateAndConsumeTextureCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (mailbox == NULL) {
+ return error::kOutOfBounds;
+ }
+ uint32_t client_id = c.client_id;
+ DoCreateAndConsumeTextureCHROMIUM(target, mailbox, client_id);
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoCreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* data, GLuint client_id) {
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoCreateAndConsumeTextureCHROMIUM",
+ "context", logger_.GetLogPrefix(),
+ "mailbox[0]", static_cast<unsigned char>(data[0]));
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DLOG_IF(ERROR, !mailbox.Verify()) << "CreateAndConsumeTextureCHROMIUM was "
+ "passed a mailbox that was not "
+ "generated by GenMailboxCHROMIUM.";
+
+ TextureRef* texture_ref = GetTexture(client_id);
+ if (texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCreateAndConsumeTextureCHROMIUM", "client id already in use");
+ return;
+ }
+ Texture* texture = group_->mailbox_manager()->ConsumeTexture(target, mailbox);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCreateAndConsumeTextureCHROMIUM", "invalid mailbox name");
+ return;
+ }
+ if (texture->target() != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCreateAndConsumeTextureCHROMIUM", "invalid target");
+ return;
+ }
+
+ texture_ref = texture_manager()->Consume(client_id, texture);
+}
+
+void GLES2DecoderImpl::DoInsertEventMarkerEXT(
+ GLsizei length, const GLchar* marker) {
+ if (!marker) {
+ marker = "";
+ }
+ debug_marker_manager_.SetMarker(
+ length ? std::string(marker, length) : std::string(marker));
+}
+
+void GLES2DecoderImpl::DoPushGroupMarkerEXT(
+ GLsizei length, const GLchar* marker) {
+ if (!marker) {
+ marker = "";
+ }
+ std::string name = length ? std::string(marker, length) : std::string(marker);
+ debug_marker_manager_.PushGroup(name);
+ gpu_tracer_->Begin(name, kTraceGroupMarker);
+}
+
+void GLES2DecoderImpl::DoPopGroupMarkerEXT(void) {
+ debug_marker_manager_.PopGroup();
+ gpu_tracer_->End(kTraceGroupMarker);
+}
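+
+// Illustrative EXT_debug_marker usage from the client (a sketch; a length of
+// 0 means the marker string is null-terminated):
+//
+//   glPushGroupMarkerEXT(0, "DrawScene");
+//   // ... draws ...
+//   glPopGroupMarkerEXT();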
+
+void GLES2DecoderImpl::DoBindTexImage2DCHROMIUM(
+ GLenum target, GLint image_id) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoBindTexImage2DCHROMIUM");
+
+ if (target == GL_TEXTURE_CUBE_MAP) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_ENUM,
+ "glBindTexImage2DCHROMIUM", "invalid target");
+ return;
+ }
+
+ // Default target might be conceptually valid, but disallow it to avoid
+ // accidents.
+ TextureRef* texture_ref =
+ texture_manager()->GetTextureInfoForTargetUnlessDefault(&state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindTexImage2DCHROMIUM", "no texture bound");
+ return;
+ }
+
+ gfx::GLImage* gl_image = image_manager()->LookupImage(image_id);
+ if (!gl_image) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindTexImage2DCHROMIUM", "no image found with the given ID");
+ return;
+ }
+
+ {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::DoBindTexImage2DCHROMIUM", GetErrorState());
+ if (!gl_image->BindTexImage(target)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindTexImage2DCHROMIUM", "fail to bind image with the given ID");
+ return;
+ }
+ }
+
+ gfx::Size size = gl_image->GetSize();
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, 0, GL_RGBA, size.width(), size.height(), 1, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, true);
+ texture_manager()->SetLevelImage(texture_ref, target, 0, gl_image);
+}
+
+void GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM(
+ GLenum target, GLint image_id) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM");
+
+ // Default target might be conceptually valid, but disallow it to avoid
+ // accidents.
+ TextureRef* texture_ref =
+ texture_manager()->GetTextureInfoForTargetUnlessDefault(&state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glReleaseTexImage2DCHROMIUM", "no texture bound");
+ return;
+ }
+
+ gfx::GLImage* gl_image = image_manager()->LookupImage(image_id);
+ if (!gl_image) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glReleaseTexImage2DCHROMIUM", "no image found with the given ID");
+ return;
+ }
+
+ // Do nothing when image is not currently bound.
+ if (texture_ref->texture()->GetLevelImage(target, 0) != gl_image)
+ return;
+
+ {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM", GetErrorState());
+ gl_image->ReleaseTexImage(target);
+ }
+
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, 0, GL_RGBA, 0, 0, 1, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, false);
+}
+
+error::Error GLES2DecoderImpl::HandleTraceBeginCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TraceBeginCHROMIUM& c =
+ *static_cast<const gles2::cmds::TraceBeginCHROMIUM*>(cmd_data);
+ Bucket* bucket = GetBucket(c.bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string command_name;
+ if (!bucket->GetAsString(&command_name)) {
+ return error::kInvalidArguments;
+ }
+ TRACE_EVENT_COPY_ASYNC_BEGIN0("gpu", command_name.c_str(), this);
+ if (!gpu_tracer_->Begin(command_name, kTraceCHROMIUM)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTraceBeginCHROMIUM", "unable to create begin trace");
+ return error::kNoError;
+ }
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoTraceEndCHROMIUM() {
+ if (gpu_tracer_->CurrentName().empty()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTraceEndCHROMIUM", "no trace begin found");
+ return;
+ }
+ TRACE_EVENT_COPY_ASYNC_END0("gpu", gpu_tracer_->CurrentName().c_str(), this);
+ gpu_tracer_->End(kTraceCHROMIUM);
+}
+
+void GLES2DecoderImpl::DoDrawBuffersEXT(
+ GLsizei count, const GLenum* bufs) {
+ if (count > static_cast<GLsizei>(group_->max_draw_buffers())) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glDrawBuffersEXT", "greater than GL_MAX_DRAW_BUFFERS_EXT");
+ return;
+ }
+
+ Framebuffer* framebuffer = GetFramebufferInfoForTarget(GL_FRAMEBUFFER);
+ if (framebuffer) {
+ for (GLsizei i = 0; i < count; ++i) {
+ if (bufs[i] != static_cast<GLenum>(GL_COLOR_ATTACHMENT0 + i) &&
+ bufs[i] != GL_NONE) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glDrawBuffersEXT",
+ "bufs[i] not GL_NONE or GL_COLOR_ATTACHMENTi_EXT");
+ return;
+ }
+ }
+ glDrawBuffersARB(count, bufs);
+ framebuffer->SetDrawBuffers(count, bufs);
+ } else { // backbuffer
+ if (count > 1 ||
+ (bufs[0] != GL_BACK && bufs[0] != GL_NONE)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glDrawBuffersEXT",
+ "more than one buffer or bufs not GL_NONE or GL_BACK");
+ return;
+ }
+ GLenum mapped_buf = bufs[0];
+ if (GetBackbufferServiceId() != 0 && // emulated backbuffer
+ bufs[0] == GL_BACK) {
+ mapped_buf = GL_COLOR_ATTACHMENT0;
+ }
+ glDrawBuffersARB(count, &mapped_buf);
+ group_->set_draw_buffer(bufs[0]);
+ }
+}
+
+void GLES2DecoderImpl::DoLoseContextCHROMIUM(GLenum current, GLenum other) {
+ group_->LoseContexts(other);
+ reset_status_ = current;
+ current_decoder_error_ = error::kLostContext;
+}
+
+void GLES2DecoderImpl::DoMatrixLoadfCHROMIUM(GLenum matrix_mode,
+ const GLfloat* matrix) {
+ DCHECK(matrix_mode == GL_PATH_PROJECTION_CHROMIUM ||
+ matrix_mode == GL_PATH_MODELVIEW_CHROMIUM);
+ if (!features().chromium_path_rendering) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glMatrixLoadfCHROMIUM",
+ "function not available");
+ return;
+ }
+
+ GLfloat* target_matrix = matrix_mode == GL_PATH_PROJECTION_CHROMIUM
+ ? state_.projection_matrix
+ : state_.modelview_matrix;
+ memcpy(target_matrix, matrix, sizeof(GLfloat) * 16);
+ // The matrix_mode is either GL_PATH_MODELVIEW_NV or GL_PATH_PROJECTION_NV
+ // since the values of the _NV and _CHROMIUM tokens match.
+ glMatrixLoadfEXT(matrix_mode, matrix);
+}
+
+void GLES2DecoderImpl::DoMatrixLoadIdentityCHROMIUM(GLenum matrix_mode) {
+ DCHECK(matrix_mode == GL_PATH_PROJECTION_CHROMIUM ||
+ matrix_mode == GL_PATH_MODELVIEW_CHROMIUM);
+
+ if (!features().chromium_path_rendering) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glMatrixLoadIdentityCHROMIUM",
+ "function not available");
+ return;
+ }
+
+ static GLfloat kIdentityMatrix[16] = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f};
+
+ GLfloat* target_matrix = matrix_mode == GL_PATH_PROJECTION_CHROMIUM
+ ? state_.projection_matrix
+ : state_.modelview_matrix;
+ memcpy(target_matrix, kIdentityMatrix, sizeof(kIdentityMatrix));
+ // The matrix_mode is either GL_PATH_MODELVIEW_NV or GL_PATH_PROJECTION_NV
+ // since the values of the _NV and _CHROMIUM tokens match.
+ glMatrixLoadIdentityEXT(matrix_mode);
+}
+
+bool GLES2DecoderImpl::ValidateAsyncTransfer(
+ const char* function_name,
+ TextureRef* texture_ref,
+ GLenum target,
+ GLint level,
+ const void * data) {
+ // We only support async uploads to 2D textures for now.
+ if (GL_TEXTURE_2D != target) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, target, "target");
+ return false;
+ }
+ // We only support uploads to level zero for now.
+ if (level != 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "level != 0");
+ return false;
+ }
+ // A transfer buffer must be bound, even for asyncTexImage2D.
+ if (data == NULL) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name, "buffer == 0");
+ return false;
+ }
+ // We only support one async transfer in progress.
+ if (!texture_ref ||
+ async_pixel_transfer_manager_->AsyncTransferIsInProgress(texture_ref)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ function_name, "transfer already in progress");
+ return false;
+ }
+ return true;
+}
+
+base::Closure GLES2DecoderImpl::AsyncUploadTokenCompletionClosure(
+ uint32 async_upload_token,
+ uint32 sync_data_shm_id,
+ uint32 sync_data_shm_offset) {
+ scoped_refptr<gpu::Buffer> buffer = GetSharedMemoryBuffer(sync_data_shm_id);
+ if (!buffer.get() ||
+ !buffer->GetDataAddress(sync_data_shm_offset, sizeof(AsyncUploadSync)))
+ return base::Closure();
+
+ AsyncMemoryParams mem_params(buffer,
+ sync_data_shm_offset,
+ sizeof(AsyncUploadSync));
+
+ scoped_refptr<AsyncUploadTokenCompletionObserver> observer(
+ new AsyncUploadTokenCompletionObserver(async_upload_token));
+
+ return base::Bind(
+ &AsyncPixelTransferManager::AsyncNotifyCompletion,
+ base::Unretained(GetAsyncPixelTransferManager()),
+ mem_params,
+ observer);
+}
+
+error::Error GLES2DecoderImpl::HandleAsyncTexImage2DCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::AsyncTexImage2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::AsyncTexImage2DCHROMIUM*>(cmd_data);
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleAsyncTexImage2DCHROMIUM");
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLenum internal_format = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLint border = static_cast<GLint>(c.border);
+ GLenum format = static_cast<GLenum>(c.format);
+ GLenum type = static_cast<GLenum>(c.type);
+ uint32 pixels_shm_id = static_cast<uint32>(c.pixels_shm_id);
+ uint32 pixels_shm_offset = static_cast<uint32>(c.pixels_shm_offset);
+ uint32 pixels_size;
+ uint32 async_upload_token = static_cast<uint32>(c.async_upload_token);
+ uint32 sync_data_shm_id = static_cast<uint32>(c.sync_data_shm_id);
+ uint32 sync_data_shm_offset = static_cast<uint32>(c.sync_data_shm_offset);
+
+ base::ScopedClosureRunner scoped_completion_callback;
+ if (async_upload_token) {
+ base::Closure completion_closure =
+ AsyncUploadTokenCompletionClosure(async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
+ if (completion_closure.is_null())
+ return error::kInvalidArguments;
+
+ scoped_completion_callback.Reset(completion_closure);
+ }
+
+ // TODO(epenner): Move this and copies of this memory validation
+ // into ValidateTexImage2D step.
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &pixels_size, NULL,
+ NULL)) {
+ return error::kOutOfBounds;
+ }
+ const void* pixels = NULL;
+ if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
+ pixels = GetSharedMemoryAs<const void*>(
+ pixels_shm_id, pixels_shm_offset, pixels_size);
+ if (!pixels) {
+ return error::kOutOfBounds;
+ }
+ }
+
+ TextureManager::DoTextImage2DArguments args = {
+ target, level, internal_format, width, height, border, format, type,
+ pixels, pixels_size};
+ TextureRef* texture_ref;
+ // All the normal glTexImage2D validation.
+ if (!texture_manager()->ValidateTexImage2D(
+ &state_, "glAsyncTexImage2DCHROMIUM", args, &texture_ref)) {
+ return error::kNoError;
+ }
+
+ // Extra async validation.
+ Texture* texture = texture_ref->texture();
+ if (!ValidateAsyncTransfer(
+ "glAsyncTexImage2DCHROMIUM", texture_ref, target, level, pixels))
+ return error::kNoError;
+
+ // Don't allow async redefinition of a texture.
+ if (texture->IsDefined()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glAsyncTexImage2DCHROMIUM", "already defined");
+ return error::kNoError;
+ }
+
+ if (!EnsureGPUMemoryAvailable(pixels_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glAsyncTexImage2DCHROMIUM", "out of memory");
+ return error::kNoError;
+ }
+
+ // Setup the parameters.
+ AsyncTexImage2DParams tex_params = {
+ target, level, static_cast<GLenum>(internal_format),
+ width, height, border, format, type};
+ AsyncMemoryParams mem_params(
+ GetSharedMemoryBuffer(c.pixels_shm_id), c.pixels_shm_offset, pixels_size);
+
+ // Set up the async state if needed, and make the texture
+ // immutable so the async state stays valid. The level info
+ // is set up lazily when the transfer completes.
+ AsyncPixelTransferDelegate* delegate =
+ async_pixel_transfer_manager_->CreatePixelTransferDelegate(texture_ref,
+ tex_params);
+ texture->SetImmutable(true);
+
+ delegate->AsyncTexImage2D(
+ tex_params,
+ mem_params,
+ base::Bind(&TextureManager::SetLevelInfoFromParams,
+ // The callback is only invoked if the transfer delegate still
+ // exists, which implies through manager->texture_ref->state
+ // ownership that both of these pointers are valid.
+ base::Unretained(texture_manager()),
+ base::Unretained(texture_ref),
+ tex_params));
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleAsyncTexSubImage2DCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::AsyncTexSubImage2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::AsyncTexSubImage2DCHROMIUM*>(cmd_data);
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleAsyncTexSubImage2DCHROMIUM");
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLenum format = static_cast<GLenum>(c.format);
+ GLenum type = static_cast<GLenum>(c.type);
+ uint32 async_upload_token = static_cast<uint32>(c.async_upload_token);
+ uint32 sync_data_shm_id = static_cast<uint32>(c.sync_data_shm_id);
+ uint32 sync_data_shm_offset = static_cast<uint32>(c.sync_data_shm_offset);
+
+ base::ScopedClosureRunner scoped_completion_callback;
+ if (async_upload_token) {
+ base::Closure completion_closure =
+ AsyncUploadTokenCompletionClosure(async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
+ if (completion_closure.is_null())
+ return error::kInvalidArguments;
+
+ scoped_completion_callback.Reset(completion_closure);
+ }
+
+ // TODO(epenner): Move this and copies of this memory validation
+ // into ValidateTexSubImage2D step.
+ uint32 data_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &data_size,
+ NULL, NULL)) {
+ return error::kOutOfBounds;
+ }
+ const void* pixels = GetSharedMemoryAs<const void*>(
+ c.data_shm_id, c.data_shm_offset, data_size);
+
+ // All the normal glTexSubImage2D validation.
+ error::Error error = error::kNoError;
+ if (!ValidateTexSubImage2D(&error, "glAsyncTexSubImage2DCHROMIUM",
+ target, level, xoffset, yoffset, width, height, format, type, pixels)) {
+ return error;
+ }
+
+ // Extra async validation.
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ Texture* texture = texture_ref->texture();
+ if (!ValidateAsyncTransfer(
+ "glAsyncTexSubImage2DCHROMIUM", texture_ref, target, level, pixels))
+ return error::kNoError;
+
+ // Guarantee async textures are always 'cleared' as follows:
+ // - AsyncTexImage2D cannot redefine an existing texture.
+ // - AsyncTexImage2D must initialize the entire image via a non-null buffer.
+ // - AsyncTexSubImage2D clears synchronously if not already cleared.
+ // - Textures become immutable after an async call.
+ // This way we know in all cases that an async texture is always clear.
+ if (!texture->SafeToRenderFrom()) {
+ if (!texture_manager()->ClearTextureLevel(this, texture_ref,
+ target, level)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY,
+ "glAsyncTexSubImage2DCHROMIUM", "dimensions too big");
+ return error::kNoError;
+ }
+ }
+
+ // Setup the parameters.
+ AsyncTexSubImage2DParams tex_params = {target, level, xoffset, yoffset,
+ width, height, format, type};
+ AsyncMemoryParams mem_params(
+ GetSharedMemoryBuffer(c.data_shm_id), c.data_shm_offset, data_size);
+ AsyncPixelTransferDelegate* delegate =
+ async_pixel_transfer_manager_->GetPixelTransferDelegate(texture_ref);
+ if (!delegate) {
+ // TODO(epenner): We may want to enforce exclusive use
+ // of async APIs in which case this should become an error,
+ // (the texture should have been async defined).
+ AsyncTexImage2DParams define_params = {target, level,
+ 0, 0, 0, 0, 0, 0};
+ texture->GetLevelSize(target, level, &define_params.width,
+ &define_params.height);
+ texture->GetLevelType(target, level, &define_params.type,
+ &define_params.internal_format);
+ // Set up the async state if needed, and make the texture
+ // immutable so the async state stays valid.
+ delegate = async_pixel_transfer_manager_->CreatePixelTransferDelegate(
+ texture_ref, define_params);
+ texture->SetImmutable(true);
+ }
+
+ delegate->AsyncTexSubImage2D(tex_params, mem_params);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleWaitAsyncTexImage2DCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::WaitAsyncTexImage2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::WaitAsyncTexImage2DCHROMIUM*>(cmd_data);
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleWaitAsyncTexImage2DCHROMIUM");
+ GLenum target = static_cast<GLenum>(c.target);
+
+ if (GL_TEXTURE_2D != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_ENUM, "glWaitAsyncTexImage2DCHROMIUM", "target");
+ return error::kNoError;
+ }
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glWaitAsyncTexImage2DCHROMIUM", "unknown texture");
+ return error::kNoError;
+ }
+ AsyncPixelTransferDelegate* delegate =
+ async_pixel_transfer_manager_->GetPixelTransferDelegate(texture_ref);
+ if (!delegate) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glWaitAsyncTexImage2DCHROMIUM", "No async transfer started");
+ return error::kNoError;
+ }
+ delegate->WaitForTransferCompletion();
+ ProcessFinishedAsyncTransfers();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleWaitAllAsyncTexImage2DCHROMIUM(
+ uint32 immediate_data_size,
+ const void* data) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleWaitAsyncTexImage2DCHROMIUM");
+
+ GetAsyncPixelTransferManager()->WaitAllAsyncTexImage2D();
+ ProcessFinishedAsyncTransfers();
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::OnTextureRefDetachedFromFramebuffer(
+ TextureRef* texture_ref) {
+ Texture* texture = texture_ref->texture();
+ DoDidUseTexImageIfNeeded(texture, texture->target());
+}
+
+void GLES2DecoderImpl::OnOutOfMemoryError() {
+ if (lose_context_when_out_of_memory_) {
+ group_->LoseContexts(GL_UNKNOWN_CONTEXT_RESET_ARB);
+ LoseContext(GL_GUILTY_CONTEXT_RESET_ARB);
+ }
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/service/gles2_cmd_decoder_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.h b/gpu/command_buffer/service/gles2_cmd_decoder.h
new file mode 100644
index 0000000..5c94b93
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -0,0 +1,261 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the GLES2Decoder class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_H_
+
+#include <vector>
+
+#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/command_buffer/service/common_decoder.h"
+#include "gpu/command_buffer/service/logger.h"
+#include "ui/gfx/size.h"
+#include "ui/gl/gl_context.h"
+
+namespace gfx {
+class GLContext;
+class GLSurface;
+}
+
+namespace gpu {
+
+class AsyncPixelTransferDelegate;
+class AsyncPixelTransferManager;
+struct Mailbox;
+
+namespace gles2 {
+
+class ContextGroup;
+class ErrorState;
+class GLES2Util;
+class ImageManager;
+class Logger;
+class QueryManager;
+class VertexArrayManager;
+struct ContextState;
+
+struct DisallowedFeatures {
+ DisallowedFeatures()
+ : gpu_memory_manager(false) {
+ }
+
+ bool gpu_memory_manager;
+};
+
+typedef base::Callback<void(const std::string& key,
+ const std::string& shader)> ShaderCacheCallback;
+
+// This class implements the AsyncAPIInterface interface, decoding GLES2
+// commands and calling GL.
+class GPU_EXPORT GLES2Decoder : public base::SupportsWeakPtr<GLES2Decoder>,
+ public CommonDecoder {
+ public:
+ typedef error::Error Error;
+ typedef base::Callback<bool(uint32 id)> WaitSyncPointCallback;
+
+ // The default stencil mask, which has all bits set. This really should be a
+ // GLuint, but we can't #include gl_bindings.h in this file without causing
+ // macro redefinitions.
+ static const unsigned int kDefaultStencilMask;
+
+ // Creates a decoder.
+ static GLES2Decoder* Create(ContextGroup* group);
+
+ virtual ~GLES2Decoder();
+
+ bool initialized() const {
+ return initialized_;
+ }
+
+ void set_initialized() {
+ initialized_ = true;
+ }
+
+ bool debug() const {
+ return debug_;
+ }
+
+ // Set to true to call glGetError after every command.
+ void set_debug(bool debug) {
+ debug_ = debug;
+ }
+
+ bool log_commands() const {
+ return log_commands_;
+ }
+
+ // Set to true to LOG every command.
+ void set_log_commands(bool log_commands) {
+ log_commands_ = log_commands;
+ }
+
+ // Initializes the graphics context. Can create an offscreen
+ // decoder with a frame buffer that can be referenced from the parent.
+ // Takes ownership of GLContext.
+ // Parameters:
+ // surface: the GL surface to render to.
+ // context: the GL context to render to.
+ // offscreen: whether to make the context offscreen or not. When FBO 0 is
+ // bound, offscreen contexts render to an internal buffer, onscreen ones
+ // to the surface.
+ // size: the size if the GL context is offscreen.
+ // Returns:
+ // true if successful.
+ virtual bool Initialize(const scoped_refptr<gfx::GLSurface>& surface,
+ const scoped_refptr<gfx::GLContext>& context,
+ bool offscreen,
+ const gfx::Size& size,
+ const DisallowedFeatures& disallowed_features,
+ const std::vector<int32>& attribs) = 0;
+
+ // Destroys the graphics context.
+ virtual void Destroy(bool have_context) = 0;
+
+ // Set the surface associated with the default FBO.
+ virtual void SetSurface(const scoped_refptr<gfx::GLSurface>& surface) = 0;
+
+ virtual void ProduceFrontBuffer(const Mailbox& mailbox) = 0;
+
+ // Resize an offscreen frame buffer.
+ virtual bool ResizeOffscreenFrameBuffer(const gfx::Size& size) = 0;
+
+ // Make this decoder's GL context current.
+ virtual bool MakeCurrent() = 0;
+
+ // Gets the GLES2 Util which holds info.
+ virtual GLES2Util* GetGLES2Util() = 0;
+
+ // Gets the associated GLContext.
+ virtual gfx::GLContext* GetGLContext() = 0;
+
+ // Gets the associated ContextGroup
+ virtual ContextGroup* GetContextGroup() = 0;
+
+ virtual Capabilities GetCapabilities() = 0;
+
+ // Restores all of the decoder GL state.
+ virtual void RestoreState(const ContextState* prev_state) = 0;
+
+ // Restore States.
+ virtual void RestoreActiveTexture() const = 0;
+ virtual void RestoreAllTextureUnitBindings(
+ const ContextState* prev_state) const = 0;
+ virtual void RestoreActiveTextureUnitBinding(unsigned int target) const = 0;
+ virtual void RestoreBufferBindings() const = 0;
+ virtual void RestoreFramebufferBindings() const = 0;
+ virtual void RestoreRenderbufferBindings() = 0;
+ virtual void RestoreGlobalState() const = 0;
+ virtual void RestoreProgramBindings() const = 0;
+ virtual void RestoreTextureState(unsigned service_id) const = 0;
+ virtual void RestoreTextureUnitBindings(unsigned unit) const = 0;
+
+ virtual void ClearAllAttributes() const = 0;
+ virtual void RestoreAllAttributes() const = 0;
+
+ virtual void SetIgnoreCachedStateForTest(bool ignore) = 0;
+
+ // Gets the QueryManager for this context.
+ virtual QueryManager* GetQueryManager() = 0;
+
+ // Gets the VertexArrayManager for this context.
+ virtual VertexArrayManager* GetVertexArrayManager() = 0;
+
+ // Gets the ImageManager for this context.
+ virtual ImageManager* GetImageManager() = 0;
+
+ // Process any pending queries. Returns false if there are no pending queries.
+ virtual bool ProcessPendingQueries() = 0;
+
+ // Returns false if there is no idle work to be done.
+ virtual bool HasMoreIdleWork() = 0;
+
+ virtual void PerformIdleWork() = 0;
+
+ // Sets a callback which is called when a glResizeCHROMIUM command
+ // is processed.
+ virtual void SetResizeCallback(
+ const base::Callback<void(gfx::Size, float)>& callback) = 0;
+
+ // Interface to performing async pixel transfers.
+ virtual AsyncPixelTransferManager* GetAsyncPixelTransferManager() = 0;
+ virtual void ResetAsyncPixelTransferManagerForTest() = 0;
+ virtual void SetAsyncPixelTransferManagerForTest(
+ AsyncPixelTransferManager* manager) = 0;
+
+ // Get the service texture ID corresponding to a client texture ID.
+ // If no such record is found then return false.
+ virtual bool GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id);
+
+ // Provides detail about a lost context if one occurred.
+ virtual error::ContextLostReason GetContextLostReason() = 0;
+
+ // Clears a level of a texture.
+ // Returns false if a GL error should be generated.
+ virtual bool ClearLevel(
+ unsigned service_id,
+ unsigned bind_target,
+ unsigned target,
+ int level,
+ unsigned internal_format,
+ unsigned format,
+ unsigned type,
+ int width,
+ int height,
+ bool is_texture_immutable) = 0;
+
+ virtual ErrorState* GetErrorState() = 0;
+
+ // A callback for messages from the decoder.
+ virtual void SetShaderCacheCallback(const ShaderCacheCallback& callback) = 0;
+
+ // Sets the callback for waiting on a sync point. The callback returns the
+ // scheduling status (i.e. true if the channel is still scheduled).
+ virtual void SetWaitSyncPointCallback(
+ const WaitSyncPointCallback& callback) = 0;
+
+ virtual void WaitForReadPixels(base::Closure callback) = 0;
+ virtual uint32 GetTextureUploadCount() = 0;
+ virtual base::TimeDelta GetTotalTextureUploadTime() = 0;
+ virtual base::TimeDelta GetTotalProcessingCommandsTime() = 0;
+ virtual void AddProcessingCommandsTime(base::TimeDelta) = 0;
+
+ // Returns true if the context was lost either by GL_ARB_robustness, forced
+ // context loss or command buffer parse error.
+ virtual bool WasContextLost() = 0;
+
+ // Returns true if the context was lost specifically by GL_ARB_robustness.
+ virtual bool WasContextLostByRobustnessExtension() = 0;
+
+ // Lose this context.
+ virtual void LoseContext(uint32 reset_status) = 0;
+
+ virtual Logger* GetLogger() = 0;
+
+ virtual void BeginDecoding();
+ virtual void EndDecoding();
+
+ virtual const ContextState* GetContextState() = 0;
+
+ protected:
+ GLES2Decoder();
+
+ private:
+ bool initialized_;
+ bool debug_;
+ bool log_commands_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLES2Decoder);
+};
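+
+// A hedged setup sketch against this interface (the real wiring lives in the
+// command buffer service; "group", "surface", "context", "size" and "attribs"
+// are placeholder variables):
+//
+//   scoped_ptr<GLES2Decoder> decoder(GLES2Decoder::Create(group));
+//   if (!decoder->Initialize(surface, context, false /* offscreen */, size,
+//                            DisallowedFeatures(), attribs)) {
+//     // Handle initialization failure.
+//   }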
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
new file mode 100644
index 0000000..dade363
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -0,0 +1,3382 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_AUTOGEN_H_
+
+error::Error GLES2DecoderImpl::HandleActiveTexture(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ActiveTexture& c =
+ *static_cast<const gles2::cmds::ActiveTexture*>(cmd_data);
+ (void)c;
+ GLenum texture = static_cast<GLenum>(c.texture);
+ DoActiveTexture(texture);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleAttachShader(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::AttachShader& c =
+ *static_cast<const gles2::cmds::AttachShader*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ GLuint shader = c.shader;
+ DoAttachShader(program, shader);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindBuffer(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindBuffer& c =
+ *static_cast<const gles2::cmds::BindBuffer*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint buffer = c.buffer;
+ if (!validators_->buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindBuffer", target, "target");
+ return error::kNoError;
+ }
+ DoBindBuffer(target, buffer);
+ return error::kNoError;
+}
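// Annotation (editorial, not part of the generated file): every handler in
// this autogenerated file follows the same three-step shape seen above:
// (1) cast cmd_data to the command's packed struct and unpack its fields
//     with static_casts ("(void)c;" just silences unused-variable warnings),
// (2) validate enum and size arguments through validators_; an invalid enum
//     records a GL error via LOCAL_SET_GL_ERROR_INVALID_ENUM and returns
//     error::kNoError so command processing continues, and
// (3) forward the validated arguments to the hand-written Do* method in
//     gles2_cmd_decoder.cc.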
+
+error::Error GLES2DecoderImpl::HandleBindFramebuffer(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindFramebuffer& c =
+ *static_cast<const gles2::cmds::BindFramebuffer*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint framebuffer = c.framebuffer;
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindFramebuffer", target, "target");
+ return error::kNoError;
+ }
+ DoBindFramebuffer(target, framebuffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindRenderbuffer(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindRenderbuffer& c =
+ *static_cast<const gles2::cmds::BindRenderbuffer*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint renderbuffer = c.renderbuffer;
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindRenderbuffer", target, "target");
+ return error::kNoError;
+ }
+ DoBindRenderbuffer(target, renderbuffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindTexture(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindTexture& c =
+ *static_cast<const gles2::cmds::BindTexture*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint texture = c.texture;
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindTexture", target, "target");
+ return error::kNoError;
+ }
+ DoBindTexture(target, texture);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendColor(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlendColor& c =
+ *static_cast<const gles2::cmds::BlendColor*>(cmd_data);
+ (void)c;
+ GLclampf red = static_cast<GLclampf>(c.red);
+ GLclampf green = static_cast<GLclampf>(c.green);
+ GLclampf blue = static_cast<GLclampf>(c.blue);
+ GLclampf alpha = static_cast<GLclampf>(c.alpha);
+ if (state_.blend_color_red != red || state_.blend_color_green != green ||
+ state_.blend_color_blue != blue || state_.blend_color_alpha != alpha) {
+ state_.blend_color_red = red;
+ state_.blend_color_green = green;
+ state_.blend_color_blue = blue;
+ state_.blend_color_alpha = alpha;
+ glBlendColor(red, green, blue, alpha);
+ }
+ return error::kNoError;
+}
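// Annotation (editorial, not part of the generated file): state-setting
// handlers such as BlendColor above compare the incoming values against the
// cached copies in state_ and only call into the real GL driver when
// something actually changed. A stand-alone sketch of that redundant-state
// filter (placeholder names, not Chromium API):
//
//   template <typename T, typename ApplyFn>
//   void SetIfChanged(T* cached, const T& value, ApplyFn apply) {
//     if (*cached != value) {
//       *cached = value;
//       apply(value);  // Reach the driver only on a real change.
//     }
//   }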
+
+error::Error GLES2DecoderImpl::HandleBlendEquation(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlendEquation& c =
+ *static_cast<const gles2::cmds::BlendEquation*>(cmd_data);
+ (void)c;
+ GLenum mode = static_cast<GLenum>(c.mode);
+ if (!validators_->equation.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBlendEquation", mode, "mode");
+ return error::kNoError;
+ }
+ if (state_.blend_equation_rgb != mode ||
+ state_.blend_equation_alpha != mode) {
+ state_.blend_equation_rgb = mode;
+ state_.blend_equation_alpha = mode;
+ glBlendEquation(mode);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendEquationSeparate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlendEquationSeparate& c =
+ *static_cast<const gles2::cmds::BlendEquationSeparate*>(cmd_data);
+ (void)c;
+ GLenum modeRGB = static_cast<GLenum>(c.modeRGB);
+ GLenum modeAlpha = static_cast<GLenum>(c.modeAlpha);
+ if (!validators_->equation.IsValid(modeRGB)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBlendEquationSeparate", modeRGB, "modeRGB");
+ return error::kNoError;
+ }
+ if (!validators_->equation.IsValid(modeAlpha)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBlendEquationSeparate", modeAlpha, "modeAlpha");
+ return error::kNoError;
+ }
+ if (state_.blend_equation_rgb != modeRGB ||
+ state_.blend_equation_alpha != modeAlpha) {
+ state_.blend_equation_rgb = modeRGB;
+ state_.blend_equation_alpha = modeAlpha;
+ glBlendEquationSeparate(modeRGB, modeAlpha);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendFunc(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlendFunc& c =
+ *static_cast<const gles2::cmds::BlendFunc*>(cmd_data);
+ (void)c;
+ GLenum sfactor = static_cast<GLenum>(c.sfactor);
+ GLenum dfactor = static_cast<GLenum>(c.dfactor);
+ if (!validators_->src_blend_factor.IsValid(sfactor)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBlendFunc", sfactor, "sfactor");
+ return error::kNoError;
+ }
+ if (!validators_->dst_blend_factor.IsValid(dfactor)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBlendFunc", dfactor, "dfactor");
+ return error::kNoError;
+ }
+ if (state_.blend_source_rgb != sfactor || state_.blend_dest_rgb != dfactor ||
+ state_.blend_source_alpha != sfactor ||
+ state_.blend_dest_alpha != dfactor) {
+ state_.blend_source_rgb = sfactor;
+ state_.blend_dest_rgb = dfactor;
+ state_.blend_source_alpha = sfactor;
+ state_.blend_dest_alpha = dfactor;
+ glBlendFunc(sfactor, dfactor);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendFuncSeparate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlendFuncSeparate& c =
+ *static_cast<const gles2::cmds::BlendFuncSeparate*>(cmd_data);
+ (void)c;
+ GLenum srcRGB = static_cast<GLenum>(c.srcRGB);
+ GLenum dstRGB = static_cast<GLenum>(c.dstRGB);
+ GLenum srcAlpha = static_cast<GLenum>(c.srcAlpha);
+ GLenum dstAlpha = static_cast<GLenum>(c.dstAlpha);
+ if (!validators_->src_blend_factor.IsValid(srcRGB)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBlendFuncSeparate", srcRGB, "srcRGB");
+ return error::kNoError;
+ }
+ if (!validators_->dst_blend_factor.IsValid(dstRGB)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBlendFuncSeparate", dstRGB, "dstRGB");
+ return error::kNoError;
+ }
+ if (!validators_->src_blend_factor.IsValid(srcAlpha)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBlendFuncSeparate", srcAlpha, "srcAlpha");
+ return error::kNoError;
+ }
+ if (!validators_->dst_blend_factor.IsValid(dstAlpha)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBlendFuncSeparate", dstAlpha, "dstAlpha");
+ return error::kNoError;
+ }
+ if (state_.blend_source_rgb != srcRGB || state_.blend_dest_rgb != dstRGB ||
+ state_.blend_source_alpha != srcAlpha ||
+ state_.blend_dest_alpha != dstAlpha) {
+ state_.blend_source_rgb = srcRGB;
+ state_.blend_dest_rgb = dstRGB;
+ state_.blend_source_alpha = srcAlpha;
+ state_.blend_dest_alpha = dstAlpha;
+ glBlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBufferSubData(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BufferSubData& c =
+ *static_cast<const gles2::cmds::BufferSubData*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLintptr offset = static_cast<GLintptr>(c.offset);
+ GLsizeiptr size = static_cast<GLsizeiptr>(c.size);
+ uint32_t data_size = size;
+ const void* data = GetSharedMemoryAs<const void*>(
+ c.data_shm_id, c.data_shm_offset, data_size);
+ if (!validators_->buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBufferSubData", target, "target");
+ return error::kNoError;
+ }
+ if (size < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glBufferSubData", "size < 0");
+ return error::kNoError;
+ }
+ if (data == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoBufferSubData(target, offset, size, data);
+ return error::kNoError;
+}
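// Annotation (editorial, not part of the generated file): handlers that take
// client data distinguish two failure classes above. Bad GL arguments (an
// invalid target, a negative size) raise a GL error and return
// error::kNoError, so the command stream keeps going; an unmappable
// shared-memory region (GetSharedMemoryAs returning NULL) instead returns
// error::kOutOfBounds, which the command buffer treats as a parse error from
// a misbehaving client rather than an application-level GL error.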
+
+error::Error GLES2DecoderImpl::HandleCheckFramebufferStatus(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CheckFramebufferStatus& c =
+ *static_cast<const gles2::cmds::CheckFramebufferStatus*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ typedef cmds::CheckFramebufferStatus::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCheckFramebufferStatus", target, "target");
+ return error::kNoError;
+ }
+ *result_dst = DoCheckFramebufferStatus(target);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleClear(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Clear& c =
+ *static_cast<const gles2::cmds::Clear*>(cmd_data);
+ (void)c;
+ error::Error error;
+ error = WillAccessBoundFramebufferForDraw();
+ if (error != error::kNoError)
+ return error;
+ GLbitfield mask = static_cast<GLbitfield>(c.mask);
+ DoClear(mask);
+ return error::kNoError;
+}
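// Annotation (editorial, not part of the generated file): handlers that draw
// to or read from the bound framebuffer (Clear here, and CopyTexImage2D,
// CopyTexSubImage2D and Finish later in this file) first call
// WillAccessBoundFramebufferForDraw()/...ForRead() and propagate any
// non-kNoError result, so the decoder can do whatever bookkeeping the bound
// framebuffer needs, or reject the command, before touching GL.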
+
+error::Error GLES2DecoderImpl::HandleClearColor(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ClearColor& c =
+ *static_cast<const gles2::cmds::ClearColor*>(cmd_data);
+ (void)c;
+ GLclampf red = static_cast<GLclampf>(c.red);
+ GLclampf green = static_cast<GLclampf>(c.green);
+ GLclampf blue = static_cast<GLclampf>(c.blue);
+ GLclampf alpha = static_cast<GLclampf>(c.alpha);
+ if (state_.color_clear_red != red || state_.color_clear_green != green ||
+ state_.color_clear_blue != blue || state_.color_clear_alpha != alpha) {
+ state_.color_clear_red = red;
+ state_.color_clear_green = green;
+ state_.color_clear_blue = blue;
+ state_.color_clear_alpha = alpha;
+ glClearColor(red, green, blue, alpha);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleClearDepthf(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ClearDepthf& c =
+ *static_cast<const gles2::cmds::ClearDepthf*>(cmd_data);
+ (void)c;
+ GLclampf depth = static_cast<GLclampf>(c.depth);
+ if (state_.depth_clear != depth) {
+ state_.depth_clear = depth;
+ glClearDepth(depth);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleClearStencil(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ClearStencil& c =
+ *static_cast<const gles2::cmds::ClearStencil*>(cmd_data);
+ (void)c;
+ GLint s = static_cast<GLint>(c.s);
+ if (state_.stencil_clear != s) {
+ state_.stencil_clear = s;
+ glClearStencil(s);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleColorMask(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ColorMask& c =
+ *static_cast<const gles2::cmds::ColorMask*>(cmd_data);
+ (void)c;
+ GLboolean red = static_cast<GLboolean>(c.red);
+ GLboolean green = static_cast<GLboolean>(c.green);
+ GLboolean blue = static_cast<GLboolean>(c.blue);
+ GLboolean alpha = static_cast<GLboolean>(c.alpha);
+ if (state_.color_mask_red != red || state_.color_mask_green != green ||
+ state_.color_mask_blue != blue || state_.color_mask_alpha != alpha) {
+ state_.color_mask_red = red;
+ state_.color_mask_green = green;
+ state_.color_mask_blue = blue;
+ state_.color_mask_alpha = alpha;
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCompileShader(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CompileShader& c =
+ *static_cast<const gles2::cmds::CompileShader*>(cmd_data);
+ (void)c;
+ GLuint shader = c.shader;
+ DoCompileShader(shader);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCompressedTexSubImage2D(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CompressedTexSubImage2D& c =
+ *static_cast<const gles2::cmds::CompressedTexSubImage2D*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLenum format = static_cast<GLenum>(c.format);
+ GLsizei imageSize = static_cast<GLsizei>(c.imageSize);
+ uint32_t data_size = imageSize;
+ const void* data = GetSharedMemoryAs<const void*>(
+ c.data_shm_id, c.data_shm_offset, data_size);
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCompressedTexSubImage2D", target, "target");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "height < 0");
+ return error::kNoError;
+ }
+ if (!validators_->compressed_texture_format.IsValid(format)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCompressedTexSubImage2D", format, "format");
+ return error::kNoError;
+ }
+ if (imageSize < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "imageSize < 0");
+ return error::kNoError;
+ }
+ if (data == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoCompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, imageSize, data);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCopyTexImage2D(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CopyTexImage2D& c =
+ *static_cast<const gles2::cmds::CopyTexImage2D*>(cmd_data);
+ (void)c;
+ error::Error error;
+ error = WillAccessBoundFramebufferForRead();
+ if (error != error::kNoError)
+ return error;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLint border = static_cast<GLint>(c.border);
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCopyTexImage2D", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_internal_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCopyTexImage2D", internalformat, "internalformat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopyTexImage2D", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopyTexImage2D", "height < 0");
+ return error::kNoError;
+ }
+ DoCopyTexImage2D(target, level, internalformat, x, y, width, height, border);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCopyTexSubImage2D(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CopyTexSubImage2D& c =
+ *static_cast<const gles2::cmds::CopyTexSubImage2D*>(cmd_data);
+ (void)c;
+ error::Error error;
+ error = WillAccessBoundFramebufferForRead();
+ if (error != error::kNoError)
+ return error;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCopyTexSubImage2D", target, "target");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopyTexSubImage2D", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopyTexSubImage2D", "height < 0");
+ return error::kNoError;
+ }
+ DoCopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCreateProgram(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CreateProgram& c =
+ *static_cast<const gles2::cmds::CreateProgram*>(cmd_data);
+ (void)c;
+ uint32_t client_id = c.client_id;
+ if (!CreateProgramHelper(client_id)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCreateShader(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CreateShader& c =
+ *static_cast<const gles2::cmds::CreateShader*>(cmd_data);
+ (void)c;
+ GLenum type = static_cast<GLenum>(c.type);
+ if (!validators_->shader_type.IsValid(type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCreateShader", type, "type");
+ return error::kNoError;
+ }
+ uint32_t client_id = c.client_id;
+ if (!CreateShaderHelper(type, client_id)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCullFace(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CullFace& c =
+ *static_cast<const gles2::cmds::CullFace*>(cmd_data);
+ (void)c;
+ GLenum mode = static_cast<GLenum>(c.mode);
+ if (!validators_->face_type.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCullFace", mode, "mode");
+ return error::kNoError;
+ }
+ if (state_.cull_mode != mode) {
+ state_.cull_mode = mode;
+ glCullFace(mode);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteBuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteBuffersImmediate& c =
+ *static_cast<const gles2::cmds::DeleteBuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* buffers =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (buffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteBuffersHelper(n, buffers);
+ return error::kNoError;
+}
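// Annotation (editorial, not part of the generated file): the
// SafeMultiplyUint32(n, sizeof(GLuint), &data_size) call above guards the
// size computation for the immediate payload; without it, a huge n could
// wrap data_size and let GetImmediateDataAs accept a too-small buffer. The
// core of such a check can be sketched as (illustrative only, not the actual
// Chromium helper; needs <cstdint>):
//
//   bool SafeMultiplyU32(uint32_t a, uint32_t b, uint32_t* out) {
//     if (b != 0 && a > UINT32_MAX / b)
//       return false;  // a * b would exceed 32 bits.
//     *out = a * b;
//     return true;
//   }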
+
+error::Error GLES2DecoderImpl::HandleDeleteFramebuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteFramebuffersImmediate& c =
+ *static_cast<const gles2::cmds::DeleteFramebuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* framebuffers =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (framebuffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteFramebuffersHelper(n, framebuffers);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteRenderbuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteRenderbuffersImmediate& c =
+ *static_cast<const gles2::cmds::DeleteRenderbuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* renderbuffers =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (renderbuffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteRenderbuffersHelper(n, renderbuffers);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteTexturesImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteTexturesImmediate& c =
+ *static_cast<const gles2::cmds::DeleteTexturesImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* textures =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (textures == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteTexturesHelper(n, textures);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDepthFunc(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DepthFunc& c =
+ *static_cast<const gles2::cmds::DepthFunc*>(cmd_data);
+ (void)c;
+ GLenum func = static_cast<GLenum>(c.func);
+ if (!validators_->cmp_function.IsValid(func)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glDepthFunc", func, "func");
+ return error::kNoError;
+ }
+ if (state_.depth_func != func) {
+ state_.depth_func = func;
+ glDepthFunc(func);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDepthMask(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DepthMask& c =
+ *static_cast<const gles2::cmds::DepthMask*>(cmd_data);
+ (void)c;
+ GLboolean flag = static_cast<GLboolean>(c.flag);
+ if (state_.depth_mask != flag) {
+ state_.depth_mask = flag;
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDepthRangef(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DepthRangef& c =
+ *static_cast<const gles2::cmds::DepthRangef*>(cmd_data);
+ (void)c;
+ GLclampf zNear = static_cast<GLclampf>(c.zNear);
+ GLclampf zFar = static_cast<GLclampf>(c.zFar);
+ DoDepthRangef(zNear, zFar);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDetachShader(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DetachShader& c =
+ *static_cast<const gles2::cmds::DetachShader*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ GLuint shader = c.shader;
+ DoDetachShader(program, shader);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDisable(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Disable& c =
+ *static_cast<const gles2::cmds::Disable*>(cmd_data);
+ (void)c;
+ GLenum cap = static_cast<GLenum>(c.cap);
+ if (!validators_->capability.IsValid(cap)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glDisable", cap, "cap");
+ return error::kNoError;
+ }
+ DoDisable(cap);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDisableVertexAttribArray(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DisableVertexAttribArray& c =
+ *static_cast<const gles2::cmds::DisableVertexAttribArray*>(cmd_data);
+ (void)c;
+ GLuint index = static_cast<GLuint>(c.index);
+ DoDisableVertexAttribArray(index);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleEnable(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Enable& c =
+ *static_cast<const gles2::cmds::Enable*>(cmd_data);
+ (void)c;
+ GLenum cap = static_cast<GLenum>(c.cap);
+ if (!validators_->capability.IsValid(cap)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glEnable", cap, "cap");
+ return error::kNoError;
+ }
+ DoEnable(cap);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleEnableVertexAttribArray(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::EnableVertexAttribArray& c =
+ *static_cast<const gles2::cmds::EnableVertexAttribArray*>(cmd_data);
+ (void)c;
+ GLuint index = static_cast<GLuint>(c.index);
+ DoEnableVertexAttribArray(index);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFinish(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Finish& c =
+ *static_cast<const gles2::cmds::Finish*>(cmd_data);
+ (void)c;
+ error::Error error;
+ error = WillAccessBoundFramebufferForRead();
+ if (error != error::kNoError)
+ return error;
+ DoFinish();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFlush(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Flush& c =
+ *static_cast<const gles2::cmds::Flush*>(cmd_data);
+ (void)c;
+ DoFlush();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFramebufferRenderbuffer(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::FramebufferRenderbuffer& c =
+ *static_cast<const gles2::cmds::FramebufferRenderbuffer*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum attachment = static_cast<GLenum>(c.attachment);
+ GLenum renderbuffertarget = static_cast<GLenum>(c.renderbuffertarget);
+ GLuint renderbuffer = c.renderbuffer;
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferRenderbuffer", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->attachment.IsValid(attachment)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferRenderbuffer", attachment, "attachment");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_target.IsValid(renderbuffertarget)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferRenderbuffer", renderbuffertarget, "renderbuffertarget");
+ return error::kNoError;
+ }
+ DoFramebufferRenderbuffer(
+ target, attachment, renderbuffertarget, renderbuffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFramebufferTexture2D(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::FramebufferTexture2D& c =
+ *static_cast<const gles2::cmds::FramebufferTexture2D*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum attachment = static_cast<GLenum>(c.attachment);
+ GLenum textarget = static_cast<GLenum>(c.textarget);
+ GLuint texture = c.texture;
+ GLint level = static_cast<GLint>(c.level);
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glFramebufferTexture2D", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->attachment.IsValid(attachment)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferTexture2D", attachment, "attachment");
+ return error::kNoError;
+ }
+ if (!validators_->texture_target.IsValid(textarget)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferTexture2D", textarget, "textarget");
+ return error::kNoError;
+ }
+ DoFramebufferTexture2D(target, attachment, textarget, texture, level);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFrontFace(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::FrontFace& c =
+ *static_cast<const gles2::cmds::FrontFace*>(cmd_data);
+ (void)c;
+ GLenum mode = static_cast<GLenum>(c.mode);
+ if (!validators_->face_mode.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glFrontFace", mode, "mode");
+ return error::kNoError;
+ }
+ if (state_.front_face != mode) {
+ state_.front_face = mode;
+ glFrontFace(mode);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenBuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenBuffersImmediate& c =
+ *static_cast<const gles2::cmds::GenBuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* buffers =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (buffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenBuffersHelper(n, buffers)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenerateMipmap(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenerateMipmap& c =
+ *static_cast<const gles2::cmds::GenerateMipmap*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGenerateMipmap", target, "target");
+ return error::kNoError;
+ }
+ DoGenerateMipmap(target);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenFramebuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenFramebuffersImmediate& c =
+ *static_cast<const gles2::cmds::GenFramebuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* framebuffers =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (framebuffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenFramebuffersHelper(n, framebuffers)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenRenderbuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenRenderbuffersImmediate& c =
+ *static_cast<const gles2::cmds::GenRenderbuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* renderbuffers =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (renderbuffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenRenderbuffersHelper(n, renderbuffers)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenTexturesImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenTexturesImmediate& c =
+ *static_cast<const gles2::cmds::GenTexturesImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* textures =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (textures == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenTexturesHelper(n, textures)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetBooleanv(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetBooleanv& c =
+ *static_cast<const gles2::cmds::GetBooleanv*>(cmd_data);
+ (void)c;
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetBooleanv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLboolean* params = result ? result->GetData() : NULL;
+ if (!validators_->g_l_state.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetBooleanv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetBooleanv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetBooleanv(pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetBooleanv", "");
+ }
+ return error::kNoError;
+}
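// Annotation (editorial, not part of the generated file): the GetXXXv
// handlers above share a result protocol. The client is expected to zero the
// Result block in shared memory before issuing the command, so a nonzero
// result->size is rejected as kInvalidArguments (it usually means the result
// buffer was reused without re-initialization). The decoder then calls
// SetNumResults only when glGetError() reports GL_NO_ERROR, so the client can
// tell a failed query from a successful one by checking the returned count.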
+
+error::Error GLES2DecoderImpl::HandleGetBufferParameteriv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetBufferParameteriv& c =
+ *static_cast<const gles2::cmds::GetBufferParameteriv*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetBufferParameteriv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetBufferParameteriv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->buffer_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetBufferParameteriv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetBufferParameteriv(target, pname, params);
+ result->SetNumResults(num_values);
+ return error::kNoError;
+}
+error::Error GLES2DecoderImpl::HandleGetError(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetError& c =
+ *static_cast<const gles2::cmds::GetError*>(cmd_data);
+ (void)c;
+ typedef cmds::GetError::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = GetErrorState()->GetGLError();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetFloatv(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetFloatv& c =
+ *static_cast<const gles2::cmds::GetFloatv*>(cmd_data);
+ (void)c;
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetFloatv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLfloat* params = result ? result->GetData() : NULL;
+ if (!validators_->g_l_state.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetFloatv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetFloatv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetFloatv(pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetFloatv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetFramebufferAttachmentParameteriv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetFramebufferAttachmentParameteriv& c =
+ *static_cast<const gles2::cmds::GetFramebufferAttachmentParameteriv*>(
+ cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum attachment = static_cast<GLenum>(c.attachment);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetFramebufferAttachmentParameteriv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetFramebufferAttachmentParameteriv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->attachment.IsValid(attachment)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetFramebufferAttachmentParameteriv", attachment, "attachment");
+ return error::kNoError;
+ }
+ if (!validators_->frame_buffer_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetFramebufferAttachmentParameteriv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetFramebufferAttachmentParameteriv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetFramebufferAttachmentParameteriv(target, attachment, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetFramebufferAttachmentParameteriv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetIntegerv(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetIntegerv& c =
+ *static_cast<const gles2::cmds::GetIntegerv*>(cmd_data);
+ (void)c;
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetIntegerv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->g_l_state.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetIntegerv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetIntegerv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetIntegerv(pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetIntegerv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramiv(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetProgramiv& c =
+ *static_cast<const gles2::cmds::GetProgramiv*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetProgramiv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->program_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetProgramiv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetProgramiv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetProgramiv(program, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetProgramiv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetRenderbufferParameteriv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetRenderbufferParameteriv& c =
+ *static_cast<const gles2::cmds::GetRenderbufferParameteriv*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetRenderbufferParameteriv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetRenderbufferParameteriv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetRenderbufferParameteriv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetRenderbufferParameteriv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetRenderbufferParameteriv(target, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetRenderbufferParameteriv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetShaderiv(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetShaderiv& c =
+ *static_cast<const gles2::cmds::GetShaderiv*>(cmd_data);
+ (void)c;
+ GLuint shader = c.shader;
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetShaderiv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->shader_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetShaderiv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetShaderiv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetShaderiv(shader, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetShaderiv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetTexParameterfv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetTexParameterfv& c =
+ *static_cast<const gles2::cmds::GetTexParameterfv*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetTexParameterfv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLfloat* params = result ? result->GetData() : NULL;
+ if (!validators_->get_tex_param_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetTexParameterfv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetTexParameterfv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetTexParameterfv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetTexParameterfv(target, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetTexParameterfv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetTexParameteriv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetTexParameteriv& c =
+ *static_cast<const gles2::cmds::GetTexParameteriv*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetTexParameteriv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->get_tex_param_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetTexParameteriv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetTexParameteriv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetTexParameteriv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetTexParameteriv(target, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetTexParameteriv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetVertexAttribfv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetVertexAttribfv& c =
+ *static_cast<const gles2::cmds::GetVertexAttribfv*>(cmd_data);
+ (void)c;
+ GLuint index = static_cast<GLuint>(c.index);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetVertexAttribfv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLfloat* params = result ? result->GetData() : NULL;
+ if (!validators_->vertex_attribute.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetVertexAttribfv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetVertexAttribfv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetVertexAttribfv(index, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetVertexAttribfv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetVertexAttribiv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetVertexAttribiv& c =
+ *static_cast<const gles2::cmds::GetVertexAttribiv*>(cmd_data);
+ (void)c;
+ GLuint index = static_cast<GLuint>(c.index);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetVertexAttribiv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->vertex_attribute.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetVertexAttribiv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetVertexAttribiv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetVertexAttribiv(index, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetVertexAttribiv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleHint(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Hint& c = *static_cast<const gles2::cmds::Hint*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum mode = static_cast<GLenum>(c.mode);
+ if (!validators_->hint_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glHint", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->hint_mode.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glHint", mode, "mode");
+ return error::kNoError;
+ }
+ switch (target) {
+ case GL_GENERATE_MIPMAP_HINT:
+ if (state_.hint_generate_mipmap != mode) {
+ state_.hint_generate_mipmap = mode;
+ glHint(target, mode);
+ }
+ break;
+ case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES:
+ if (state_.hint_fragment_shader_derivative != mode) {
+ state_.hint_fragment_shader_derivative = mode;
+ glHint(target, mode);
+ }
+ break;
+ default:
+ NOTREACHED();
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsBuffer(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsBuffer& c =
+ *static_cast<const gles2::cmds::IsBuffer*>(cmd_data);
+ (void)c;
+ GLuint buffer = c.buffer;
+ typedef cmds::IsBuffer::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsBuffer(buffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsEnabled(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsEnabled& c =
+ *static_cast<const gles2::cmds::IsEnabled*>(cmd_data);
+ (void)c;
+ GLenum cap = static_cast<GLenum>(c.cap);
+ typedef cmds::IsEnabled::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ if (!validators_->capability.IsValid(cap)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glIsEnabled", cap, "cap");
+ return error::kNoError;
+ }
+ *result_dst = DoIsEnabled(cap);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsFramebuffer(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsFramebuffer& c =
+ *static_cast<const gles2::cmds::IsFramebuffer*>(cmd_data);
+ (void)c;
+ GLuint framebuffer = c.framebuffer;
+ typedef cmds::IsFramebuffer::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsFramebuffer(framebuffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsProgram(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsProgram& c =
+ *static_cast<const gles2::cmds::IsProgram*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ typedef cmds::IsProgram::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsProgram(program);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsRenderbuffer(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsRenderbuffer& c =
+ *static_cast<const gles2::cmds::IsRenderbuffer*>(cmd_data);
+ (void)c;
+ GLuint renderbuffer = c.renderbuffer;
+ typedef cmds::IsRenderbuffer::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsRenderbuffer(renderbuffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsShader(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsShader& c =
+ *static_cast<const gles2::cmds::IsShader*>(cmd_data);
+ (void)c;
+ GLuint shader = c.shader;
+ typedef cmds::IsShader::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsShader(shader);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsTexture(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsTexture& c =
+ *static_cast<const gles2::cmds::IsTexture*>(cmd_data);
+ (void)c;
+ GLuint texture = c.texture;
+ typedef cmds::IsTexture::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsTexture(texture);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleLineWidth(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::LineWidth& c =
+ *static_cast<const gles2::cmds::LineWidth*>(cmd_data);
+ (void)c;
+ GLfloat width = static_cast<GLfloat>(c.width);
+ if (width <= 0.0f || base::IsNaN(width)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "LineWidth", "width out of range");
+ return error::kNoError;
+ }
+ if (state_.line_width != width) {
+ state_.line_width = width;
+ glLineWidth(width);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleLinkProgram(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::LinkProgram& c =
+ *static_cast<const gles2::cmds::LinkProgram*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ DoLinkProgram(program);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandlePolygonOffset(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::PolygonOffset& c =
+ *static_cast<const gles2::cmds::PolygonOffset*>(cmd_data);
+ (void)c;
+ GLfloat factor = static_cast<GLfloat>(c.factor);
+ GLfloat units = static_cast<GLfloat>(c.units);
+ if (state_.polygon_offset_factor != factor ||
+ state_.polygon_offset_units != units) {
+ state_.polygon_offset_factor = factor;
+ state_.polygon_offset_units = units;
+ glPolygonOffset(factor, units);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleReleaseShaderCompiler(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ReleaseShaderCompiler& c =
+ *static_cast<const gles2::cmds::ReleaseShaderCompiler*>(cmd_data);
+ (void)c;
+ DoReleaseShaderCompiler();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleRenderbufferStorage(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::RenderbufferStorage& c =
+ *static_cast<const gles2::cmds::RenderbufferStorage*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glRenderbufferStorage", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glRenderbufferStorage", internalformat, "internalformat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRenderbufferStorage", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRenderbufferStorage", "height < 0");
+ return error::kNoError;
+ }
+ DoRenderbufferStorage(target, internalformat, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleSampleCoverage(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::SampleCoverage& c =
+ *static_cast<const gles2::cmds::SampleCoverage*>(cmd_data);
+ (void)c;
+ GLclampf value = static_cast<GLclampf>(c.value);
+ GLboolean invert = static_cast<GLboolean>(c.invert);
+ DoSampleCoverage(value, invert);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleScissor(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Scissor& c =
+ *static_cast<const gles2::cmds::Scissor*>(cmd_data);
+ (void)c;
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScissor", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScissor", "height < 0");
+ return error::kNoError;
+ }
+ if (state_.scissor_x != x || state_.scissor_y != y ||
+ state_.scissor_width != width || state_.scissor_height != height) {
+ state_.scissor_x = x;
+ state_.scissor_y = y;
+ state_.scissor_width = width;
+ state_.scissor_height = height;
+ glScissor(x, y, width, height);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilFunc(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilFunc& c =
+ *static_cast<const gles2::cmds::StencilFunc*>(cmd_data);
+ (void)c;
+ GLenum func = static_cast<GLenum>(c.func);
+ GLint ref = static_cast<GLint>(c.ref);
+ GLuint mask = static_cast<GLuint>(c.mask);
+ if (!validators_->cmp_function.IsValid(func)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilFunc", func, "func");
+ return error::kNoError;
+ }
+ if (state_.stencil_front_func != func || state_.stencil_front_ref != ref ||
+ state_.stencil_front_mask != mask || state_.stencil_back_func != func ||
+ state_.stencil_back_ref != ref || state_.stencil_back_mask != mask) {
+ state_.stencil_front_func = func;
+ state_.stencil_front_ref = ref;
+ state_.stencil_front_mask = mask;
+ state_.stencil_back_func = func;
+ state_.stencil_back_ref = ref;
+ state_.stencil_back_mask = mask;
+ glStencilFunc(func, ref, mask);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilFuncSeparate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilFuncSeparate& c =
+ *static_cast<const gles2::cmds::StencilFuncSeparate*>(cmd_data);
+ (void)c;
+ GLenum face = static_cast<GLenum>(c.face);
+ GLenum func = static_cast<GLenum>(c.func);
+ GLint ref = static_cast<GLint>(c.ref);
+ GLuint mask = static_cast<GLuint>(c.mask);
+ if (!validators_->face_type.IsValid(face)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilFuncSeparate", face, "face");
+ return error::kNoError;
+ }
+ if (!validators_->cmp_function.IsValid(func)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilFuncSeparate", func, "func");
+ return error::kNoError;
+ }
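+ // Update only the cached face(s) selected by |face| and skip the GL call
+ // when nothing changes.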
+ bool changed = false;
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_front_func != func ||
+ state_.stencil_front_ref != ref ||
+ state_.stencil_front_mask != mask;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_back_func != func ||
+ state_.stencil_back_ref != ref ||
+ state_.stencil_back_mask != mask;
+ }
+ if (changed) {
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ state_.stencil_front_func = func;
+ state_.stencil_front_ref = ref;
+ state_.stencil_front_mask = mask;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ state_.stencil_back_func = func;
+ state_.stencil_back_ref = ref;
+ state_.stencil_back_mask = mask;
+ }
+ glStencilFuncSeparate(face, func, ref, mask);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilMask(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilMask& c =
+ *static_cast<const gles2::cmds::StencilMask*>(cmd_data);
+ (void)c;
+ GLuint mask = static_cast<GLuint>(c.mask);
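+ // No glStencilMask call is made here: only the cached front/back write masks
+ // are updated, and clear_state_dirty flags them to be re-applied with the
+ // rest of the deferred clear state.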
+ if (state_.stencil_front_writemask != mask ||
+ state_.stencil_back_writemask != mask) {
+ state_.stencil_front_writemask = mask;
+ state_.stencil_back_writemask = mask;
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilMaskSeparate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilMaskSeparate& c =
+ *static_cast<const gles2::cmds::StencilMaskSeparate*>(cmd_data);
+ (void)c;
+ GLenum face = static_cast<GLenum>(c.face);
+ GLuint mask = static_cast<GLuint>(c.mask);
+ if (!validators_->face_type.IsValid(face)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilMaskSeparate", face, "face");
+ return error::kNoError;
+ }
+ bool changed = false;
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_front_writemask != mask;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_back_writemask != mask;
+ }
+ if (changed) {
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ state_.stencil_front_writemask = mask;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ state_.stencil_back_writemask = mask;
+ }
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilOp(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilOp& c =
+ *static_cast<const gles2::cmds::StencilOp*>(cmd_data);
+ (void)c;
+ GLenum fail = static_cast<GLenum>(c.fail);
+ GLenum zfail = static_cast<GLenum>(c.zfail);
+ GLenum zpass = static_cast<GLenum>(c.zpass);
+ if (!validators_->stencil_op.IsValid(fail)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOp", fail, "fail");
+ return error::kNoError;
+ }
+ if (!validators_->stencil_op.IsValid(zfail)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOp", zfail, "zfail");
+ return error::kNoError;
+ }
+ if (!validators_->stencil_op.IsValid(zpass)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOp", zpass, "zpass");
+ return error::kNoError;
+ }
+ if (state_.stencil_front_fail_op != fail ||
+ state_.stencil_front_z_fail_op != zfail ||
+ state_.stencil_front_z_pass_op != zpass ||
+ state_.stencil_back_fail_op != fail ||
+ state_.stencil_back_z_fail_op != zfail ||
+ state_.stencil_back_z_pass_op != zpass) {
+ state_.stencil_front_fail_op = fail;
+ state_.stencil_front_z_fail_op = zfail;
+ state_.stencil_front_z_pass_op = zpass;
+ state_.stencil_back_fail_op = fail;
+ state_.stencil_back_z_fail_op = zfail;
+ state_.stencil_back_z_pass_op = zpass;
+ glStencilOp(fail, zfail, zpass);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilOpSeparate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilOpSeparate& c =
+ *static_cast<const gles2::cmds::StencilOpSeparate*>(cmd_data);
+ (void)c;
+ GLenum face = static_cast<GLenum>(c.face);
+ GLenum fail = static_cast<GLenum>(c.fail);
+ GLenum zfail = static_cast<GLenum>(c.zfail);
+ GLenum zpass = static_cast<GLenum>(c.zpass);
+ if (!validators_->face_type.IsValid(face)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOpSeparate", face, "face");
+ return error::kNoError;
+ }
+ if (!validators_->stencil_op.IsValid(fail)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOpSeparate", fail, "fail");
+ return error::kNoError;
+ }
+ if (!validators_->stencil_op.IsValid(zfail)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOpSeparate", zfail, "zfail");
+ return error::kNoError;
+ }
+ if (!validators_->stencil_op.IsValid(zpass)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOpSeparate", zpass, "zpass");
+ return error::kNoError;
+ }
+ bool changed = false;
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_front_fail_op != fail ||
+ state_.stencil_front_z_fail_op != zfail ||
+ state_.stencil_front_z_pass_op != zpass;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_back_fail_op != fail ||
+ state_.stencil_back_z_fail_op != zfail ||
+ state_.stencil_back_z_pass_op != zpass;
+ }
+ if (changed) {
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ state_.stencil_front_fail_op = fail;
+ state_.stencil_front_z_fail_op = zfail;
+ state_.stencil_front_z_pass_op = zpass;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ state_.stencil_back_fail_op = fail;
+ state_.stencil_back_z_fail_op = zfail;
+ state_.stencil_back_z_pass_op = zpass;
+ }
+ glStencilOpSeparate(face, fail, zfail, zpass);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexParameterf(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexParameterf& c =
+ *static_cast<const gles2::cmds::TexParameterf*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ GLfloat param = static_cast<GLfloat>(c.param);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameterf", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameterf", pname, "pname");
+ return error::kNoError;
+ }
+ DoTexParameterf(target, pname, param);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexParameterfvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexParameterfvImmediate& c =
+ *static_cast<const gles2::cmds::TexParameterfvImmediate*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* params =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameterfv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameterfv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoTexParameterfv(target, pname, params);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexParameteri(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexParameteri& c =
+ *static_cast<const gles2::cmds::TexParameteri*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ GLint param = static_cast<GLint>(c.param);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteri", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteri", pname, "pname");
+ return error::kNoError;
+ }
+ DoTexParameteri(target, pname, param);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexParameterivImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexParameterivImmediate& c =
+ *static_cast<const gles2::cmds::TexParameterivImmediate*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLint), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLint* params =
+ GetImmediateDataAs<const GLint*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteriv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteriv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoTexParameteriv(target, pname, params);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform1f(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform1f& c =
+ *static_cast<const gles2::cmds::Uniform1f*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLfloat x = static_cast<GLfloat>(c.x);
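+ // Scalar uniforms are routed through the array variant with a count of 1.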
+ GLfloat temp[1] = {
+ x,
+ };
+ DoUniform1fv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform1fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform1fvImmediate& c =
+ *static_cast<const gles2::cmds::Uniform1fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
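+ // Verify that |count| elements fit inside the immediate payload before
+ // reading the inlined data.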
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* v =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform1fv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform1i(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform1i& c =
+ *static_cast<const gles2::cmds::Uniform1i*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLint x = static_cast<GLint>(c.x);
+ DoUniform1i(location, x);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform1ivImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform1ivImmediate& c =
+ *static_cast<const gles2::cmds::Uniform1ivImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLint), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLint* v =
+ GetImmediateDataAs<const GLint*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform1iv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform2f(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform2f& c =
+ *static_cast<const gles2::cmds::Uniform2f*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ GLfloat temp[2] = {
+ x, y,
+ };
+ DoUniform2fv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform2fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform2fvImmediate& c =
+ *static_cast<const gles2::cmds::Uniform2fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 2, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* v =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform2fv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform2i(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform2i& c =
+ *static_cast<const gles2::cmds::Uniform2i*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLint temp[2] = {
+ x, y,
+ };
+ DoUniform2iv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform2ivImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform2ivImmediate& c =
+ *static_cast<const gles2::cmds::Uniform2ivImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLint), 2, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLint* v =
+ GetImmediateDataAs<const GLint*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform2iv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform3f(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform3f& c =
+ *static_cast<const gles2::cmds::Uniform3f*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ GLfloat z = static_cast<GLfloat>(c.z);
+ GLfloat temp[3] = {
+ x, y, z,
+ };
+ DoUniform3fv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform3fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform3fvImmediate& c =
+ *static_cast<const gles2::cmds::Uniform3fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 3, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* v =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform3fv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform3i(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform3i& c =
+ *static_cast<const gles2::cmds::Uniform3i*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLint z = static_cast<GLint>(c.z);
+ GLint temp[3] = {
+ x, y, z,
+ };
+ DoUniform3iv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform3ivImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform3ivImmediate& c =
+ *static_cast<const gles2::cmds::Uniform3ivImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLint), 3, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLint* v =
+ GetImmediateDataAs<const GLint*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform3iv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform4f(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform4f& c =
+ *static_cast<const gles2::cmds::Uniform4f*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ GLfloat z = static_cast<GLfloat>(c.z);
+ GLfloat w = static_cast<GLfloat>(c.w);
+ GLfloat temp[4] = {
+ x, y, z, w,
+ };
+ DoUniform4fv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform4fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform4fvImmediate& c =
+ *static_cast<const gles2::cmds::Uniform4fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 4, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* v =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform4fv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform4i(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform4i& c =
+ *static_cast<const gles2::cmds::Uniform4i*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLint z = static_cast<GLint>(c.z);
+ GLint w = static_cast<GLint>(c.w);
+ GLint temp[4] = {
+ x, y, z, w,
+ };
+ DoUniform4iv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform4ivImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform4ivImmediate& c =
+ *static_cast<const gles2::cmds::Uniform4ivImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLint), 4, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLint* v =
+ GetImmediateDataAs<const GLint*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform4iv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniformMatrix2fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::UniformMatrix2fvImmediate& c =
+ *static_cast<const gles2::cmds::UniformMatrix2fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ GLboolean transpose = static_cast<GLboolean>(c.transpose);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 4, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* value =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (value == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniformMatrix2fv(location, count, transpose, value);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniformMatrix3fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::UniformMatrix3fvImmediate& c =
+ *static_cast<const gles2::cmds::UniformMatrix3fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ GLboolean transpose = static_cast<GLboolean>(c.transpose);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 9, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* value =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (value == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniformMatrix3fv(location, count, transpose, value);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniformMatrix4fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::UniformMatrix4fvImmediate& c =
+ *static_cast<const gles2::cmds::UniformMatrix4fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ GLboolean transpose = static_cast<GLboolean>(c.transpose);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 16, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* value =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (value == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniformMatrix4fv(location, count, transpose, value);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUseProgram(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::UseProgram& c =
+ *static_cast<const gles2::cmds::UseProgram*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ DoUseProgram(program);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleValidateProgram(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ValidateProgram& c =
+ *static_cast<const gles2::cmds::ValidateProgram*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ DoValidateProgram(program);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib1f(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib1f& c =
+ *static_cast<const gles2::cmds::VertexAttrib1f*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ DoVertexAttrib1f(indx, x);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib1fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib1fvImmediate& c =
+ *static_cast<const gles2::cmds::VertexAttrib1fvImmediate*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* values =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (values == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoVertexAttrib1fv(indx, values);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib2f(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib2f& c =
+ *static_cast<const gles2::cmds::VertexAttrib2f*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ DoVertexAttrib2f(indx, x, y);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib2fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib2fvImmediate& c =
+ *static_cast<const gles2::cmds::VertexAttrib2fvImmediate*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 2, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* values =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (values == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoVertexAttrib2fv(indx, values);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib3f(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib3f& c =
+ *static_cast<const gles2::cmds::VertexAttrib3f*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ GLfloat z = static_cast<GLfloat>(c.z);
+ DoVertexAttrib3f(indx, x, y, z);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib3fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib3fvImmediate& c =
+ *static_cast<const gles2::cmds::VertexAttrib3fvImmediate*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 3, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* values =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (values == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoVertexAttrib3fv(indx, values);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib4f(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib4f& c =
+ *static_cast<const gles2::cmds::VertexAttrib4f*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ GLfloat z = static_cast<GLfloat>(c.z);
+ GLfloat w = static_cast<GLfloat>(c.w);
+ DoVertexAttrib4f(indx, x, y, z, w);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib4fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib4fvImmediate& c =
+ *static_cast<const gles2::cmds::VertexAttrib4fvImmediate*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 4, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* values =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (values == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoVertexAttrib4fv(indx, values);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleViewport(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Viewport& c =
+ *static_cast<const gles2::cmds::Viewport*>(cmd_data);
+ (void)c;
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glViewport", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glViewport", "height < 0");
+ return error::kNoError;
+ }
+ DoViewport(x, y, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlitFramebufferCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlitFramebufferCHROMIUM& c =
+ *static_cast<const gles2::cmds::BlitFramebufferCHROMIUM*>(cmd_data);
+ (void)c;
+ if (!features().chromium_framebuffer_multisample) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBlitFramebufferCHROMIUM",
+ "function not available");
+ return error::kNoError;
+ }
+
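+ // Ensure the bound draw and read framebuffers may be accessed before blitting.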
+ error::Error error;
+ error = WillAccessBoundFramebufferForDraw();
+ if (error != error::kNoError)
+ return error;
+ error = WillAccessBoundFramebufferForRead();
+ if (error != error::kNoError)
+ return error;
+ GLint srcX0 = static_cast<GLint>(c.srcX0);
+ GLint srcY0 = static_cast<GLint>(c.srcY0);
+ GLint srcX1 = static_cast<GLint>(c.srcX1);
+ GLint srcY1 = static_cast<GLint>(c.srcY1);
+ GLint dstX0 = static_cast<GLint>(c.dstX0);
+ GLint dstY0 = static_cast<GLint>(c.dstY0);
+ GLint dstX1 = static_cast<GLint>(c.dstX1);
+ GLint dstY1 = static_cast<GLint>(c.dstY1);
+ GLbitfield mask = static_cast<GLbitfield>(c.mask);
+ GLenum filter = static_cast<GLenum>(c.filter);
+ if (!validators_->blit_filter.IsValid(filter)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBlitFramebufferCHROMIUM", filter, "filter");
+ return error::kNoError;
+ }
+ DoBlitFramebufferCHROMIUM(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleRenderbufferStorageMultisampleCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::RenderbufferStorageMultisampleCHROMIUM& c =
+ *static_cast<const gles2::cmds::RenderbufferStorageMultisampleCHROMIUM*>(
+ cmd_data);
+ (void)c;
+ if (!features().chromium_framebuffer_multisample) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei samples = static_cast<GLsizei>(c.samples);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glRenderbufferStorageMultisampleCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (samples < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "samples < 0");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glRenderbufferStorageMultisampleCHROMIUM",
+ internalformat,
+ "internalformat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "height < 0");
+ return error::kNoError;
+ }
+ DoRenderbufferStorageMultisampleCHROMIUM(
+ target, samples, internalformat, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleRenderbufferStorageMultisampleEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::RenderbufferStorageMultisampleEXT& c =
+ *static_cast<const gles2::cmds::RenderbufferStorageMultisampleEXT*>(
+ cmd_data);
+ (void)c;
+ if (!features().multisampled_render_to_texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glRenderbufferStorageMultisampleEXT",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei samples = static_cast<GLsizei>(c.samples);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glRenderbufferStorageMultisampleEXT", target, "target");
+ return error::kNoError;
+ }
+ if (samples < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "samples < 0");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glRenderbufferStorageMultisampleEXT",
+ internalformat,
+ "internalformat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "height < 0");
+ return error::kNoError;
+ }
+ DoRenderbufferStorageMultisampleEXT(
+ target, samples, internalformat, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFramebufferTexture2DMultisampleEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::FramebufferTexture2DMultisampleEXT& c =
+ *static_cast<const gles2::cmds::FramebufferTexture2DMultisampleEXT*>(
+ cmd_data);
+ (void)c;
+ if (!features().multisampled_render_to_texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glFramebufferTexture2DMultisampleEXT",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum attachment = static_cast<GLenum>(c.attachment);
+ GLenum textarget = static_cast<GLenum>(c.textarget);
+ GLuint texture = c.texture;
+ GLint level = static_cast<GLint>(c.level);
+ GLsizei samples = static_cast<GLsizei>(c.samples);
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferTexture2DMultisampleEXT", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->attachment.IsValid(attachment)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferTexture2DMultisampleEXT", attachment, "attachment");
+ return error::kNoError;
+ }
+ if (!validators_->texture_target.IsValid(textarget)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferTexture2DMultisampleEXT", textarget, "textarget");
+ return error::kNoError;
+ }
+ if (samples < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glFramebufferTexture2DMultisampleEXT",
+ "samples < 0");
+ return error::kNoError;
+ }
+ DoFramebufferTexture2DMultisample(
+ target, attachment, textarget, texture, level, samples);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexStorage2DEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexStorage2DEXT& c =
+ *static_cast<const gles2::cmds::TexStorage2DEXT*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei levels = static_cast<GLsizei>(c.levels);
+ GLenum internalFormat = static_cast<GLenum>(c.internalFormat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexStorage2DEXT", target, "target");
+ return error::kNoError;
+ }
+ if (levels < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DEXT", "levels < 0");
+ return error::kNoError;
+ }
+ if (!validators_->texture_internal_format_storage.IsValid(internalFormat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glTexStorage2DEXT", internalFormat, "internalFormat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DEXT", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DEXT", "height < 0");
+ return error::kNoError;
+ }
+ DoTexStorage2DEXT(target, levels, internalFormat, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenQueriesEXTImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenQueriesEXTImmediate& c =
+ *static_cast<const gles2::cmds::GenQueriesEXTImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
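+ // SafeMultiplyUint32 guards against overflow when sizing the id array.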
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* queries =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (queries == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenQueriesEXTHelper(n, queries)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteQueriesEXTImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteQueriesEXTImmediate& c =
+ *static_cast<const gles2::cmds::DeleteQueriesEXTImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* queries =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (queries == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteQueriesEXTHelper(n, queries);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleInsertEventMarkerEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::InsertEventMarkerEXT& c =
+ *static_cast<const gles2::cmds::InsertEventMarkerEXT*>(cmd_data);
+ (void)c;
+
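+ // The marker string arrives in a bucket; a missing or empty bucket is invalid.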
+ GLuint bucket_id = static_cast<GLuint>(c.bucket_id);
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string str;
+ if (!bucket->GetAsString(&str)) {
+ return error::kInvalidArguments;
+ }
+ DoInsertEventMarkerEXT(0, str.c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandlePushGroupMarkerEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::PushGroupMarkerEXT& c =
+ *static_cast<const gles2::cmds::PushGroupMarkerEXT*>(cmd_data);
+ (void)c;
+
+ GLuint bucket_id = static_cast<GLuint>(c.bucket_id);
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string str;
+ if (!bucket->GetAsString(&str)) {
+ return error::kInvalidArguments;
+ }
+ DoPushGroupMarkerEXT(0, str.c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandlePopGroupMarkerEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::PopGroupMarkerEXT& c =
+ *static_cast<const gles2::cmds::PopGroupMarkerEXT*>(cmd_data);
+ (void)c;
+ DoPopGroupMarkerEXT();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenVertexArraysOESImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenVertexArraysOESImmediate& c =
+ *static_cast<const gles2::cmds::GenVertexArraysOESImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* arrays =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (arrays == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenVertexArraysOESHelper(n, arrays)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteVertexArraysOESImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteVertexArraysOESImmediate& c =
+ *static_cast<const gles2::cmds::DeleteVertexArraysOESImmediate*>(
+ cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* arrays =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (arrays == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteVertexArraysOESHelper(n, arrays);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsVertexArrayOES(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsVertexArrayOES& c =
+ *static_cast<const gles2::cmds::IsVertexArrayOES*>(cmd_data);
+ (void)c;
+ GLuint array = c.array;
+ typedef cmds::IsVertexArrayOES::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsVertexArrayOES(array);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindVertexArrayOES(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindVertexArrayOES& c =
+ *static_cast<const gles2::cmds::BindVertexArrayOES*>(cmd_data);
+ (void)c;
+ GLuint array = c.array;
+ DoBindVertexArrayOES(array);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleSwapBuffers(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::SwapBuffers& c =
+ *static_cast<const gles2::cmds::SwapBuffers*>(cmd_data);
+ (void)c;
+ DoSwapBuffers();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetMaxValueInBufferCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetMaxValueInBufferCHROMIUM& c =
+ *static_cast<const gles2::cmds::GetMaxValueInBufferCHROMIUM*>(cmd_data);
+ (void)c;
+ GLuint buffer_id = c.buffer_id;
+ GLsizei count = static_cast<GLsizei>(c.count);
+ GLenum type = static_cast<GLenum>(c.type);
+ GLuint offset = static_cast<GLuint>(c.offset);
+ typedef cmds::GetMaxValueInBufferCHROMIUM::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetMaxValueInBufferCHROMIUM", "count < 0");
+ return error::kNoError;
+ }
+ if (!validators_->get_max_index_type.IsValid(type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetMaxValueInBufferCHROMIUM", type, "type");
+ return error::kNoError;
+ }
+ *result_dst = DoGetMaxValueInBufferCHROMIUM(buffer_id, count, type, offset);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexImageIOSurface2DCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexImageIOSurface2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::TexImageIOSurface2DCHROMIUM*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLuint ioSurfaceId = static_cast<GLuint>(c.ioSurfaceId);
+ GLuint plane = static_cast<GLuint>(c.plane);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glTexImageIOSurface2DCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glTexImageIOSurface2DCHROMIUM", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glTexImageIOSurface2DCHROMIUM", "height < 0");
+ return error::kNoError;
+ }
+ DoTexImageIOSurface2DCHROMIUM(target, width, height, ioSurfaceId, plane);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCopyTextureCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CopyTextureCHROMIUM& c =
+ *static_cast<const gles2::cmds::CopyTextureCHROMIUM*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum source_id = static_cast<GLenum>(c.source_id);
+ GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLint level = static_cast<GLint>(c.level);
+ GLint internalformat = static_cast<GLint>(c.internalformat);
+ GLenum dest_type = static_cast<GLenum>(c.dest_type);
+ if (!validators_->texture_internal_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glCopyTextureCHROMIUM",
+ "internalformat GL_INVALID_VALUE");
+ return error::kNoError;
+ }
+ if (!validators_->pixel_type.IsValid(dest_type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCopyTextureCHROMIUM", dest_type, "dest_type");
+ return error::kNoError;
+ }
+ DoCopyTextureCHROMIUM(
+ target, source_id, dest_id, level, internalformat, dest_type);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleProduceTextureCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ProduceTextureCHROMIUMImmediate& c =
+ *static_cast<const gles2::cmds::ProduceTextureCHROMIUMImmediate*>(
+ cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
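+ // The mailbox name is carried as 64 bytes of immediate data.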
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLbyte), 64, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLbyte* mailbox =
+ GetImmediateDataAs<const GLbyte*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glProduceTextureCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (mailbox == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoProduceTextureCHROMIUM(target, mailbox);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleProduceTextureDirectCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ProduceTextureDirectCHROMIUMImmediate& c =
+ *static_cast<const gles2::cmds::ProduceTextureDirectCHROMIUMImmediate*>(
+ cmd_data);
+ (void)c;
+ GLuint texture = c.texture;
+ GLenum target = static_cast<GLenum>(c.target);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLbyte), 64, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLbyte* mailbox =
+ GetImmediateDataAs<const GLbyte*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glProduceTextureDirectCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (mailbox == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoProduceTextureDirectCHROMIUM(texture, target, mailbox);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleConsumeTextureCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ConsumeTextureCHROMIUMImmediate& c =
+ *static_cast<const gles2::cmds::ConsumeTextureCHROMIUMImmediate*>(
+ cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLbyte), 64, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLbyte* mailbox =
+ GetImmediateDataAs<const GLbyte*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glConsumeTextureCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (mailbox == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoConsumeTextureCHROMIUM(target, mailbox);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindTexImage2DCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindTexImage2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::BindTexImage2DCHROMIUM*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint imageId = static_cast<GLint>(c.imageId);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBindTexImage2DCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ DoBindTexImage2DCHROMIUM(target, imageId);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleReleaseTexImage2DCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ReleaseTexImage2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::ReleaseTexImage2DCHROMIUM*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint imageId = static_cast<GLint>(c.imageId);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glReleaseTexImage2DCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ DoReleaseTexImage2DCHROMIUM(target, imageId);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTraceEndCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TraceEndCHROMIUM& c =
+ *static_cast<const gles2::cmds::TraceEndCHROMIUM*>(cmd_data);
+ (void)c;
+ DoTraceEndCHROMIUM();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDiscardFramebufferEXTImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DiscardFramebufferEXTImmediate& c =
+ *static_cast<const gles2::cmds::DiscardFramebufferEXTImmediate*>(
+ cmd_data);
+ (void)c;
+ if (!features().ext_discard_framebuffer) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glDiscardFramebufferEXT",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLenum), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLenum* attachments =
+ GetImmediateDataAs<const GLenum*>(c, data_size, immediate_data_size);
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glDiscardFramebufferEXT", "count < 0");
+ return error::kNoError;
+ }
+ if (attachments == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoDiscardFramebufferEXT(target, count, attachments);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleLoseContextCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::LoseContextCHROMIUM& c =
+ *static_cast<const gles2::cmds::LoseContextCHROMIUM*>(cmd_data);
+ (void)c;
+ GLenum current = static_cast<GLenum>(c.current);
+ GLenum other = static_cast<GLenum>(c.other);
+ if (!validators_->reset_status.IsValid(current)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glLoseContextCHROMIUM", current, "current");
+ return error::kNoError;
+ }
+ if (!validators_->reset_status.IsValid(other)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glLoseContextCHROMIUM", other, "other");
+ return error::kNoError;
+ }
+ DoLoseContextCHROMIUM(current, other);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDrawBuffersEXTImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DrawBuffersEXTImmediate& c =
+ *static_cast<const gles2::cmds::DrawBuffersEXTImmediate*>(cmd_data);
+ (void)c;
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLenum), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLenum* bufs =
+ GetImmediateDataAs<const GLenum*>(c, data_size, immediate_data_size);
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glDrawBuffersEXT", "count < 0");
+ return error::kNoError;
+ }
+ if (bufs == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoDrawBuffersEXT(count, bufs);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleMatrixLoadfCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::MatrixLoadfCHROMIUMImmediate& c =
+ *static_cast<const gles2::cmds::MatrixLoadfCHROMIUMImmediate*>(cmd_data);
+ (void)c;
+ if (!features().chromium_path_rendering) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glMatrixLoadfCHROMIUM",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum matrixMode = static_cast<GLenum>(c.matrixMode);
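+ // The 4x4 matrix is passed as 16 GLfloats of immediate data.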
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 16, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* m =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (!validators_->matrix_mode.IsValid(matrixMode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glMatrixLoadfCHROMIUM", matrixMode, "matrixMode");
+ return error::kNoError;
+ }
+ if (m == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoMatrixLoadfCHROMIUM(matrixMode, m);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleMatrixLoadIdentityCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::MatrixLoadIdentityCHROMIUM& c =
+ *static_cast<const gles2::cmds::MatrixLoadIdentityCHROMIUM*>(cmd_data);
+ (void)c;
+ if (!features().chromium_path_rendering) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glMatrixLoadIdentityCHROMIUM",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum matrixMode = static_cast<GLenum>(c.matrixMode);
+ if (!validators_->matrix_mode.IsValid(matrixMode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glMatrixLoadIdentityCHROMIUM", matrixMode, "matrixMode");
+ return error::kNoError;
+ }
+ DoMatrixLoadIdentityCHROMIUM(matrixMode);
+ return error::kNoError;
+}
+
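+// Records the requested capability state and returns true when the caller
+// still needs to update the driver (the cached value differs or cached state
+// is being ignored). GL_DEPTH_TEST and GL_STENCIL_TEST always return false;
+// they mark the clear state dirty so the enables are applied later with the
+// deferred clear state.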
+bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
+ switch (cap) {
+ case GL_BLEND:
+ state_.enable_flags.blend = enabled;
+ if (state_.enable_flags.cached_blend != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_blend = enabled;
+ return true;
+ }
+ return false;
+ case GL_CULL_FACE:
+ state_.enable_flags.cull_face = enabled;
+ if (state_.enable_flags.cached_cull_face != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_cull_face = enabled;
+ return true;
+ }
+ return false;
+ case GL_DEPTH_TEST:
+ state_.enable_flags.depth_test = enabled;
+ if (state_.enable_flags.cached_depth_test != enabled ||
+ state_.ignore_cached_state) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return false;
+ case GL_DITHER:
+ state_.enable_flags.dither = enabled;
+ if (state_.enable_flags.cached_dither != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_dither = enabled;
+ return true;
+ }
+ return false;
+ case GL_POLYGON_OFFSET_FILL:
+ state_.enable_flags.polygon_offset_fill = enabled;
+ if (state_.enable_flags.cached_polygon_offset_fill != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_polygon_offset_fill = enabled;
+ return true;
+ }
+ return false;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ state_.enable_flags.sample_alpha_to_coverage = enabled;
+ if (state_.enable_flags.cached_sample_alpha_to_coverage != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_sample_alpha_to_coverage = enabled;
+ return true;
+ }
+ return false;
+ case GL_SAMPLE_COVERAGE:
+ state_.enable_flags.sample_coverage = enabled;
+ if (state_.enable_flags.cached_sample_coverage != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_sample_coverage = enabled;
+ return true;
+ }
+ return false;
+ case GL_SCISSOR_TEST:
+ state_.enable_flags.scissor_test = enabled;
+ if (state_.enable_flags.cached_scissor_test != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_scissor_test = enabled;
+ return true;
+ }
+ return false;
+ case GL_STENCIL_TEST:
+ state_.enable_flags.stencil_test = enabled;
+ if (state_.enable_flags.cached_stencil_test != enabled ||
+ state_.ignore_cached_state) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return false;
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc b/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc
new file mode 100644
index 0000000..ff93ba1
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+
+namespace gpu {
+namespace gles2 {
+
+MockGLES2Decoder::MockGLES2Decoder()
+ : GLES2Decoder() {
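+ // Harmless defaults for tests that set no explicit expectations:
+ // GetCommandName() returns an empty string and MakeCurrent() succeeds.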
+ ON_CALL(*this, GetCommandName(testing::_))
+ .WillByDefault(testing::Return(""));
+ ON_CALL(*this, MakeCurrent())
+ .WillByDefault(testing::Return(true));
+}
+
+MockGLES2Decoder::~MockGLES2Decoder() {}
+
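+// Forwards to the base AsyncAPIInterface::DoCommands so tests can exercise the
+// default command dispatch path through the mock.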
+error::Error MockGLES2Decoder::FakeDoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ return AsyncAPIInterface::DoCommands(
+ num_commands, buffer, num_entries, entries_processed);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_mock.h b/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
new file mode 100644
index 0000000..7346d8e
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
@@ -0,0 +1,137 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the mock GLES2Decoder class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_MOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_MOCK_H_
+
+#include <vector>
+
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "base/callback_forward.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "ui/gfx/size.h"
+
+namespace gfx {
+class GLContext;
+class GLSurface;
+}
+
+namespace gpu {
+namespace gles2 {
+
+class ContextGroup;
+class ErrorState;
+class QueryManager;
+struct ContextState;
+
+class MockGLES2Decoder : public GLES2Decoder {
+ public:
+ MockGLES2Decoder();
+ virtual ~MockGLES2Decoder();
+
+ error::Error FakeDoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed);
+
+ MOCK_METHOD6(Initialize,
+ bool(const scoped_refptr<gfx::GLSurface>& surface,
+ const scoped_refptr<gfx::GLContext>& context,
+ bool offscreen,
+ const gfx::Size& size,
+ const DisallowedFeatures& disallowed_features,
+ const std::vector<int32>& attribs));
+ MOCK_METHOD1(Destroy, void(bool have_context));
+ MOCK_METHOD1(SetSurface, void(const scoped_refptr<gfx::GLSurface>& surface));
+ MOCK_METHOD1(ProduceFrontBuffer, void(const Mailbox& mailbox));
+ MOCK_METHOD1(ResizeOffscreenFrameBuffer, bool(const gfx::Size& size));
+ MOCK_METHOD0(MakeCurrent, bool());
+ MOCK_METHOD1(GetServiceIdForTesting, uint32(uint32 client_id));
+ MOCK_METHOD0(GetGLES2Util, GLES2Util*());
+ MOCK_METHOD0(GetGLSurface, gfx::GLSurface*());
+ MOCK_METHOD0(GetGLContext, gfx::GLContext*());
+ MOCK_METHOD0(GetContextGroup, ContextGroup*());
+ MOCK_METHOD0(GetContextState, const ContextState*());
+ MOCK_METHOD0(GetCapabilities, Capabilities());
+ MOCK_METHOD0(ProcessPendingQueries, bool());
+ MOCK_METHOD0(HasMoreIdleWork, bool());
+ MOCK_METHOD0(PerformIdleWork, void());
+ MOCK_METHOD1(RestoreState, void(const ContextState* prev_state));
+ MOCK_CONST_METHOD0(RestoreActiveTexture, void());
+ MOCK_CONST_METHOD1(
+ RestoreAllTextureUnitBindings, void(const ContextState* state));
+ MOCK_CONST_METHOD1(
+ RestoreActiveTextureUnitBinding, void(unsigned int target));
+ MOCK_CONST_METHOD0(RestoreBufferBindings, void());
+ MOCK_CONST_METHOD0(RestoreFramebufferBindings, void());
+ MOCK_CONST_METHOD0(RestoreGlobalState, void());
+ MOCK_CONST_METHOD0(RestoreProgramBindings, void());
+ MOCK_METHOD0(RestoreRenderbufferBindings, void());
+ MOCK_CONST_METHOD1(RestoreTextureState, void(unsigned service_id));
+ MOCK_CONST_METHOD1(RestoreTextureUnitBindings, void(unsigned unit));
+ MOCK_CONST_METHOD0(ClearAllAttributes, void());
+ MOCK_CONST_METHOD0(RestoreAllAttributes, void());
+ MOCK_METHOD0(GetQueryManager, gpu::gles2::QueryManager*());
+ MOCK_METHOD0(GetVertexArrayManager, gpu::gles2::VertexArrayManager*());
+ MOCK_METHOD0(GetImageManager, gpu::gles2::ImageManager*());
+ MOCK_METHOD1(
+ SetResizeCallback, void(const base::Callback<void(gfx::Size, float)>&));
+ MOCK_METHOD0(GetAsyncPixelTransferDelegate,
+ AsyncPixelTransferDelegate*());
+ MOCK_METHOD0(GetAsyncPixelTransferManager,
+ AsyncPixelTransferManager*());
+ MOCK_METHOD0(ResetAsyncPixelTransferManagerForTest, void());
+ MOCK_METHOD1(SetAsyncPixelTransferManagerForTest,
+ void(AsyncPixelTransferManager*));
+ MOCK_METHOD1(SetIgnoreCachedStateForTest, void(bool ignore));
+ MOCK_METHOD3(DoCommand, error::Error(unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data));
+ MOCK_METHOD4(DoCommands,
+ error::Error(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed));
+ MOCK_METHOD2(GetServiceTextureId, bool(uint32 client_texture_id,
+ uint32* service_texture_id));
+ MOCK_METHOD0(GetContextLostReason, error::ContextLostReason());
+ MOCK_CONST_METHOD1(GetCommandName, const char*(unsigned int command_id));
+ MOCK_METHOD10(ClearLevel, bool(
+ unsigned service_id,
+ unsigned bind_target,
+ unsigned target,
+ int level,
+ unsigned internal_format,
+ unsigned format,
+ unsigned type,
+ int width,
+ int height,
+ bool is_texture_immutable));
+ MOCK_METHOD0(GetErrorState, ErrorState *());
+
+ MOCK_METHOD0(GetLogger, Logger*());
+ MOCK_METHOD1(SetShaderCacheCallback,
+ void(const ShaderCacheCallback& callback));
+ MOCK_METHOD1(SetWaitSyncPointCallback,
+ void(const WaitSyncPointCallback& callback));
+ MOCK_METHOD1(WaitForReadPixels,
+ void(base::Closure callback));
+ MOCK_METHOD0(GetTextureUploadCount, uint32());
+ MOCK_METHOD0(GetTotalTextureUploadTime, base::TimeDelta());
+ MOCK_METHOD0(GetTotalProcessingCommandsTime, base::TimeDelta());
+ MOCK_METHOD1(AddProcessingCommandsTime, void(base::TimeDelta));
+ MOCK_METHOD0(WasContextLost, bool());
+ MOCK_METHOD0(WasContextLostByRobustnessExtension, bool());
+ MOCK_METHOD1(LoseContext, void(uint32 reset_status));
+
+ DISALLOW_COPY_AND_ASSIGN(MockGLES2Decoder);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_MOCK_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
new file mode 100644
index 0000000..e97b4c4
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -0,0 +1,1317 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+void GLES2DecoderRGBBackbufferTest::SetUp() {
+ // Test codepath with workaround clear_alpha_in_readpixels because
+ // ReadPixelsEmulator emulates the incorrect driver behavior.
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::CLEAR_ALPHA_IN_READPIXELS));
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoderWithCommandLine(init, &command_line);
+ SetupDefaultProgram();
+}
+
+// Override the default setup so nothing gets set up.

+void GLES2DecoderManualInitTest::SetUp() {
+}
+
+void GLES2DecoderManualInitTest::EnableDisableTest(GLenum cap,
+ bool enable,
+ bool expect_set) {
+ if (expect_set) {
+ SetupExpectationsForEnableDisable(cap, enable);
+ }
+ if (enable) {
+ Enable cmd;
+ cmd.Init(cap);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ } else {
+ Disable cmd;
+ cmd.Init(cap);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+}
+
+TEST_P(GLES2DecoderTest, GetIntegervCached) {
+ struct TestInfo {
+ GLenum pname;
+ GLint expected;
+ };
+ TestInfo tests[] = {
+ {
+ GL_MAX_TEXTURE_SIZE, TestHelper::kMaxTextureSize,
+ },
+ {
+ GL_MAX_CUBE_MAP_TEXTURE_SIZE, TestHelper::kMaxCubeMapTextureSize,
+ },
+ {
+ GL_MAX_RENDERBUFFER_SIZE, TestHelper::kMaxRenderbufferSize,
+ },
+ };
+ typedef GetIntegerv::Result Result;
+ for (size_t ii = 0; ii < sizeof(tests) / sizeof(tests[0]); ++ii) {
+ const TestInfo& test = tests[ii];
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetIntegerv(test.pname, _)).Times(0);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(test.pname, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(test.pname),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(test.expected, result->GetData()[0]);
+ }
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetMaxValueInBufferCHROMIUM) {
+ SetupIndexBuffer();
+ GetMaxValueInBufferCHROMIUM::Result* result =
+ static_cast<GetMaxValueInBufferCHROMIUM::Result*>(shared_memory_address_);
+ *result = 0;
+
+ GetMaxValueInBufferCHROMIUM cmd;
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(7u, *result);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(100u, *result);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(kInvalidClientId,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(client_element_buffer_id_,
+ kOutOfRangeIndexRangeEnd,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kOutOfRangeIndexRangeEnd * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, IsBuffer) {
+ EXPECT_FALSE(DoIsBuffer(client_buffer_id_));
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ EXPECT_TRUE(DoIsBuffer(client_buffer_id_));
+ DoDeleteBuffer(client_buffer_id_, kServiceBufferId);
+ EXPECT_FALSE(DoIsBuffer(client_buffer_id_));
+}
+
+TEST_P(GLES2DecoderTest, IsFramebuffer) {
+ EXPECT_FALSE(DoIsFramebuffer(client_framebuffer_id_));
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_TRUE(DoIsFramebuffer(client_framebuffer_id_));
+ DoDeleteFramebuffer(client_framebuffer_id_,
+ kServiceFramebufferId,
+ true,
+ GL_FRAMEBUFFER,
+ 0,
+ true,
+ GL_FRAMEBUFFER,
+ 0);
+ EXPECT_FALSE(DoIsFramebuffer(client_framebuffer_id_));
+}
+
+TEST_P(GLES2DecoderTest, IsProgram) {
+ // IsProgram is true as soon as the program is created.
+ EXPECT_TRUE(DoIsProgram(client_program_id_));
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ DoDeleteProgram(client_program_id_, kServiceProgramId);
+ EXPECT_FALSE(DoIsProgram(client_program_id_));
+}
+
+TEST_P(GLES2DecoderTest, IsRenderbuffer) {
+ EXPECT_FALSE(DoIsRenderbuffer(client_renderbuffer_id_));
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EXPECT_TRUE(DoIsRenderbuffer(client_renderbuffer_id_));
+ DoDeleteRenderbuffer(client_renderbuffer_id_, kServiceRenderbufferId);
+ EXPECT_FALSE(DoIsRenderbuffer(client_renderbuffer_id_));
+}
+
+TEST_P(GLES2DecoderTest, IsShader) {
+ // IsShader is true as soon as the shader is created.
+ EXPECT_TRUE(DoIsShader(client_shader_id_));
+ DoDeleteShader(client_shader_id_, kServiceShaderId);
+ EXPECT_FALSE(DoIsShader(client_shader_id_));
+}
+
+TEST_P(GLES2DecoderTest, IsTexture) {
+ EXPECT_FALSE(DoIsTexture(client_texture_id_));
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ EXPECT_TRUE(DoIsTexture(client_texture_id_));
+ DoDeleteTexture(client_texture_id_, kServiceTextureId);
+ EXPECT_FALSE(DoIsTexture(client_texture_id_));
+}
+
+TEST_P(GLES2DecoderTest, GetMultipleIntegervCHROMIUMValidArgs) {
+ const GLsizei kCount = 3;
+ GLenum* pnames = GetSharedMemoryAs<GLenum*>();
+ pnames[0] = GL_DEPTH_WRITEMASK;
+ pnames[1] = GL_COLOR_WRITEMASK;
+ pnames[2] = GL_STENCIL_WRITEMASK;
+ GLint* results =
+ GetSharedMemoryAsWithOffset<GLint*>(sizeof(*pnames) * kCount);
+
+ GLsizei num_results = 0;
+ for (GLsizei ii = 0; ii < kCount; ++ii) {
+ num_results += decoder_->GetGLES2Util()->GLGetNumValuesReturned(pnames[ii]);
+ }
+ const GLsizei result_size = num_results * sizeof(*results);
+ memset(results, 0, result_size);
+
+ const GLint kSentinel = 0x12345678;
+ results[num_results] = kSentinel;
+
+ GetMultipleIntegervCHROMIUM cmd;
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + sizeof(*pnames) * kCount,
+ result_size);
+
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, results[0]); // Depth writemask
+ EXPECT_EQ(1, results[1]); // color writemask red
+ EXPECT_EQ(1, results[2]); // color writemask green
+ EXPECT_EQ(1, results[3]); // color writemask blue
+ EXPECT_EQ(1, results[4]); // color writemask alpha
+ EXPECT_EQ(-1, results[5]); // stencil writemask
+ EXPECT_EQ(kSentinel, results[num_results]); // End of results
+}
+
+TEST_P(GLES2DecoderTest, GetMultipleIntegervCHROMIUMInvalidArgs) {
+ const GLsizei kCount = 3;
+ // Offset the pnames because GLGetError will use the first uint32.
+ const uint32 kPnameOffset = sizeof(uint32);
+ const uint32 kResultsOffset = kPnameOffset + sizeof(GLint) * kCount;
+ GLenum* pnames = GetSharedMemoryAsWithOffset<GLenum*>(kPnameOffset);
+ pnames[0] = GL_DEPTH_WRITEMASK;
+ pnames[1] = GL_COLOR_WRITEMASK;
+ pnames[2] = GL_STENCIL_WRITEMASK;
+ GLint* results = GetSharedMemoryAsWithOffset<GLint*>(kResultsOffset);
+
+ GLsizei num_results = 0;
+ for (GLsizei ii = 0; ii < kCount; ++ii) {
+ num_results += decoder_->GetGLES2Util()->GLGetNumValuesReturned(pnames[ii]);
+ }
+ const GLsizei result_size = num_results * sizeof(*results);
+ memset(results, 0, result_size);
+
+ const GLint kSentinel = 0x12345678;
+ results[num_results] = kSentinel;
+
+ GetMultipleIntegervCHROMIUM cmd;
+ // Check bad pnames pointer.
+ cmd.Init(kInvalidSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check bad pnames offset.
+ cmd.Init(kSharedMemoryId,
+ kInvalidSharedMemoryOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check bad count.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ static_cast<GLuint>(-1),
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check bad results pointer.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check bad results offset.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset,
+ result_size);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check bad size.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // Check bad size.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size - 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // Check bad enum.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size);
+ GLenum temp = pnames[2];
+ pnames[2] = GL_TRUE;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ pnames[2] = temp;
+ // Check the command fails if the results area was not zeroed by the client.
+ results[1] = 1;
+ EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
+ // Check buffer is what we expect
+ EXPECT_EQ(0, results[0]);
+ EXPECT_EQ(1, results[1]);
+ EXPECT_EQ(0, results[2]);
+ EXPECT_EQ(0, results[3]);
+ EXPECT_EQ(0, results[4]);
+ EXPECT_EQ(0, results[5]);
+ EXPECT_EQ(kSentinel, results[num_results]); // End of results
+}
+
+TEST_P(GLES2DecoderManualInitTest, BindGeneratesResourceFalse) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ BindBuffer cmd2;
+ cmd2.Init(GL_ARRAY_BUFFER, kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ BindFramebuffer cmd3;
+ cmd3.Init(GL_FRAMEBUFFER, kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd3));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ BindRenderbuffer cmd4;
+ cmd4.Init(GL_RENDERBUFFER, kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd4));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, EnableFeatureCHROMIUMBadBucket) {
+ const uint32 kBadBucketId = 123;
+ EnableFeatureCHROMIUM cmd;
+ cmd.Init(kBadBucketId, shared_memory_id_, shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, RequestExtensionCHROMIUMBadBucket) {
+ const uint32 kBadBucketId = 123;
+ RequestExtensionCHROMIUM cmd;
+ cmd.Init(kBadBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, BeginQueryEXTDisabled) {
+ // Test that BeginQueryEXT fails when queries are not enabled.
+}
+
+TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXT) {
+ InitState init;
+ init.extensions = "GL_EXT_occlusion_query_boolean";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ // Test end fails if no begin.
+ EndQueryEXT end_cmd;
+ end_cmd.Init(GL_ANY_SAMPLES_PASSED_EXT, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ BeginQueryEXT begin_cmd;
+
+ // Test id = 0 fails.
+ begin_cmd.Init(
+ GL_ANY_SAMPLES_PASSED_EXT, 0, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ GenHelper<GenQueriesEXTImmediate>(kNewClientId);
+
+ // Test valid parameters work.
+ EXPECT_CALL(*gl_, GenQueriesARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BeginQueryARB(GL_ANY_SAMPLES_PASSED_EXT, kNewServiceId))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ // Query object should not be created until BeginQueryEXT.
+ QueryManager* query_manager = decoder_->GetQueryManager();
+ ASSERT_TRUE(query_manager != NULL);
+ QueryManager::Query* query = query_manager->GetQuery(kNewClientId);
+ EXPECT_TRUE(query == NULL);
+
+ // BeginQueryEXT should fail if id is not generated from GenQueriesEXT.
+ begin_cmd.Init(GL_ANY_SAMPLES_PASSED_EXT,
+ kInvalidClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ begin_cmd.Init(GL_ANY_SAMPLES_PASSED_EXT,
+ kNewClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // After BeginQueryEXT the id should have a query object associated with it.
+ query = query_manager->GetQuery(kNewClientId);
+ ASSERT_TRUE(query != NULL);
+ EXPECT_FALSE(query->pending());
+
+ // Test that beginning again fails.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test end fails with different target
+ end_cmd.Init(GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test end succeeds
+ EXPECT_CALL(*gl_, EndQueryARB(GL_ANY_SAMPLES_PASSED_EXT))
+ .Times(1)
+ .RetiresOnSaturation();
+ end_cmd.Init(GL_ANY_SAMPLES_PASSED_EXT, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(query->pending());
+
+ EXPECT_CALL(*gl_, DeleteQueriesARB(1, _)).Times(1).RetiresOnSaturation();
+}
+
+struct QueryType {
+ GLenum type;
+ bool is_gl;
+};
+
+const QueryType kQueryTypes[] = {
+ {GL_COMMANDS_ISSUED_CHROMIUM, false},
+ {GL_LATENCY_QUERY_CHROMIUM, false},
+ {GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM, false},
+ {GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM, false},
+ {GL_GET_ERROR_QUERY_CHROMIUM, false},
+ {GL_COMMANDS_COMPLETED_CHROMIUM, false},
+ {GL_ANY_SAMPLES_PASSED_EXT, true},
+};
+
+static void CheckBeginEndQueryBadMemoryFails(GLES2DecoderTestBase* test,
+ GLuint client_id,
+ GLuint service_id,
+ const QueryType& query_type,
+ int32 shm_id,
+ uint32 shm_offset) {
+ // We need to reset the decoder on each iteration, because we lose the
+ // context every time.
+ GLES2DecoderTestBase::InitState init;
+ init.extensions = "GL_EXT_occlusion_query_boolean GL_ARB_sync";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ test->InitDecoder(init);
+ ::testing::StrictMock< ::gfx::MockGLInterface>* gl = test->GetGLMock();
+
+ BeginQueryEXT begin_cmd;
+
+ test->GenHelper<GenQueriesEXTImmediate>(client_id);
+
+ if (query_type.is_gl) {
+ EXPECT_CALL(*gl, GenQueriesARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(service_id))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BeginQueryARB(query_type.type, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+
+ // Test bad shared memory fails
+ begin_cmd.Init(query_type.type, client_id, shm_id, shm_offset);
+ error::Error error1 = test->ExecuteCmd(begin_cmd);
+
+ if (query_type.is_gl) {
+ EXPECT_CALL(*gl, EndQueryARB(query_type.type))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (query_type.type == GL_GET_ERROR_QUERY_CHROMIUM) {
+ EXPECT_CALL(*gl, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+ GLsync kGlSync = reinterpret_cast<GLsync>(0xdeadbeef);
+ if (query_type.type == GL_COMMANDS_COMPLETED_CHROMIUM) {
+ EXPECT_CALL(*gl, Flush()).RetiresOnSaturation();
+ EXPECT_CALL(*gl, FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0))
+ .WillOnce(Return(kGlSync))
+ .RetiresOnSaturation();
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ }
+
+ EndQueryEXT end_cmd;
+ end_cmd.Init(query_type.type, 1);
+ error::Error error2 = test->ExecuteCmd(end_cmd);
+
+ if (query_type.is_gl) {
+ EXPECT_CALL(
+ *gl, GetQueryObjectuivARB(service_id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetQueryObjectuivARB(service_id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ }
+ if (query_type.type == GL_COMMANDS_COMPLETED_CHROMIUM) {
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ EXPECT_CALL(*gl, ClientWaitSync(kGlSync, _, _))
+ .WillOnce(Return(GL_ALREADY_SIGNALED))
+ .RetiresOnSaturation();
+ }
+
+ QueryManager* query_manager = test->GetDecoder()->GetQueryManager();
+ ASSERT_TRUE(query_manager != NULL);
+ bool process_success = query_manager->ProcessPendingQueries();
+
+ EXPECT_TRUE(error1 != error::kNoError || error2 != error::kNoError ||
+ !process_success);
+
+ if (query_type.is_gl) {
+ EXPECT_CALL(*gl, DeleteQueriesARB(1, _)).Times(1).RetiresOnSaturation();
+ }
+ if (query_type.type == GL_COMMANDS_COMPLETED_CHROMIUM) {
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ EXPECT_CALL(*gl, DeleteSync(kGlSync)).Times(1).RetiresOnSaturation();
+ }
+ test->ResetDecoder();
+}
+
+TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXTBadMemoryIdFails) {
+ for (size_t i = 0; i < arraysize(kQueryTypes); ++i) {
+ CheckBeginEndQueryBadMemoryFails(this,
+ kNewClientId,
+ kNewServiceId,
+ kQueryTypes[i],
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXTBadMemoryOffsetFails) {
+ for (size_t i = 0; i < arraysize(kQueryTypes); ++i) {
+ // Out-of-bounds.
+ CheckBeginEndQueryBadMemoryFails(this,
+ kNewClientId,
+ kNewServiceId,
+ kQueryTypes[i],
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ // Overflow.
+ CheckBeginEndQueryBadMemoryFails(this,
+ kNewClientId,
+ kNewServiceId,
+ kQueryTypes[i],
+ kSharedMemoryId,
+ 0xfffffffcu);
+ }
+}
+
+TEST_P(GLES2DecoderTest, BeginEndQueryEXTCommandsIssuedCHROMIUM) {
+ BeginQueryEXT begin_cmd;
+
+ GenHelper<GenQueriesEXTImmediate>(kNewClientId);
+
+ // Test valid parameters work.
+ begin_cmd.Init(GL_COMMANDS_ISSUED_CHROMIUM,
+ kNewClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ QueryManager* query_manager = decoder_->GetQueryManager();
+ ASSERT_TRUE(query_manager != NULL);
+ QueryManager::Query* query = query_manager->GetQuery(kNewClientId);
+ ASSERT_TRUE(query != NULL);
+ EXPECT_FALSE(query->pending());
+
+ // Test end succeeds
+ EndQueryEXT end_cmd;
+ end_cmd.Init(GL_COMMANDS_ISSUED_CHROMIUM, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_FALSE(query->pending());
+}
+
+TEST_P(GLES2DecoderTest, BeginEndQueryEXTGetErrorQueryCHROMIUM) {
+ BeginQueryEXT begin_cmd;
+
+ GenHelper<GenQueriesEXTImmediate>(kNewClientId);
+
+ // Test valid parameters work.
+ begin_cmd.Init(GL_GET_ERROR_QUERY_CHROMIUM,
+ kNewClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ QueryManager* query_manager = decoder_->GetQueryManager();
+ ASSERT_TRUE(query_manager != NULL);
+ QueryManager::Query* query = query_manager->GetQuery(kNewClientId);
+ ASSERT_TRUE(query != NULL);
+ EXPECT_FALSE(query->pending());
+
+ // Test end succeeds
+ QuerySync* sync = static_cast<QuerySync*>(shared_memory_address_);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_INVALID_VALUE))
+ .RetiresOnSaturation();
+
+ EndQueryEXT end_cmd;
+ end_cmd.Init(GL_GET_ERROR_QUERY_CHROMIUM, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_FALSE(query->pending());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE),
+ static_cast<GLenum>(sync->result));
+}
+
+TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXTCommandsCompletedCHROMIUM) {
+ InitState init;
+ init.extensions = "GL_EXT_occlusion_query_boolean GL_ARB_sync";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ GenHelper<GenQueriesEXTImmediate>(kNewClientId);
+
+ BeginQueryEXT begin_cmd;
+ begin_cmd.Init(GL_COMMANDS_COMPLETED_CHROMIUM,
+ kNewClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ QueryManager* query_manager = decoder_->GetQueryManager();
+ ASSERT_TRUE(query_manager != NULL);
+ QueryManager::Query* query = query_manager->GetQuery(kNewClientId);
+ ASSERT_TRUE(query != NULL);
+ EXPECT_FALSE(query->pending());
+
+ GLsync kGlSync = reinterpret_cast<GLsync>(0xdeadbeef);
+ EXPECT_CALL(*gl_, Flush()).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0))
+ .WillOnce(Return(kGlSync))
+ .RetiresOnSaturation();
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl_, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+
+ EndQueryEXT end_cmd;
+ end_cmd.Init(GL_COMMANDS_COMPLETED_CHROMIUM, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(query->pending());
+
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl_, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ EXPECT_CALL(*gl_, ClientWaitSync(kGlSync, _, _))
+ .WillOnce(Return(GL_TIMEOUT_EXPIRED))
+ .RetiresOnSaturation();
+ bool process_success = query_manager->ProcessPendingQueries();
+
+ EXPECT_TRUE(process_success);
+ EXPECT_TRUE(query->pending());
+
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl_, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ EXPECT_CALL(*gl_, ClientWaitSync(kGlSync, _, _))
+ .WillOnce(Return(GL_ALREADY_SIGNALED))
+ .RetiresOnSaturation();
+ process_success = query_manager->ProcessPendingQueries();
+
+ EXPECT_TRUE(process_success);
+ EXPECT_FALSE(query->pending());
+ QuerySync* sync = static_cast<QuerySync*>(shared_memory_address_);
+ EXPECT_EQ(static_cast<GLenum>(0), static_cast<GLenum>(sync->result));
+
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl_, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ EXPECT_CALL(*gl_, DeleteSync(kGlSync)).Times(1).RetiresOnSaturation();
+ ResetDecoder();
+}
+
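The test above models how a GL_COMMANDS_COMPLETED_CHROMIUM query is backed by an ARB_sync/ES3 fence: ending the query inserts a fence, and ProcessPendingQueries polls it with a zero timeout until it signals. A hedged sketch of that underlying GL pattern, in raw GL calls rather than Chromium's QueryManager:

// Insert a fence after the commands of interest, then poll it without blocking.
GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();  // make sure the fence is actually submitted to the GPU

GLenum status = glClientWaitSync(fence, 0, 0 /* timeout 0 == poll */);
if (status == GL_ALREADY_SIGNALED || status == GL_CONDITION_SATISFIED) {
  // Everything issued before the fence has completed; the query can resolve.
  glDeleteSync(fence);
} else {
  // GL_TIMEOUT_EXPIRED: leave the query pending and poll again later.
}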
+TEST_P(GLES2DecoderTest, IsEnabledReturnsCachedValue) {
+ // NOTE: There are no expectations because no GL functions should be
+ // called for DEPTH_TEST or STENCIL_TEST
+ static const GLenum kStates[] = {
+ GL_DEPTH_TEST, GL_STENCIL_TEST,
+ };
+ for (size_t ii = 0; ii < arraysize(kStates); ++ii) {
+ Enable enable_cmd;
+ GLenum state = kStates[ii];
+ enable_cmd.Init(state);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(enable_cmd));
+ IsEnabled::Result* result =
+ static_cast<IsEnabled::Result*>(shared_memory_address_);
+ IsEnabled is_enabled_cmd;
+ is_enabled_cmd.Init(state, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(is_enabled_cmd));
+ EXPECT_NE(0u, *result);
+ Disable disable_cmd;
+ disable_cmd.Init(state);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(disable_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(is_enabled_cmd));
+ EXPECT_EQ(0u, *result);
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, GpuMemoryManagerCHROMIUM) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ Texture* texture = GetTexture(client_texture_id_)->texture();
+ EXPECT_TRUE(texture != NULL);
+ EXPECT_TRUE(texture->pool() == GL_TEXTURE_POOL_UNMANAGED_CHROMIUM);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_POOL_CHROMIUM,
+ GL_TEXTURE_POOL_UNMANAGED_CHROMIUM);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_POOL_CHROMIUM,
+ GL_TEXTURE_POOL_MANAGED_CHROMIUM);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_TRUE(texture->pool() == GL_TEXTURE_POOL_MANAGED_CHROMIUM);
+
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_POOL_CHROMIUM, GL_NONE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+namespace {
+
+class SizeOnlyMemoryTracker : public MemoryTracker {
+ public:
+ SizeOnlyMemoryTracker() {
+ // These are the default textures: 1 for TEXTURE_2D and 6 faces for
+ // TEXTURE_CUBE_MAP, each counted as 4 bytes (a 1x1 RGBA level).
+ const size_t kInitialUnmanagedPoolSize = 7 * 4;
+ const size_t kInitialManagedPoolSize = 0;
+ pool_infos_[MemoryTracker::kUnmanaged].initial_size =
+ kInitialUnmanagedPoolSize;
+ pool_infos_[MemoryTracker::kManaged].initial_size = kInitialManagedPoolSize;
+ }
+
+ // Ensure a certain amount of GPU memory is free. Returns true on success.
+ MOCK_METHOD1(EnsureGPUMemoryAvailable, bool(size_t size_needed));
+
+ virtual void TrackMemoryAllocatedChange(size_t old_size,
+ size_t new_size,
+ Pool pool) {
+ PoolInfo& info = pool_infos_[pool];
+ info.size += new_size - old_size;
+ }
+
+ size_t GetPoolSize(Pool pool) {
+ const PoolInfo& info = pool_infos_[pool];
+ return info.size - info.initial_size;
+ }
+
+ private:
+ virtual ~SizeOnlyMemoryTracker() {}
+ struct PoolInfo {
+ PoolInfo() : initial_size(0), size(0) {}
+ size_t initial_size;
+ size_t size;
+ };
+ std::map<Pool, PoolInfo> pool_infos_;
+};
+
+} // anonymous namespace.
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerInitialSize) {
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ // Expect that the tracked size minus the initial size is 0.
+ EXPECT_EQ(0u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(0u, memory_tracker->GetPoolSize(MemoryTracker::kManaged));
+}
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerTexImage2D) {
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 4,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(64))
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(64u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check we get out of memory and no call to glTexImage2D if Ensure fails.
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(64))
+ .WillOnce(Return(false))
+ .RetiresOnSaturation();
+ TexImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_EQ(64u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+}
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerTexStorage2DEXT) {
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Check we get out of memory and no call to glTexStorage2DEXT
+ // if Ensure fails.
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(false))
+ .RetiresOnSaturation();
+ TexStorage2DEXT cmd;
+ cmd.Init(GL_TEXTURE_2D, 1, GL_RGBA8, 8, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerCopyTexImage2D) {
+ GLenum target = GL_TEXTURE_2D;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 4;
+ GLsizei height = 8;
+ GLint border = 0;
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ CopyTexImage2D(
+ target, level, internal_format, 0, 0, width, height, border))
+ .Times(1)
+ .RetiresOnSaturation();
+ CopyTexImage2D cmd;
+ cmd.Init(target, level, internal_format, 0, 0, width, height);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check we get out of memory and no call to glCopyTexImage2D if Ensure fails.
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(false))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+}
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerRenderbufferStorage) {
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(GL_RENDERBUFFER, GL_RGBA, 8, 4))
+ .Times(1)
+ .RetiresOnSaturation();
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 8, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ // Check we get out of memory and no call to glRenderbufferStorage if Ensure
+ // fails.
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(false))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+}
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerBufferData) {
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BufferData(GL_ARRAY_BUFFER, 128, _, GL_STREAM_DRAW))
+ .Times(1)
+ .RetiresOnSaturation();
+ BufferData cmd;
+ cmd.Init(GL_ARRAY_BUFFER, 128, 0, 0, GL_STREAM_DRAW);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kManaged));
+ // Check we get out of memory and no call to glBufferData if Ensure
+ // fails.
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(false))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kManaged));
+}
+
+TEST_P(GLES2DecoderManualInitTest, ImmutableCopyTexImage2D) {
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLint kLevel = 0;
+ const GLenum kInternalFormat = GL_RGBA;
+ const GLenum kSizedInternalFormat = GL_RGBA8;
+ const GLsizei kWidth = 4;
+ const GLsizei kHeight = 8;
+ const GLint kBorder = 0;
+ InitState init;
+ init.extensions = "GL_EXT_texture_storage";
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+ // CopyTexImage2D will call GetError an arbitrary number of times.
+ EXPECT_CALL(*gl_, GetError())
+ .Times(AtLeast(1));
+
+ EXPECT_CALL(*gl_,
+ CopyTexImage2D(
+ kTarget, kLevel, kInternalFormat, 0, 0, kWidth, kHeight,
+ kBorder))
+ .Times(1);
+
+ EXPECT_CALL(*gl_,
+ TexStorage2DEXT(
+ kTarget, kLevel, kSizedInternalFormat, kWidth, kHeight))
+ .Times(1);
+ CopyTexImage2D copy_cmd;
+ copy_cmd.Init(kTarget, kLevel, kInternalFormat, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(copy_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexStorage2DEXT storage_cmd;
+ storage_cmd.Init(kTarget, kLevel, kSizedInternalFormat, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(storage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // This should not invoke CopyTexImage2D.
+ copy_cmd.Init(kTarget, kLevel, kInternalFormat, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(copy_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
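The sequence above relies on the EXT_texture_storage rule that TexStorage2DEXT makes a texture's storage immutable, after which respecifying it with TexImage2D or CopyTexImage2D is rejected. A sketch of the same rule in raw GL calls (not the decoder's validation path):

glBindTexture(GL_TEXTURE_2D, tex);
glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 4, 8, 0);  // OK: storage still mutable
glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8, 4, 8);         // storage becomes immutable
glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 4, 8, 0);  // now GL_INVALID_OPERATION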
+TEST_P(GLES2DecoderTest, LoseContextCHROMIUMValidArgs) {
+ EXPECT_CALL(*mock_decoder_, LoseContext(GL_GUILTY_CONTEXT_RESET_ARB))
+ .Times(1);
+ cmds::LoseContextCHROMIUM cmd;
+ cmd.Init(GL_GUILTY_CONTEXT_RESET_ARB, GL_GUILTY_CONTEXT_RESET_ARB);
+ EXPECT_EQ(error::kLostContext, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, LoseContextCHROMIUMInvalidArgs0_0) {
+ EXPECT_CALL(*mock_decoder_, LoseContext(_))
+ .Times(0);
+ cmds::LoseContextCHROMIUM cmd;
+ cmd.Init(GL_NONE, GL_GUILTY_CONTEXT_RESET_ARB);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, LoseContextCHROMIUMInvalidArgs1_0) {
+ EXPECT_CALL(*mock_decoder_, LoseContext(_))
+ .Times(0);
+ cmds::LoseContextCHROMIUM cmd;
+ cmd.Init(GL_GUILTY_CONTEXT_RESET_ARB, GL_NONE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+class GLES2DecoderDoCommandsTest : public GLES2DecoderTest {
+ public:
+ GLES2DecoderDoCommandsTest() {
+ for (int i = 0; i < 3; i++) {
+ cmds_[i].Init(GL_BLEND);
+ }
+ entries_per_cmd_ = ComputeNumEntries(cmds_[0].ComputeSize());
+ }
+
+ void SetExpectationsForNCommands(int num_commands) {
+ for (int i = 0; i < num_commands; i++)
+ SetupExpectationsForEnableDisable(GL_BLEND, true);
+ }
+
+ protected:
+ Enable cmds_[3];
+ int entries_per_cmd_;
+};
+
+// Test that processing with 0 entries does nothing.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsOneOfZero) {
+ int num_processed = -1;
+ SetExpectationsForNCommands(0);
+ EXPECT_EQ(
+ error::kNoError,
+ decoder_->DoCommands(1, &cmds_, entries_per_cmd_ * 0, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, num_processed);
+}
+
+// Test processing at granularity of single commands.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsOneOfOne) {
+ int num_processed = -1;
+ SetExpectationsForNCommands(1);
+ EXPECT_EQ(
+ error::kNoError,
+ decoder_->DoCommands(1, &cmds_, entries_per_cmd_ * 1, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_, num_processed);
+}
+
+// Test processing at granularity of multiple commands.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsThreeOfThree) {
+ int num_processed = -1;
+ SetExpectationsForNCommands(3);
+ EXPECT_EQ(
+ error::kNoError,
+ decoder_->DoCommands(3, &cmds_, entries_per_cmd_ * 3, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_ * 3, num_processed);
+}
+
+// Test processing a request smaller than available entries.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsTwoOfThree) {
+ int num_processed = -1;
+ SetExpectationsForNCommands(2);
+ EXPECT_EQ(
+ error::kNoError,
+ decoder_->DoCommands(2, &cmds_, entries_per_cmd_ * 3, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_ * 2, num_processed);
+}
+
+// Test that processing stops on a command with size 0.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsZeroCmdSize) {
+ cmds_[1].header.size = 0;
+ int num_processed = -1;
+ SetExpectationsForNCommands(1);
+ EXPECT_EQ(
+ error::kInvalidSize,
+ decoder_->DoCommands(2, &cmds_, entries_per_cmd_ * 2, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_, num_processed);
+}
+
+// Test that processing stops on a command with size greater than available.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsOutOfBounds) {
+ int num_processed = -1;
+ SetExpectationsForNCommands(1);
+ EXPECT_EQ(error::kOutOfBounds,
+ decoder_->DoCommands(
+ 2, &cmds_, entries_per_cmd_ * 2 - 1, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_, num_processed);
+}
+
+// Test that commands with bad argument size are skipped without processing.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsBadArgSize) {
+ cmds_[1].header.size += 1;
+ int num_processed = -1;
+ SetExpectationsForNCommands(1);
+ EXPECT_EQ(error::kInvalidArguments,
+ decoder_->DoCommands(
+ 2, &cmds_, entries_per_cmd_ * 2 + 1, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_ + cmds_[1].header.size, num_processed);
+}
+
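The DoCommands tests above lean on the command-buffer entry accounting: one entry is 4 bytes and a command's header size is measured in entries, so num_entries bounds how far the parser may read. A hedged arithmetic sketch (the 8-byte Enable layout is an assumption for illustration, not taken from the command format headers):

// One command-buffer entry is 4 bytes.
const int kEntryBytes = 4;
// Assume an Enable command is a 4-byte header plus a 4-byte cap argument.
const int kEnableBytes = 8;
const int entries_per_cmd = kEnableBytes / kEntryBytes;  // == 2
// DoCommands(3, buffer, 3 * entries_per_cmd, &processed) can consume all three
// commands and reports processed == 3 * entries_per_cmd. A header size of 0
// stops parsing with kInvalidSize; a header size reaching past num_entries
// stops with kOutOfBounds, in both cases reporting the entries consumed so far.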
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest, ::testing::Bool());
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderWithShaderTest, ::testing::Bool());
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderManualInitTest, ::testing::Bool());
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderRGBBackbufferTest,
+ ::testing::Bool());
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderDoCommandsTest, ::testing::Bool());
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h
new file mode 100644
index 0000000..fea20ab
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h
@@ -0,0 +1,80 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_H_
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_context_stub_with_extensions.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+namespace base {
+class CommandLine;
+}
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2DecoderTest : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderTest() {}
+
+ protected:
+ void CheckReadPixelsOutOfRange(GLint in_read_x,
+ GLint in_read_y,
+ GLsizei in_read_width,
+ GLsizei in_read_height,
+ bool init);
+};
+
+class GLES2DecoderWithShaderTest : public GLES2DecoderWithShaderTestBase {
+ public:
+ GLES2DecoderWithShaderTest() : GLES2DecoderWithShaderTestBase() {}
+
+ void CheckTextureChangesMarkFBOAsNotComplete(bool bound_fbo);
+ void CheckRenderbufferChangesMarkFBOAsNotComplete(bool bound_fbo);
+};
+
+class GLES2DecoderRGBBackbufferTest : public GLES2DecoderWithShaderTest {
+ public:
+ GLES2DecoderRGBBackbufferTest() {}
+
+ virtual void SetUp();
+};
+
+class GLES2DecoderManualInitTest : public GLES2DecoderWithShaderTest {
+ public:
+ GLES2DecoderManualInitTest() {}
+
+ // Override the default setup so nothing gets set up.
+ virtual void SetUp();
+
+ void DirtyStateMaskTest(GLuint color_bits,
+ bool depth_mask,
+ GLuint front_stencil_mask,
+ GLuint back_stencil_mask);
+ void EnableDisableTest(GLenum cap, bool enable, bool expect_set);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h
new file mode 100644
index 0000000..0aca4df
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h
@@ -0,0 +1,105 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder_unittest_base.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_0_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_0_AUTOGEN_H_
+
+void GLES2DecoderTestBase::SetupInitCapabilitiesExpectations() {
+ ExpectEnableDisable(GL_BLEND, false);
+ ExpectEnableDisable(GL_CULL_FACE, false);
+ ExpectEnableDisable(GL_DEPTH_TEST, false);
+ ExpectEnableDisable(GL_DITHER, true);
+ ExpectEnableDisable(GL_POLYGON_OFFSET_FILL, false);
+ ExpectEnableDisable(GL_SAMPLE_ALPHA_TO_COVERAGE, false);
+ ExpectEnableDisable(GL_SAMPLE_COVERAGE, false);
+ ExpectEnableDisable(GL_SCISSOR_TEST, false);
+ ExpectEnableDisable(GL_STENCIL_TEST, false);
+}
+
+void GLES2DecoderTestBase::SetupInitStateExpectations() {
+ EXPECT_CALL(*gl_, BlendColor(0.0f, 0.0f, 0.0f, 0.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearColor(0.0f, 0.0f, 0.0f, 0.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearDepth(1.0f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearStencil(0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ColorMask(true, true, true, true))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, CullFace(GL_BACK)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DepthFunc(GL_LESS)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DepthMask(true)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DepthRange(0.0f, 1.0f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, FrontFace(GL_CCW)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, Hint(GL_GENERATE_MIPMAP_HINT, GL_DONT_CARE))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (group_->feature_info()->feature_flags().oes_standard_derivatives) {
+ EXPECT_CALL(*gl_,
+ Hint(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES, GL_DONT_CARE))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, LineWidth(1.0f)).Times(1).RetiresOnSaturation();
+ if (group_->feature_info()->feature_flags().chromium_path_rendering) {
+ EXPECT_CALL(*gl_, MatrixLoadfEXT(GL_PATH_MODELVIEW_CHROMIUM, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (group_->feature_info()->feature_flags().chromium_path_rendering) {
+ EXPECT_CALL(*gl_, MatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, PixelStorei(GL_PACK_ALIGNMENT, 4))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, PixelStorei(GL_UNPACK_ALIGNMENT, 4))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, PolygonOffset(0.0f, 0.0f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, SampleCoverage(1.0f, false)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ Scissor(kViewportX, kViewportY, kViewportWidth, kViewportHeight))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilFuncSeparate(GL_FRONT, GL_ALWAYS, 0, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilFuncSeparate(GL_BACK, GL_ALWAYS, 0, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilMaskSeparate(GL_FRONT, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilMaskSeparate(GL_BACK, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_KEEP))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_KEEP))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ Viewport(kViewportX, kViewportY, kViewportWidth, kViewportHeight))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_0_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
new file mode 100644
index 0000000..912e908
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
@@ -0,0 +1,306 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
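+// No-op callback passed to Program::Link() in the tests below; the shader
+// cache output is not exercised here.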
+void ShaderCacheCb(const std::string& key, const std::string& shader) {
+}
+} // namespace
+
+class GLES2DecoderTest1 : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderTest1() { }
+};
+
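+// Instantiate the parameterized tests once for each value of the bool test
+// parameter.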
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest1, ::testing::Bool());
+
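+// Each SpecializedSetup<> specialization below puts the decoder into the
+// state (object bindings, GetError expectations, etc.) that the
+// corresponding auto-generated tests require before issuing the command.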
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GenerateMipmap, 0>(
+ bool valid) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 16, 16, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ kSharedMemoryId, kSharedMemoryOffset);
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::CheckFramebufferStatus, 0>(
+ bool /* valid */) {
+ // Give it a valid framebuffer.
+ DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_,
+ kServiceRenderbufferId);
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+ DoRenderbufferStorage(
+ GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
+ DoFramebufferRenderbuffer(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER,
+ client_renderbuffer_id_, kServiceRenderbufferId, GL_NO_ERROR);
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Clear, 0>(bool valid) {
+ if (valid) {
+ SetupExpectationsForApplyingDefaultDirtyState();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::ColorMask, 0>(
+ bool /* valid */) {
+ // We bind a framebuffer for the colormask test since the framebuffer
+ // will be considered RGB.
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::CopyTexImage2D, 0>(
+ bool valid) {
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::CopyTexSubImage2D, 0>(
+ bool valid) {
+ if (valid) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 2, GL_RGBA, 16, 16, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ kSharedMemoryId, kSharedMemoryOffset);
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::DetachShader, 0>(bool valid) {
+ if (valid) {
+ EXPECT_CALL(*gl_,
+ AttachShader(kServiceProgramId, kServiceShaderId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::AttachShader attach_cmd;
+ attach_cmd.Init(client_program_id_, client_shader_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(
+ bool valid) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::FramebufferTexture2D, 0>(
+ bool valid) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<
+ cmds::GetBufferParameteriv, 0>(bool /* valid */) {
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<
+ cmds::GetFramebufferAttachmentParameteriv, 0>(bool /* valid */) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<
+ cmds::GetRenderbufferParameteriv, 0>(
+ bool /* valid */) {
+ DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_,
+ kServiceRenderbufferId);
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramiv, 0>(
+ bool valid) {
+ if (valid) {
+ // GetProgramiv calls ClearGLError and then GetError to make sure it
+ // actually got a value, so it can report correctly to the client.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramInfoLog, 0>(
+ bool /* valid */) {
+ const GLuint kClientVertexShaderId = 5001;
+ const GLuint kServiceVertexShaderId = 6001;
+ const GLuint kClientFragmentShaderId = 5002;
+ const GLuint kServiceFragmentShaderId = 6002;
+ const char* log = "hello"; // Matches auto-generated unit test.
+ DoCreateShader(
+ GL_VERTEX_SHADER, kClientVertexShaderId, kServiceVertexShaderId);
+ DoCreateShader(
+ GL_FRAGMENT_SHADER, kClientFragmentShaderId, kServiceFragmentShaderId);
+
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(kClientVertexShaderId), true);
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(kClientFragmentShaderId), true);
+
+ InSequence dummy;
+ EXPECT_CALL(*gl_,
+ AttachShader(kServiceProgramId, kServiceVertexShaderId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ AttachShader(kServiceProgramId, kServiceFragmentShaderId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, LinkProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_LINK_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(1));
+ EXPECT_CALL(*gl_,
+ GetProgramiv(kServiceProgramId, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(strlen(log) + 1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetProgramInfoLog(kServiceProgramId, strlen(log) + 1, _, _))
+ .WillOnce(DoAll(
+ SetArgumentPointee<2>(strlen(log)),
+ SetArrayArgument<3>(log, log + strlen(log) + 1)))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_ACTIVE_ATTRIBUTES, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(
+ *gl_,
+ GetProgramiv(kServiceProgramId, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_ACTIVE_UNIFORMS, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(
+ *gl_,
+ GetProgramiv(kServiceProgramId, GL_ACTIVE_UNIFORM_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+
+ Program* program = GetProgram(client_program_id_);
+ ASSERT_TRUE(program != NULL);
+
+ cmds::AttachShader attach_cmd;
+ attach_cmd.Init(client_program_id_, kClientVertexShaderId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+
+ attach_cmd.Init(client_program_id_, kClientFragmentShaderId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribfv, 0>(
+ bool valid) {
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ DoVertexAttribPointer(1, 1, GL_FLOAT, 0, 0);
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribiv, 0>(
+ bool valid) {
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ DoVertexAttribPointer(1, 1, GL_FLOAT, 0, 0);
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::RenderbufferStorage, 0>(
+ bool valid) {
+ DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_,
+ kServiceRenderbufferId);
+ if (valid) {
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ RenderbufferStorageEXT(GL_RENDERBUFFER, _, 3, 4))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
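+// The auto-generated tests for the commands covered by this file are
+// textually included here so they compile as part of this translation unit.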
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
new file mode 100644
index 0000000..b60bd3e
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
@@ -0,0 +1,1963 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder_unittest_1.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_1_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_1_AUTOGEN_H_
+
+// TODO(gman): ActiveTexture
+
+TEST_P(GLES2DecoderTest1, AttachShaderValidArgs) {
+ EXPECT_CALL(*gl_, AttachShader(kServiceProgramId, kServiceShaderId));
+ SpecializedSetup<cmds::AttachShader, 0>(true);
+ cmds::AttachShader cmd;
+ cmd.Init(client_program_id_, client_shader_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): BindAttribLocationBucket
+
+TEST_P(GLES2DecoderTest1, BindBufferValidArgs) {
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, kServiceBufferId));
+ SpecializedSetup<cmds::BindBuffer, 0>(true);
+ cmds::BindBuffer cmd;
+ cmd.Init(GL_ARRAY_BUFFER, client_buffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindBufferValidArgsNewId) {
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, kNewServiceId));
+ EXPECT_CALL(*gl_, GenBuffersARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ SpecializedSetup<cmds::BindBuffer, 0>(true);
+ cmds::BindBuffer cmd;
+ cmd.Init(GL_ARRAY_BUFFER, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetBuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, BindBufferInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BindBuffer(_, _)).Times(0);
+ SpecializedSetup<cmds::BindBuffer, 0>(false);
+ cmds::BindBuffer cmd;
+ cmd.Init(GL_RENDERBUFFER, client_buffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindFramebufferValidArgs) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER, kServiceFramebufferId));
+ SpecializedSetup<cmds::BindFramebuffer, 0>(true);
+ cmds::BindFramebuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER, client_framebuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindFramebufferValidArgsNewId) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER, kNewServiceId));
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ SpecializedSetup<cmds::BindFramebuffer, 0>(true);
+ cmds::BindFramebuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetFramebuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, BindFramebufferInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(_, _)).Times(0);
+ SpecializedSetup<cmds::BindFramebuffer, 0>(false);
+ cmds::BindFramebuffer cmd;
+ cmd.Init(GL_DRAW_FRAMEBUFFER, client_framebuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindFramebufferInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(_, _)).Times(0);
+ SpecializedSetup<cmds::BindFramebuffer, 0>(false);
+ cmds::BindFramebuffer cmd;
+ cmd.Init(GL_READ_FRAMEBUFFER, client_framebuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindRenderbufferValidArgs) {
+ EXPECT_CALL(*gl_,
+ BindRenderbufferEXT(GL_RENDERBUFFER, kServiceRenderbufferId));
+ SpecializedSetup<cmds::BindRenderbuffer, 0>(true);
+ cmds::BindRenderbuffer cmd;
+ cmd.Init(GL_RENDERBUFFER, client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindRenderbufferValidArgsNewId) {
+ EXPECT_CALL(*gl_, BindRenderbufferEXT(GL_RENDERBUFFER, kNewServiceId));
+ EXPECT_CALL(*gl_, GenRenderbuffersEXT(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ SpecializedSetup<cmds::BindRenderbuffer, 0>(true);
+ cmds::BindRenderbuffer cmd;
+ cmd.Init(GL_RENDERBUFFER, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetRenderbuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, BindRenderbufferInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BindRenderbufferEXT(_, _)).Times(0);
+ SpecializedSetup<cmds::BindRenderbuffer, 0>(false);
+ cmds::BindRenderbuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER, client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindTextureValidArgs) {
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kServiceTextureId));
+ SpecializedSetup<cmds::BindTexture, 0>(true);
+ cmds::BindTexture cmd;
+ cmd.Init(GL_TEXTURE_2D, client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindTextureValidArgsNewId) {
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kNewServiceId));
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ SpecializedSetup<cmds::BindTexture, 0>(true);
+ cmds::BindTexture cmd;
+ cmd.Init(GL_TEXTURE_2D, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetTexture(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, BindTextureInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BindTexture(_, _)).Times(0);
+ SpecializedSetup<cmds::BindTexture, 0>(false);
+ cmds::BindTexture cmd;
+ cmd.Init(GL_TEXTURE_1D, client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindTextureInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, BindTexture(_, _)).Times(0);
+ SpecializedSetup<cmds::BindTexture, 0>(false);
+ cmds::BindTexture cmd;
+ cmd.Init(GL_TEXTURE_3D, client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendColorValidArgs) {
+ EXPECT_CALL(*gl_, BlendColor(1, 2, 3, 4));
+ SpecializedSetup<cmds::BlendColor, 0>(true);
+ cmds::BlendColor cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationValidArgs) {
+ EXPECT_CALL(*gl_, BlendEquation(GL_FUNC_SUBTRACT));
+ SpecializedSetup<cmds::BlendEquation, 0>(true);
+ cmds::BlendEquation cmd;
+ cmd.Init(GL_FUNC_SUBTRACT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BlendEquation(_)).Times(0);
+ SpecializedSetup<cmds::BlendEquation, 0>(false);
+ cmds::BlendEquation cmd;
+ cmd.Init(GL_MIN);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, BlendEquation(_)).Times(0);
+ SpecializedSetup<cmds::BlendEquation, 0>(false);
+ cmds::BlendEquation cmd;
+ cmd.Init(GL_MAX);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationSeparateValidArgs) {
+ EXPECT_CALL(*gl_, BlendEquationSeparate(GL_FUNC_SUBTRACT, GL_FUNC_ADD));
+ SpecializedSetup<cmds::BlendEquationSeparate, 0>(true);
+ cmds::BlendEquationSeparate cmd;
+ cmd.Init(GL_FUNC_SUBTRACT, GL_FUNC_ADD);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationSeparateInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BlendEquationSeparate(_, _)).Times(0);
+ SpecializedSetup<cmds::BlendEquationSeparate, 0>(false);
+ cmds::BlendEquationSeparate cmd;
+ cmd.Init(GL_MIN, GL_FUNC_ADD);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationSeparateInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, BlendEquationSeparate(_, _)).Times(0);
+ SpecializedSetup<cmds::BlendEquationSeparate, 0>(false);
+ cmds::BlendEquationSeparate cmd;
+ cmd.Init(GL_MAX, GL_FUNC_ADD);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationSeparateInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, BlendEquationSeparate(_, _)).Times(0);
+ SpecializedSetup<cmds::BlendEquationSeparate, 0>(false);
+ cmds::BlendEquationSeparate cmd;
+ cmd.Init(GL_FUNC_SUBTRACT, GL_MIN);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationSeparateInvalidArgs1_1) {
+ EXPECT_CALL(*gl_, BlendEquationSeparate(_, _)).Times(0);
+ SpecializedSetup<cmds::BlendEquationSeparate, 0>(false);
+ cmds::BlendEquationSeparate cmd;
+ cmd.Init(GL_FUNC_SUBTRACT, GL_MAX);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendFuncValidArgs) {
+ EXPECT_CALL(*gl_, BlendFunc(GL_ZERO, GL_ZERO));
+ SpecializedSetup<cmds::BlendFunc, 0>(true);
+ cmds::BlendFunc cmd;
+ cmd.Init(GL_ZERO, GL_ZERO);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendFuncSeparateValidArgs) {
+ EXPECT_CALL(*gl_, BlendFuncSeparate(GL_ZERO, GL_ZERO, GL_ZERO, GL_ZERO));
+ SpecializedSetup<cmds::BlendFuncSeparate, 0>(true);
+ cmds::BlendFuncSeparate cmd;
+ cmd.Init(GL_ZERO, GL_ZERO, GL_ZERO, GL_ZERO);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): BufferData
+
+// TODO(gman): BufferSubData
+
+TEST_P(GLES2DecoderTest1, CheckFramebufferStatusValidArgs) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER));
+ SpecializedSetup<cmds::CheckFramebufferStatus, 0>(true);
+ cmds::CheckFramebufferStatus cmd;
+ cmd.Init(GL_FRAMEBUFFER, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CheckFramebufferStatusInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(_)).Times(0);
+ SpecializedSetup<cmds::CheckFramebufferStatus, 0>(false);
+ cmds::CheckFramebufferStatus cmd;
+ cmd.Init(GL_DRAW_FRAMEBUFFER, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CheckFramebufferStatusInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(_)).Times(0);
+ SpecializedSetup<cmds::CheckFramebufferStatus, 0>(false);
+ cmds::CheckFramebufferStatus cmd;
+ cmd.Init(GL_READ_FRAMEBUFFER, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CheckFramebufferStatusInvalidArgsBadSharedMemoryId) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER)).Times(0);
+ SpecializedSetup<cmds::CheckFramebufferStatus, 0>(false);
+ cmds::CheckFramebufferStatus cmd;
+ cmd.Init(GL_FRAMEBUFFER, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(GL_FRAMEBUFFER, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, ClearValidArgs) {
+ EXPECT_CALL(*gl_, Clear(1));
+ SpecializedSetup<cmds::Clear, 0>(true);
+ cmds::Clear cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, ClearColorValidArgs) {
+ EXPECT_CALL(*gl_, ClearColor(1, 2, 3, 4));
+ SpecializedSetup<cmds::ClearColor, 0>(true);
+ cmds::ClearColor cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, ClearDepthfValidArgs) {
+ EXPECT_CALL(*gl_, ClearDepth(0.5f));
+ SpecializedSetup<cmds::ClearDepthf, 0>(true);
+ cmds::ClearDepthf cmd;
+ cmd.Init(0.5f);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, ClearStencilValidArgs) {
+ EXPECT_CALL(*gl_, ClearStencil(1));
+ SpecializedSetup<cmds::ClearStencil, 0>(true);
+ cmds::ClearStencil cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, ColorMaskValidArgs) {
+ SpecializedSetup<cmds::ColorMask, 0>(true);
+ cmds::ColorMask cmd;
+ cmd.Init(true, true, true, true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): CompileShader
+// TODO(gman): CompressedTexImage2DBucket
+// TODO(gman): CompressedTexImage2D
+
+// TODO(gman): CompressedTexSubImage2DBucket
+// TODO(gman): CompressedTexSubImage2D
+
+// TODO(gman): CopyTexImage2D
+
+TEST_P(GLES2DecoderTest1, CopyTexSubImage2DValidArgs) {
+ EXPECT_CALL(*gl_, CopyTexSubImage2D(GL_TEXTURE_2D, 2, 3, 4, 5, 6, 7, 8));
+ SpecializedSetup<cmds::CopyTexSubImage2D, 0>(true);
+ cmds::CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 2, 3, 4, 5, 6, 7, 8);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CopyTexSubImage2DInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, CopyTexSubImage2D(_, _, _, _, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::CopyTexSubImage2D, 0>(false);
+ cmds::CopyTexSubImage2D cmd;
+ cmd.Init(GL_PROXY_TEXTURE_CUBE_MAP, 2, 3, 4, 5, 6, 7, 8);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CopyTexSubImage2DInvalidArgs6_0) {
+ EXPECT_CALL(*gl_, CopyTexSubImage2D(_, _, _, _, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::CopyTexSubImage2D, 0>(false);
+ cmds::CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 2, 3, 4, 5, 6, -1, 8);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CopyTexSubImage2DInvalidArgs7_0) {
+ EXPECT_CALL(*gl_, CopyTexSubImage2D(_, _, _, _, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::CopyTexSubImage2D, 0>(false);
+ cmds::CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 2, 3, 4, 5, 6, 7, -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CreateProgramValidArgs) {
+ EXPECT_CALL(*gl_, CreateProgram()).WillOnce(Return(kNewServiceId));
+ SpecializedSetup<cmds::CreateProgram, 0>(true);
+ cmds::CreateProgram cmd;
+ cmd.Init(kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetProgram(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, CreateShaderValidArgs) {
+ EXPECT_CALL(*gl_, CreateShader(GL_VERTEX_SHADER))
+ .WillOnce(Return(kNewServiceId));
+ SpecializedSetup<cmds::CreateShader, 0>(true);
+ cmds::CreateShader cmd;
+ cmd.Init(GL_VERTEX_SHADER, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetShader(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, CreateShaderInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, CreateShader(_)).Times(0);
+ SpecializedSetup<cmds::CreateShader, 0>(false);
+ cmds::CreateShader cmd;
+ cmd.Init(GL_GEOMETRY_SHADER, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CullFaceValidArgs) {
+ EXPECT_CALL(*gl_, CullFace(GL_FRONT));
+ SpecializedSetup<cmds::CullFace, 0>(true);
+ cmds::CullFace cmd;
+ cmd.Init(GL_FRONT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DeleteBuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, Pointee(kServiceBufferId))).Times(1);
+ cmds::DeleteBuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteBuffersImmediate>();
+ SpecializedSetup<cmds::DeleteBuffersImmediate, 0>(true);
+ cmd.Init(1, &client_buffer_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_buffer_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetBuffer(client_buffer_id_) == NULL);
+}
+
+TEST_P(GLES2DecoderTest1, DeleteBuffersImmediateInvalidArgs) {
+ cmds::DeleteBuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteBuffersImmediate>();
+ SpecializedSetup<cmds::DeleteBuffersImmediate, 0>(false);
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(GLES2DecoderTest1, DeleteFramebuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, Pointee(kServiceFramebufferId)))
+ .Times(1);
+ cmds::DeleteFramebuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteFramebuffersImmediate>();
+ SpecializedSetup<cmds::DeleteFramebuffersImmediate, 0>(true);
+ cmd.Init(1, &client_framebuffer_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_framebuffer_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetFramebuffer(client_framebuffer_id_) == NULL);
+}
+
+TEST_P(GLES2DecoderTest1, DeleteFramebuffersImmediateInvalidArgs) {
+ cmds::DeleteFramebuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteFramebuffersImmediate>();
+ SpecializedSetup<cmds::DeleteFramebuffersImmediate, 0>(false);
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(GLES2DecoderTest1, DeleteProgramValidArgs) {
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId));
+ SpecializedSetup<cmds::DeleteProgram, 0>(true);
+ cmds::DeleteProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DeleteRenderbuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, Pointee(kServiceRenderbufferId)))
+ .Times(1);
+ cmds::DeleteRenderbuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteRenderbuffersImmediate>();
+ SpecializedSetup<cmds::DeleteRenderbuffersImmediate, 0>(true);
+ cmd.Init(1, &client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_renderbuffer_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetRenderbuffer(client_renderbuffer_id_) == NULL);
+}
+
+TEST_P(GLES2DecoderTest1, DeleteRenderbuffersImmediateInvalidArgs) {
+ cmds::DeleteRenderbuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteRenderbuffersImmediate>();
+ SpecializedSetup<cmds::DeleteRenderbuffersImmediate, 0>(false);
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(GLES2DecoderTest1, DeleteShaderValidArgs) {
+ EXPECT_CALL(*gl_, DeleteShader(kServiceShaderId));
+ SpecializedSetup<cmds::DeleteShader, 0>(true);
+ cmds::DeleteShader cmd;
+ cmd.Init(client_shader_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DeleteTexturesImmediateValidArgs) {
+ EXPECT_CALL(*gl_, DeleteTextures(1, Pointee(kServiceTextureId))).Times(1);
+ cmds::DeleteTexturesImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteTexturesImmediate>();
+ SpecializedSetup<cmds::DeleteTexturesImmediate, 0>(true);
+ cmd.Init(1, &client_texture_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_texture_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetTexture(client_texture_id_) == NULL);
+}
+
+TEST_P(GLES2DecoderTest1, DeleteTexturesImmediateInvalidArgs) {
+ cmds::DeleteTexturesImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteTexturesImmediate>();
+ SpecializedSetup<cmds::DeleteTexturesImmediate, 0>(false);
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(GLES2DecoderTest1, DepthFuncValidArgs) {
+ EXPECT_CALL(*gl_, DepthFunc(GL_NEVER));
+ SpecializedSetup<cmds::DepthFunc, 0>(true);
+ cmds::DepthFunc cmd;
+ cmd.Init(GL_NEVER);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DepthMaskValidArgs) {
+ SpecializedSetup<cmds::DepthMask, 0>(true);
+ cmds::DepthMask cmd;
+ cmd.Init(true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DepthRangefValidArgs) {
+ EXPECT_CALL(*gl_, DepthRange(1, 2));
+ SpecializedSetup<cmds::DepthRangef, 0>(true);
+ cmds::DepthRangef cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DetachShaderValidArgs) {
+ EXPECT_CALL(*gl_, DetachShader(kServiceProgramId, kServiceShaderId));
+ SpecializedSetup<cmds::DetachShader, 0>(true);
+ cmds::DetachShader cmd;
+ cmd.Init(client_program_id_, client_shader_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DisableValidArgs) {
+ SetupExpectationsForEnableDisable(GL_BLEND, false);
+ SpecializedSetup<cmds::Disable, 0>(true);
+ cmds::Disable cmd;
+ cmd.Init(GL_BLEND);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DisableInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, Disable(_)).Times(0);
+ SpecializedSetup<cmds::Disable, 0>(false);
+ cmds::Disable cmd;
+ cmd.Init(GL_CLIP_PLANE0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DisableInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, Disable(_)).Times(0);
+ SpecializedSetup<cmds::Disable, 0>(false);
+ cmds::Disable cmd;
+ cmd.Init(GL_POINT_SPRITE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DisableVertexAttribArrayValidArgs) {
+ EXPECT_CALL(*gl_, DisableVertexAttribArray(1));
+ SpecializedSetup<cmds::DisableVertexAttribArray, 0>(true);
+ cmds::DisableVertexAttribArray cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): DrawArrays
+
+// TODO(gman): DrawElements
+
+TEST_P(GLES2DecoderTest1, EnableValidArgs) {
+ SetupExpectationsForEnableDisable(GL_BLEND, true);
+ SpecializedSetup<cmds::Enable, 0>(true);
+ cmds::Enable cmd;
+ cmd.Init(GL_BLEND);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, EnableInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, Enable(_)).Times(0);
+ SpecializedSetup<cmds::Enable, 0>(false);
+ cmds::Enable cmd;
+ cmd.Init(GL_CLIP_PLANE0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, EnableInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, Enable(_)).Times(0);
+ SpecializedSetup<cmds::Enable, 0>(false);
+ cmds::Enable cmd;
+ cmd.Init(GL_POINT_SPRITE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, EnableVertexAttribArrayValidArgs) {
+ EXPECT_CALL(*gl_, EnableVertexAttribArray(1));
+ SpecializedSetup<cmds::EnableVertexAttribArray, 0>(true);
+ cmds::EnableVertexAttribArray cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FinishValidArgs) {
+ EXPECT_CALL(*gl_, Finish());
+ SpecializedSetup<cmds::Finish, 0>(true);
+ cmds::Finish cmd;
+ cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FlushValidArgs) {
+ EXPECT_CALL(*gl_, Flush());
+ SpecializedSetup<cmds::Flush, 0>(true);
+ cmds::Flush cmd;
+ cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferRenderbufferValidArgs) {
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId));
+ SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(true);
+ cmds::FramebufferRenderbuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferRenderbufferInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(false);
+ cmds::FramebufferRenderbuffer cmd;
+ cmd.Init(GL_DRAW_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferRenderbufferInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(false);
+ cmds::FramebufferRenderbuffer cmd;
+ cmd.Init(GL_READ_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferRenderbufferInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(false);
+ cmds::FramebufferRenderbuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferTexture2DValidArgs) {
+ EXPECT_CALL(*gl_,
+ FramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kServiceTextureId,
+ 0));
+ SpecializedSetup<cmds::FramebufferTexture2D, 0>(true);
+ cmds::FramebufferTexture2D cmd;
+ cmd.Init(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferTexture2DInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, FramebufferTexture2DEXT(_, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferTexture2D, 0>(false);
+ cmds::FramebufferTexture2D cmd;
+ cmd.Init(GL_DRAW_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferTexture2DInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, FramebufferTexture2DEXT(_, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferTexture2D, 0>(false);
+ cmds::FramebufferTexture2D cmd;
+ cmd.Init(GL_READ_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferTexture2DInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, FramebufferTexture2DEXT(_, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferTexture2D, 0>(false);
+ cmds::FramebufferTexture2D cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_PROXY_TEXTURE_CUBE_MAP,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FrontFaceValidArgs) {
+ EXPECT_CALL(*gl_, FrontFace(GL_CW));
+ SpecializedSetup<cmds::FrontFace, 0>(true);
+ cmds::FrontFace cmd;
+ cmd.Init(GL_CW);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GenBuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, GenBuffersARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ cmds::GenBuffersImmediate* cmd = GetImmediateAs<cmds::GenBuffersImmediate>();
+ GLuint temp = kNewClientId;
+ SpecializedSetup<cmds::GenBuffersImmediate, 0>(true);
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetBuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, GenBuffersImmediateInvalidArgs) {
+ EXPECT_CALL(*gl_, GenBuffersARB(_, _)).Times(0);
+ cmds::GenBuffersImmediate* cmd = GetImmediateAs<cmds::GenBuffersImmediate>();
+ SpecializedSetup<cmds::GenBuffersImmediate, 0>(false);
+ cmd->Init(1, &client_buffer_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_buffer_id_)));
+}
+
+TEST_P(GLES2DecoderTest1, GenerateMipmapValidArgs) {
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(GL_TEXTURE_2D));
+ SpecializedSetup<cmds::GenerateMipmap, 0>(true);
+ cmds::GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GenerateMipmapInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
+ SpecializedSetup<cmds::GenerateMipmap, 0>(false);
+ cmds::GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_1D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GenerateMipmapInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
+ SpecializedSetup<cmds::GenerateMipmap, 0>(false);
+ cmds::GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_3D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GenFramebuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ cmds::GenFramebuffersImmediate* cmd =
+ GetImmediateAs<cmds::GenFramebuffersImmediate>();
+ GLuint temp = kNewClientId;
+ SpecializedSetup<cmds::GenFramebuffersImmediate, 0>(true);
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetFramebuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, GenFramebuffersImmediateInvalidArgs) {
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(_, _)).Times(0);
+ cmds::GenFramebuffersImmediate* cmd =
+ GetImmediateAs<cmds::GenFramebuffersImmediate>();
+ SpecializedSetup<cmds::GenFramebuffersImmediate, 0>(false);
+ cmd->Init(1, &client_framebuffer_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_framebuffer_id_)));
+}
+
+TEST_P(GLES2DecoderTest1, GenRenderbuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, GenRenderbuffersEXT(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ cmds::GenRenderbuffersImmediate* cmd =
+ GetImmediateAs<cmds::GenRenderbuffersImmediate>();
+ GLuint temp = kNewClientId;
+ SpecializedSetup<cmds::GenRenderbuffersImmediate, 0>(true);
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetRenderbuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, GenRenderbuffersImmediateInvalidArgs) {
+ EXPECT_CALL(*gl_, GenRenderbuffersEXT(_, _)).Times(0);
+ cmds::GenRenderbuffersImmediate* cmd =
+ GetImmediateAs<cmds::GenRenderbuffersImmediate>();
+ SpecializedSetup<cmds::GenRenderbuffersImmediate, 0>(false);
+ cmd->Init(1, &client_renderbuffer_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_renderbuffer_id_)));
+}
+
+TEST_P(GLES2DecoderTest1, GenTexturesImmediateValidArgs) {
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ cmds::GenTexturesImmediate* cmd =
+ GetImmediateAs<cmds::GenTexturesImmediate>();
+ GLuint temp = kNewClientId;
+ SpecializedSetup<cmds::GenTexturesImmediate, 0>(true);
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetTexture(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, GenTexturesImmediateInvalidArgs) {
+ EXPECT_CALL(*gl_, GenTextures(_, _)).Times(0);
+ cmds::GenTexturesImmediate* cmd =
+ GetImmediateAs<cmds::GenTexturesImmediate>();
+ SpecializedSetup<cmds::GenTexturesImmediate, 0>(false);
+ cmd->Init(1, &client_texture_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_texture_id_)));
+}
+// TODO(gman): GetActiveAttrib
+
+// TODO(gman): GetActiveUniform
+
+// TODO(gman): GetAttachedShaders
+
+// TODO(gman): GetAttribLocation
+
+TEST_P(GLES2DecoderTest1, GetBooleanvValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetBooleanv, 0>(true);
+ typedef cmds::GetBooleanv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetBooleanv(GL_ACTIVE_TEXTURE, result->GetData()));
+ result->size = 0;
+ cmds::GetBooleanv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ACTIVE_TEXTURE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetBooleanvInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetBooleanv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetBooleanv, 0>(false);
+ cmds::GetBooleanv::Result* result =
+ static_cast<cmds::GetBooleanv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBooleanv cmd;
+ cmd.Init(GL_FOG_HINT, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetBooleanvInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetBooleanv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetBooleanv, 0>(false);
+ cmds::GetBooleanv::Result* result =
+ static_cast<cmds::GetBooleanv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBooleanv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetBooleanvInvalidArgs1_1) {
+ EXPECT_CALL(*gl_, GetBooleanv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetBooleanv, 0>(false);
+ cmds::GetBooleanv::Result* result =
+ static_cast<cmds::GetBooleanv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBooleanv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetBufferParameterivValidArgs) {
+ SpecializedSetup<cmds::GetBufferParameteriv, 0>(true);
+ typedef cmds::GetBufferParameteriv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBufferParameteriv cmd;
+ cmd.Init(GL_ARRAY_BUFFER,
+ GL_BUFFER_SIZE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_BUFFER_SIZE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetBufferParameterivInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetBufferParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetBufferParameteriv, 0>(false);
+ cmds::GetBufferParameteriv::Result* result =
+ static_cast<cmds::GetBufferParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBufferParameteriv cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ GL_BUFFER_SIZE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetBufferParameterivInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetBufferParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetBufferParameteriv, 0>(false);
+ cmds::GetBufferParameteriv::Result* result =
+ static_cast<cmds::GetBufferParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBufferParameteriv cmd;
+ cmd.Init(GL_ARRAY_BUFFER,
+ GL_PIXEL_PACK_BUFFER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetBufferParameterivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetBufferParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetBufferParameteriv, 0>(false);
+ cmds::GetBufferParameteriv::Result* result =
+ static_cast<cmds::GetBufferParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBufferParameteriv cmd;
+ cmd.Init(GL_ARRAY_BUFFER, GL_BUFFER_SIZE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetBufferParameterivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetBufferParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetBufferParameteriv, 0>(false);
+ cmds::GetBufferParameteriv::Result* result =
+ static_cast<cmds::GetBufferParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBufferParameteriv cmd;
+ cmd.Init(GL_ARRAY_BUFFER,
+ GL_BUFFER_SIZE,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetErrorValidArgs) {
+ EXPECT_CALL(*gl_, GetError());
+ SpecializedSetup<cmds::GetError, 0>(true);
+ cmds::GetError cmd;
+ cmd.Init(shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetErrorInvalidArgsBadSharedMemoryId) {
+ EXPECT_CALL(*gl_, GetError()).Times(0);
+ SpecializedSetup<cmds::GetError, 0>(false);
+ cmds::GetError cmd;
+ cmd.Init(kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, GetFloatvValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetFloatv, 0>(true);
+ typedef cmds::GetFloatv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetFloatv(GL_ACTIVE_TEXTURE, result->GetData()));
+ result->size = 0;
+ cmds::GetFloatv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ACTIVE_TEXTURE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetFloatvInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetFloatv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetFloatv, 0>(false);
+ cmds::GetFloatv::Result* result =
+ static_cast<cmds::GetFloatv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetFloatv cmd;
+ cmd.Init(GL_FOG_HINT, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetFloatvInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetFloatv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetFloatv, 0>(false);
+ cmds::GetFloatv::Result* result =
+ static_cast<cmds::GetFloatv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetFloatv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetFloatvInvalidArgs1_1) {
+ EXPECT_CALL(*gl_, GetFloatv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetFloatv, 0>(false);
+ cmds::GetFloatv::Result* result =
+ static_cast<cmds::GetFloatv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetFloatv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetFramebufferAttachmentParameterivValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetFramebufferAttachmentParameteriv, 0>(true);
+ typedef cmds::GetFramebufferAttachmentParameteriv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetFramebufferAttachmentParameterivEXT(
+ GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ result->GetData()));
+ result->size = 0;
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetFramebufferAttachmentParameterivInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
+ .Times(0);
+ SpecializedSetup<cmds::GetFramebufferAttachmentParameteriv, 0>(false);
+ cmds::GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<cmds::GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_DRAW_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetFramebufferAttachmentParameterivInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
+ .Times(0);
+ SpecializedSetup<cmds::GetFramebufferAttachmentParameteriv, 0>(false);
+ cmds::GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<cmds::GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_READ_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetFramebufferAttachmentParameterivInvalidArgs3_0) {
+ EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
+ .Times(0);
+ SpecializedSetup<cmds::GetFramebufferAttachmentParameteriv, 0>(false);
+ cmds::GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<cmds::GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ kInvalidSharedMemoryId,
+ 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetFramebufferAttachmentParameterivInvalidArgs3_1) {
+ EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
+ .Times(0);
+ SpecializedSetup<cmds::GetFramebufferAttachmentParameteriv, 0>(false);
+ cmds::GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<cmds::GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetIntegervValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetIntegerv, 0>(true);
+ typedef cmds::GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_ACTIVE_TEXTURE, result->GetData()));
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ACTIVE_TEXTURE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetIntegervInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetIntegerv, 0>(false);
+ cmds::GetIntegerv::Result* result =
+ static_cast<cmds::GetIntegerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_FOG_HINT, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetIntegervInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetIntegerv, 0>(false);
+ cmds::GetIntegerv::Result* result =
+ static_cast<cmds::GetIntegerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetIntegervInvalidArgs1_1) {
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetIntegerv, 0>(false);
+ cmds::GetIntegerv::Result* result =
+ static_cast<cmds::GetIntegerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetProgramivValidArgs) {
+ SpecializedSetup<cmds::GetProgramiv, 0>(true);
+ typedef cmds::GetProgramiv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_,
+ GL_DELETE_STATUS,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DELETE_STATUS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetProgramiv, 0>(false);
+ cmds::GetProgramiv::Result* result =
+ static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_, GL_DELETE_STATUS, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetProgramiv, 0>(false);
+ cmds::GetProgramiv::Result* result =
+ static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_,
+ GL_DELETE_STATUS,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetProgramInfoLogValidArgs) {
+ const char* kInfo = "hello";
+ const uint32_t kBucketId = 123;
+ SpecializedSetup<cmds::GetProgramInfoLog, 0>(true);
+
+ cmds::GetProgramInfoLog cmd;
+ cmd.Init(client_program_id_, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(strlen(kInfo) + 1, bucket->size());
+ EXPECT_EQ(0,
+ memcmp(bucket->GetData(0, bucket->size()), kInfo, bucket->size()));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetProgramInfoLogInvalidArgs) {
+ const uint32_t kBucketId = 123;
+ cmds::GetProgramInfoLog cmd;
+ cmd.Init(kInvalidClientId, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetRenderbufferParameterivValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetRenderbufferParameteriv, 0>(true);
+ typedef cmds::GetRenderbufferParameteriv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(
+ *gl_,
+ GetRenderbufferParameterivEXT(
+ GL_RENDERBUFFER, GL_RENDERBUFFER_RED_SIZE, result->GetData()));
+ result->size = 0;
+ cmds::GetRenderbufferParameteriv cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ GL_RENDERBUFFER_RED_SIZE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_RENDERBUFFER_RED_SIZE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetRenderbufferParameterivInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetRenderbufferParameterivEXT(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetRenderbufferParameteriv, 0>(false);
+ cmds::GetRenderbufferParameteriv::Result* result =
+ static_cast<cmds::GetRenderbufferParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetRenderbufferParameteriv cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_RENDERBUFFER_RED_SIZE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetRenderbufferParameterivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetRenderbufferParameterivEXT(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetRenderbufferParameteriv, 0>(false);
+ cmds::GetRenderbufferParameteriv::Result* result =
+ static_cast<cmds::GetRenderbufferParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetRenderbufferParameteriv cmd;
+ cmd.Init(
+ GL_RENDERBUFFER, GL_RENDERBUFFER_RED_SIZE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetRenderbufferParameterivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetRenderbufferParameterivEXT(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetRenderbufferParameteriv, 0>(false);
+ cmds::GetRenderbufferParameteriv::Result* result =
+ static_cast<cmds::GetRenderbufferParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetRenderbufferParameteriv cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ GL_RENDERBUFFER_RED_SIZE,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetShaderivValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetShaderiv, 0>(true);
+ typedef cmds::GetShaderiv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetShaderiv(kServiceShaderId, GL_SHADER_TYPE, result->GetData()));
+ result->size = 0;
+ cmds::GetShaderiv cmd;
+ cmd.Init(client_shader_id_,
+ GL_SHADER_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_SHADER_TYPE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetShaderivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetShaderiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetShaderiv, 0>(false);
+ cmds::GetShaderiv::Result* result =
+ static_cast<cmds::GetShaderiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetShaderiv cmd;
+ cmd.Init(client_shader_id_, GL_SHADER_TYPE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetShaderivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetShaderiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetShaderiv, 0>(false);
+ cmds::GetShaderiv::Result* result =
+ static_cast<cmds::GetShaderiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetShaderiv cmd;
+ cmd.Init(client_shader_id_,
+ GL_SHADER_TYPE,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+// TODO(gman): GetShaderInfoLog
+// TODO(gman): GetShaderPrecisionFormat
+
+// TODO(gman): GetShaderSource
+// TODO(gman): GetString
+
+TEST_P(GLES2DecoderTest1, GetTexParameterfvValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetTexParameterfv, 0>(true);
+ typedef cmds::GetTexParameterfv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetTexParameterfv(
+ GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, result->GetData()));
+ result->size = 0;
+ cmds::GetTexParameterfv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(
+ decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_TEXTURE_MAG_FILTER),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterfvInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetTexParameterfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameterfv, 0>(false);
+ cmds::GetTexParameterfv::Result* result =
+ static_cast<cmds::GetTexParameterfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameterfv cmd;
+ cmd.Init(GL_PROXY_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterfvInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetTexParameterfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameterfv, 0>(false);
+ cmds::GetTexParameterfv::Result* result =
+ static_cast<cmds::GetTexParameterfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameterfv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_GENERATE_MIPMAP,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterfvInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetTexParameterfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameterfv, 0>(false);
+ cmds::GetTexParameterfv::Result* result =
+ static_cast<cmds::GetTexParameterfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameterfv cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterfvInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetTexParameterfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameterfv, 0>(false);
+ cmds::GetTexParameterfv::Result* result =
+ static_cast<cmds::GetTexParameterfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameterfv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterivValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetTexParameteriv, 0>(true);
+ typedef cmds::GetTexParameteriv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetTexParameteriv(
+ GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, result->GetData()));
+ result->size = 0;
+ cmds::GetTexParameteriv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(
+ decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_TEXTURE_MAG_FILTER),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterivInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetTexParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameteriv, 0>(false);
+ cmds::GetTexParameteriv::Result* result =
+ static_cast<cmds::GetTexParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameteriv cmd;
+ cmd.Init(GL_PROXY_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterivInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetTexParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameteriv, 0>(false);
+ cmds::GetTexParameteriv::Result* result =
+ static_cast<cmds::GetTexParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameteriv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_GENERATE_MIPMAP,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetTexParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameteriv, 0>(false);
+ cmds::GetTexParameteriv::Result* result =
+ static_cast<cmds::GetTexParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameteriv cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetTexParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameteriv, 0>(false);
+ cmds::GetTexParameteriv::Result* result =
+ static_cast<cmds::GetTexParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameteriv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+// TODO(gman): GetUniformfv
+
+// TODO(gman): GetUniformiv
+
+// TODO(gman): GetUniformLocation
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribfvValidArgs) {
+ SpecializedSetup<cmds::GetVertexAttribfv, 0>(true);
+ typedef cmds::GetVertexAttribfv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribfv cmd;
+ cmd.Init(1,
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribfvInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetVertexAttribfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetVertexAttribfv, 0>(false);
+ cmds::GetVertexAttribfv::Result* result =
+ static_cast<cmds::GetVertexAttribfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribfv cmd;
+ cmd.Init(1, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribfvInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetVertexAttribfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetVertexAttribfv, 0>(false);
+ cmds::GetVertexAttribfv::Result* result =
+ static_cast<cmds::GetVertexAttribfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribfv cmd;
+ cmd.Init(1,
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribivValidArgs) {
+ SpecializedSetup<cmds::GetVertexAttribiv, 0>(true);
+ typedef cmds::GetVertexAttribiv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribiv cmd;
+ cmd.Init(1,
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetVertexAttribiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetVertexAttribiv, 0>(false);
+ cmds::GetVertexAttribiv::Result* result =
+ static_cast<cmds::GetVertexAttribiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribiv cmd;
+ cmd.Init(1, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetVertexAttribiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetVertexAttribiv, 0>(false);
+ cmds::GetVertexAttribiv::Result* result =
+ static_cast<cmds::GetVertexAttribiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribiv cmd;
+ cmd.Init(1,
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+// TODO(gman): GetVertexAttribPointerv
+
+TEST_P(GLES2DecoderTest1, HintValidArgs) {
+ EXPECT_CALL(*gl_, Hint(GL_GENERATE_MIPMAP_HINT, GL_FASTEST));
+ SpecializedSetup<cmds::Hint, 0>(true);
+ cmds::Hint cmd;
+ cmd.Init(GL_GENERATE_MIPMAP_HINT, GL_FASTEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, HintInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, Hint(_, _)).Times(0);
+ SpecializedSetup<cmds::Hint, 0>(false);
+ cmds::Hint cmd;
+ cmd.Init(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsBufferValidArgs) {
+ SpecializedSetup<cmds::IsBuffer, 0>(true);
+ cmds::IsBuffer cmd;
+ cmd.Init(client_buffer_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsBufferInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsBuffer, 0>(false);
+ cmds::IsBuffer cmd;
+ cmd.Init(client_buffer_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(client_buffer_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsEnabledValidArgs) {
+ SpecializedSetup<cmds::IsEnabled, 0>(true);
+ cmds::IsEnabled cmd;
+ cmd.Init(GL_BLEND, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsEnabledInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, IsEnabled(_)).Times(0);
+ SpecializedSetup<cmds::IsEnabled, 0>(false);
+ cmds::IsEnabled cmd;
+ cmd.Init(GL_CLIP_PLANE0, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsEnabledInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, IsEnabled(_)).Times(0);
+ SpecializedSetup<cmds::IsEnabled, 0>(false);
+ cmds::IsEnabled cmd;
+ cmd.Init(GL_POINT_SPRITE, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsEnabledInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsEnabled, 0>(false);
+ cmds::IsEnabled cmd;
+ cmd.Init(GL_BLEND, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(GL_BLEND, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsFramebufferValidArgs) {
+ SpecializedSetup<cmds::IsFramebuffer, 0>(true);
+ cmds::IsFramebuffer cmd;
+ cmd.Init(client_framebuffer_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsFramebufferInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsFramebuffer, 0>(false);
+ cmds::IsFramebuffer cmd;
+ cmd.Init(
+ client_framebuffer_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(
+ client_framebuffer_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsProgramValidArgs) {
+ SpecializedSetup<cmds::IsProgram, 0>(true);
+ cmds::IsProgram cmd;
+ cmd.Init(client_program_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsProgramInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsProgram, 0>(false);
+ cmds::IsProgram cmd;
+ cmd.Init(client_program_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsRenderbufferValidArgs) {
+ SpecializedSetup<cmds::IsRenderbuffer, 0>(true);
+ cmds::IsRenderbuffer cmd;
+ cmd.Init(client_renderbuffer_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsRenderbufferInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsRenderbuffer, 0>(false);
+ cmds::IsRenderbuffer cmd;
+ cmd.Init(
+ client_renderbuffer_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(
+ client_renderbuffer_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsShaderValidArgs) {
+ SpecializedSetup<cmds::IsShader, 0>(true);
+ cmds::IsShader cmd;
+ cmd.Init(client_shader_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsShaderInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsShader, 0>(false);
+ cmds::IsShader cmd;
+ cmd.Init(client_shader_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(client_shader_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsTextureValidArgs) {
+ SpecializedSetup<cmds::IsTexture, 0>(true);
+ cmds::IsTexture cmd;
+ cmd.Init(client_texture_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsTextureInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsTexture, 0>(false);
+ cmds::IsTexture cmd;
+ cmd.Init(client_texture_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(client_texture_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, LineWidthValidArgs) {
+ EXPECT_CALL(*gl_, LineWidth(0.5f));
+ SpecializedSetup<cmds::LineWidth, 0>(true);
+ cmds::LineWidth cmd;
+ cmd.Init(0.5f);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, LineWidthInvalidValue0_0) {
+ SpecializedSetup<cmds::LineWidth, 0>(false);
+ cmds::LineWidth cmd;
+ cmd.Init(0.0f);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, LineWidthNaNValue0) {
+ SpecializedSetup<cmds::LineWidth, 0>(false);
+ cmds::LineWidth cmd;
+ cmd.Init(nanf(""));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, LinkProgramValidArgs) {
+ EXPECT_CALL(*gl_, LinkProgram(kServiceProgramId));
+ SpecializedSetup<cmds::LinkProgram, 0>(true);
+ cmds::LinkProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): PixelStorei
+
+TEST_P(GLES2DecoderTest1, PolygonOffsetValidArgs) {
+ EXPECT_CALL(*gl_, PolygonOffset(1, 2));
+ SpecializedSetup<cmds::PolygonOffset, 0>(true);
+ cmds::PolygonOffset cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): ReadPixels
+
+// TODO(gman): ReleaseShaderCompiler
+
+TEST_P(GLES2DecoderTest1, RenderbufferStorageValidArgs) {
+ SpecializedSetup<cmds::RenderbufferStorage, 0>(true);
+ cmds::RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, RenderbufferStorageInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::RenderbufferStorage, 0>(false);
+ cmds::RenderbufferStorage cmd;
+ cmd.Init(GL_FRAMEBUFFER, GL_RGBA4, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, RenderbufferStorageInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::RenderbufferStorage, 0>(false);
+ cmds::RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, -1, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, RenderbufferStorageInvalidArgs3_0) {
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::RenderbufferStorage, 0>(false);
+ cmds::RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 3, -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, SampleCoverageValidArgs) {
+ EXPECT_CALL(*gl_, SampleCoverage(1, true));
+ SpecializedSetup<cmds::SampleCoverage, 0>(true);
+ cmds::SampleCoverage cmd;
+ cmd.Init(1, true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_1_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
new file mode 100644
index 0000000..1d8ac40
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
@@ -0,0 +1,565 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2DecoderTest2 : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderTest2() { }
+
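+  // Sets up a program whose single uniform has type |uniform_type|, then
+  // issues each glUniform* command at that location and expects GL_NO_ERROR
+  // only for the entry points whose bits are set in |accepts_apis|
+  // (GL_INVALID_OPERATION otherwise).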
+ void TestAcceptedUniform(GLenum uniform_type, uint32 accepts_apis) {
+ SetupShaderForUniform(uniform_type);
+ bool valid_uniform = false;
+
+ EXPECT_CALL(*gl_, Uniform1i(1, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform1iv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform2iv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform3iv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform4iv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform1f(1, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform1fv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform2fv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform3fv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform4fv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, UniformMatrix2fv(1, _, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, UniformMatrix3fv(1, _, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, UniformMatrix4fv(1, _, _, _)).Times(AnyNumber());
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform1i;
+ cmds::Uniform1i cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform1i;
+ cmds::Uniform1ivImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform1ivImmediate>();
+ GLint data[2][1] = {{0}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform2i;
+ cmds::Uniform2i cmd;
+ cmd.Init(1, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform2i;
+ cmds::Uniform2ivImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform2ivImmediate>();
+ GLint data[2][2] = {{0}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform3i;
+ cmds::Uniform3i cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform3i;
+ cmds::Uniform3ivImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform3ivImmediate>();
+ GLint data[2][3] = {{0}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform4i;
+ cmds::Uniform4i cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform4i;
+ cmds::Uniform4ivImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform4ivImmediate>();
+ GLint data[2][4] = {{0}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ ////////////////////
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform1f;
+ cmds::Uniform1f cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform1f;
+ cmds::Uniform1fvImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform1fvImmediate>();
+ GLfloat data[2][1] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform2f;
+ cmds::Uniform2f cmd;
+ cmd.Init(1, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform2f;
+ cmds::Uniform2fvImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform2fvImmediate>();
+ GLfloat data[2][2] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform3f;
+ cmds::Uniform3f cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform3f;
+ cmds::Uniform3fvImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform3fvImmediate>();
+ GLfloat data[2][3] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform4f;
+ cmds::Uniform4f cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform4f;
+ cmds::Uniform4fvImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform4fvImmediate>();
+ GLfloat data[2][4] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniformMatrix2f;
+ cmds::UniformMatrix2fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix2fvImmediate>();
+ GLfloat data[2][2 * 2] = {{0.0f}};
+
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniformMatrix3f;
+ cmds::UniformMatrix3fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix3fvImmediate>();
+ GLfloat data[2][3 * 3] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniformMatrix4f;
+ cmds::UniformMatrix4fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix4fvImmediate>();
+ GLfloat data[2][4 * 4] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest2, ::testing::Bool());
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GenQueriesEXTImmediate, 0>(
+ bool valid) {
+ if (!valid) {
+    // Generate client_query_id_ up front so that generating it again
+    // will fail.
+ cmds::GenQueriesEXTImmediate& cmd =
+ *GetImmediateAs<cmds::GenQueriesEXTImmediate>();
+ cmd.Init(1, &client_query_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_query_id_)));
+ }
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::DeleteQueriesEXTImmediate, 0>(
+ bool valid) {
+ if (valid) {
+    // Generate client_query_id_ so that deleting it will succeed.
+ cmds::GenQueriesEXTImmediate& cmd =
+ *GetImmediateAs<cmds::GenQueriesEXTImmediate>();
+ cmd.Init(1, &client_query_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_query_id_)));
+ }
+};
+
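+// Creates and compiles a vertex and a fragment shader, attaches them to the
+// test program, and stubs the GetProgramiv queries made after a successful
+// link so that LinkProgram (and the setups built on it) can execute.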
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::LinkProgram, 0>(
+ bool /* valid */) {
+ const GLuint kClientVertexShaderId = 5001;
+ const GLuint kServiceVertexShaderId = 6001;
+ const GLuint kClientFragmentShaderId = 5002;
+ const GLuint kServiceFragmentShaderId = 6002;
+ DoCreateShader(
+ GL_VERTEX_SHADER, kClientVertexShaderId, kServiceVertexShaderId);
+ DoCreateShader(
+ GL_FRAGMENT_SHADER, kClientFragmentShaderId, kServiceFragmentShaderId);
+
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(kClientVertexShaderId), true);
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(kClientFragmentShaderId), true);
+
+ InSequence dummy;
+ EXPECT_CALL(*gl_,
+ AttachShader(kServiceProgramId, kServiceVertexShaderId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ AttachShader(kServiceProgramId, kServiceFragmentShaderId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_LINK_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(1));
+ EXPECT_CALL(*gl_,
+ GetProgramiv(kServiceProgramId, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_ACTIVE_ATTRIBUTES, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(
+ *gl_,
+ GetProgramiv(kServiceProgramId, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_ACTIVE_UNIFORMS, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(
+ *gl_,
+ GetProgramiv(kServiceProgramId, GL_ACTIVE_UNIFORM_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+
+ cmds::AttachShader attach_cmd;
+ attach_cmd.Init(client_program_id_, kClientVertexShaderId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+
+ attach_cmd.Init(client_program_id_, kClientFragmentShaderId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::UseProgram, 0>(
+ bool /* valid */) {
+ // Needs the same setup as LinkProgram.
+ SpecializedSetup<cmds::LinkProgram, 0>(false);
+
+ EXPECT_CALL(*gl_, LinkProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ cmds::LinkProgram link_cmd;
+ link_cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(link_cmd));
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::ValidateProgram, 0>(
+ bool /* valid */) {
+ // Needs the same setup as LinkProgram.
+ SpecializedSetup<cmds::LinkProgram, 0>(false);
+
+ EXPECT_CALL(*gl_, LinkProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ cmds::LinkProgram link_cmd;
+ link_cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(link_cmd));
+
+ EXPECT_CALL(*gl_,
+ GetProgramiv(kServiceProgramId, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+};
+
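+// The Uniform* setups below configure the test program with a single uniform
+// of the matching GLSL type so the generated valid-args tests pass the
+// decoder's uniform type check.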
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform1f, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform1fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform1ivImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2f, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC2);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2i, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC2);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC2);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2ivImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC2);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3f, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC3);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3i, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC3);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC3);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3ivImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC3);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4f, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC4);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4i, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC4);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC4);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4ivImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC4);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix2fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_MAT2);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix3fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_MAT3);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix4fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_MAT4);
+};
+
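+// The TexParameter* setups bind client_texture_id_ to GL_TEXTURE_2D so the
+// parameter commands have a bound texture to operate on.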
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameterf, 0>(
+ bool /* valid */) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameteri, 0>(
+ bool /* valid */) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameterfvImmediate, 0>(
+ bool /* valid */) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameterivImmediate, 0>(
+ bool /* valid */) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+};
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h"
+
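+// Each test below declares a uniform of one GLSL type and verifies which
+// glUniform* commands the decoder accepts for it.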
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_INT) {
+ TestAcceptedUniform(GL_INT, Program::kUniform1i);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_INT_VEC2) {
+ TestAcceptedUniform(GL_INT_VEC2, Program::kUniform2i);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_INT_VEC3) {
+ TestAcceptedUniform(GL_INT_VEC3, Program::kUniform3i);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_INT_VEC4) {
+ TestAcceptedUniform(GL_INT_VEC4, Program::kUniform4i);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_BOOL) {
+ TestAcceptedUniform(GL_BOOL, Program::kUniform1i | Program::kUniform1f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_BOOL_VEC2) {
+ TestAcceptedUniform(GL_BOOL_VEC2, Program::kUniform2i | Program::kUniform2f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_BOOL_VEC3) {
+ TestAcceptedUniform(GL_BOOL_VEC3, Program::kUniform3i | Program::kUniform3f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_BOOL_VEC4) {
+ TestAcceptedUniform(GL_BOOL_VEC4, Program::kUniform4i | Program::kUniform4f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT) {
+ TestAcceptedUniform(GL_FLOAT, Program::kUniform1f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_VEC2) {
+ TestAcceptedUniform(GL_FLOAT_VEC2, Program::kUniform2f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_VEC3) {
+ TestAcceptedUniform(GL_FLOAT_VEC3, Program::kUniform3f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_VEC4) {
+ TestAcceptedUniform(GL_FLOAT_VEC4, Program::kUniform4f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_MAT2) {
+ TestAcceptedUniform(GL_FLOAT_MAT2, Program::kUniformMatrix2f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_MAT3) {
+ TestAcceptedUniform(GL_FLOAT_MAT3, Program::kUniformMatrix3f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_MAT4) {
+ TestAcceptedUniform(GL_FLOAT_MAT4, Program::kUniformMatrix4f);
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
new file mode 100644
index 0000000..95c2027
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
@@ -0,0 +1,717 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder_unittest_2.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
+
+TEST_P(GLES2DecoderTest2, ScissorValidArgs) {
+ EXPECT_CALL(*gl_, Scissor(1, 2, 3, 4));
+ SpecializedSetup<cmds::Scissor, 0>(true);
+ cmds::Scissor cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, ScissorInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, Scissor(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::Scissor, 0>(false);
+ cmds::Scissor cmd;
+ cmd.Init(1, 2, -1, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, ScissorInvalidArgs3_0) {
+ EXPECT_CALL(*gl_, Scissor(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::Scissor, 0>(false);
+ cmds::Scissor cmd;
+ cmd.Init(1, 2, 3, -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+// TODO(gman): ShaderBinary
+
+// TODO(gman): ShaderSourceBucket
+
+TEST_P(GLES2DecoderTest2, StencilFuncValidArgs) {
+ EXPECT_CALL(*gl_, StencilFunc(GL_NEVER, 2, 3));
+ SpecializedSetup<cmds::StencilFunc, 0>(true);
+ cmds::StencilFunc cmd;
+ cmd.Init(GL_NEVER, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, StencilFuncSeparateValidArgs) {
+ EXPECT_CALL(*gl_, StencilFuncSeparate(GL_FRONT, GL_NEVER, 3, 4));
+ SpecializedSetup<cmds::StencilFuncSeparate, 0>(true);
+ cmds::StencilFuncSeparate cmd;
+ cmd.Init(GL_FRONT, GL_NEVER, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, StencilMaskValidArgs) {
+ SpecializedSetup<cmds::StencilMask, 0>(true);
+ cmds::StencilMask cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, StencilMaskSeparateValidArgs) {
+ SpecializedSetup<cmds::StencilMaskSeparate, 0>(true);
+ cmds::StencilMaskSeparate cmd;
+ cmd.Init(GL_FRONT, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, StencilOpValidArgs) {
+ EXPECT_CALL(*gl_, StencilOp(GL_KEEP, GL_INCR, GL_KEEP));
+ SpecializedSetup<cmds::StencilOp, 0>(true);
+ cmds::StencilOp cmd;
+ cmd.Init(GL_KEEP, GL_INCR, GL_KEEP);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, StencilOpSeparateValidArgs) {
+ EXPECT_CALL(*gl_, StencilOpSeparate(GL_FRONT, GL_INCR, GL_KEEP, GL_KEEP));
+ SpecializedSetup<cmds::StencilOpSeparate, 0>(true);
+ cmds::StencilOpSeparate cmd;
+ cmd.Init(GL_FRONT, GL_INCR, GL_KEEP, GL_KEEP);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): TexImage2D
+
+TEST_P(GLES2DecoderTest2, TexParameterfValidArgs) {
+ EXPECT_CALL(*gl_,
+ TexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
+ SpecializedSetup<cmds::TexParameterf, 0>(true);
+ cmds::TexParameterf cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterf, 0>(false);
+ cmds::TexParameterf cmd;
+ cmd.Init(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterf, 0>(false);
+ cmds::TexParameterf cmd;
+ cmd.Init(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterf, 0>(false);
+ cmds::TexParameterf cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfvImmediateValidArgs) {
+ cmds::TexParameterfvImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterfvImmediate>();
+ SpecializedSetup<cmds::TexParameterfvImmediate, 0>(true);
+ GLfloat temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_CALL(
+ *gl_,
+ TexParameterf(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ *reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfvImmediateInvalidArgs0_0) {
+ cmds::TexParameterfvImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterfvImmediate>();
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterfvImmediate, 0>(false);
+ GLfloat temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfvImmediateInvalidArgs0_1) {
+ cmds::TexParameterfvImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterfvImmediate>();
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterfvImmediate, 0>(false);
+ GLfloat temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfvImmediateInvalidArgs1_0) {
+ cmds::TexParameterfvImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterfvImmediate>();
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterfvImmediate, 0>(false);
+ GLfloat temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameteriValidArgs) {
+ EXPECT_CALL(*gl_,
+ TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
+ SpecializedSetup<cmds::TexParameteri, 0>(true);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameteriInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameteri, 0>(false);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameteriInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameteri, 0>(false);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameteriInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameteri, 0>(false);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterivImmediateValidArgs) {
+ cmds::TexParameterivImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterivImmediate>();
+ SpecializedSetup<cmds::TexParameterivImmediate, 0>(true);
+ GLint temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ *reinterpret_cast<GLint*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterivImmediateInvalidArgs0_0) {
+ cmds::TexParameterivImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterivImmediate>();
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterivImmediate, 0>(false);
+ GLint temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterivImmediateInvalidArgs0_1) {
+ cmds::TexParameterivImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterivImmediate>();
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterivImmediate, 0>(false);
+ GLint temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterivImmediateInvalidArgs1_0) {
+ cmds::TexParameterivImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterivImmediate>();
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterivImmediate, 0>(false);
+ GLint temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+// TODO(gman): TexSubImage2D
+
+TEST_P(GLES2DecoderTest2, Uniform1fValidArgs) {
+ EXPECT_CALL(*gl_, Uniform1fv(1, 1, _));
+ SpecializedSetup<cmds::Uniform1f, 0>(true);
+ cmds::Uniform1f cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform1fvImmediateValidArgs) {
+ cmds::Uniform1fvImmediate& cmd = *GetImmediateAs<cmds::Uniform1fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform1fv(1, 2, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform1fvImmediate, 0>(true);
+ GLfloat temp[1 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): Uniform1i
+// TODO(gman): Uniform1ivImmediate
+
+TEST_P(GLES2DecoderTest2, Uniform2fValidArgs) {
+ EXPECT_CALL(*gl_, Uniform2fv(1, 1, _));
+ SpecializedSetup<cmds::Uniform2f, 0>(true);
+ cmds::Uniform2f cmd;
+ cmd.Init(1, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform2fvImmediateValidArgs) {
+ cmds::Uniform2fvImmediate& cmd = *GetImmediateAs<cmds::Uniform2fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform2fv(1, 2, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform2fvImmediate, 0>(true);
+ GLfloat temp[2 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform2iValidArgs) {
+ EXPECT_CALL(*gl_, Uniform2iv(1, 1, _));
+ SpecializedSetup<cmds::Uniform2i, 0>(true);
+ cmds::Uniform2i cmd;
+ cmd.Init(1, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform2ivImmediateValidArgs) {
+ cmds::Uniform2ivImmediate& cmd = *GetImmediateAs<cmds::Uniform2ivImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform2iv(1, 2, reinterpret_cast<GLint*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform2ivImmediate, 0>(true);
+ GLint temp[2 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform3fValidArgs) {
+ EXPECT_CALL(*gl_, Uniform3fv(1, 1, _));
+ SpecializedSetup<cmds::Uniform3f, 0>(true);
+ cmds::Uniform3f cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform3fvImmediateValidArgs) {
+ cmds::Uniform3fvImmediate& cmd = *GetImmediateAs<cmds::Uniform3fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform3fv(1, 2, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform3fvImmediate, 0>(true);
+ GLfloat temp[3 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform3iValidArgs) {
+ EXPECT_CALL(*gl_, Uniform3iv(1, 1, _));
+ SpecializedSetup<cmds::Uniform3i, 0>(true);
+ cmds::Uniform3i cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform3ivImmediateValidArgs) {
+ cmds::Uniform3ivImmediate& cmd = *GetImmediateAs<cmds::Uniform3ivImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform3iv(1, 2, reinterpret_cast<GLint*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform3ivImmediate, 0>(true);
+ GLint temp[3 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform4fValidArgs) {
+ EXPECT_CALL(*gl_, Uniform4fv(1, 1, _));
+ SpecializedSetup<cmds::Uniform4f, 0>(true);
+ cmds::Uniform4f cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform4fvImmediateValidArgs) {
+ cmds::Uniform4fvImmediate& cmd = *GetImmediateAs<cmds::Uniform4fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform4fv(1, 2, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform4fvImmediate, 0>(true);
+ GLfloat temp[4 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform4iValidArgs) {
+ EXPECT_CALL(*gl_, Uniform4iv(1, 1, _));
+ SpecializedSetup<cmds::Uniform4i, 0>(true);
+ cmds::Uniform4i cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform4ivImmediateValidArgs) {
+ cmds::Uniform4ivImmediate& cmd = *GetImmediateAs<cmds::Uniform4ivImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform4iv(1, 2, reinterpret_cast<GLint*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform4ivImmediate, 0>(true);
+ GLint temp[4 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, UniformMatrix2fvImmediateValidArgs) {
+ cmds::UniformMatrix2fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix2fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ UniformMatrix2fv(
+ 1, 2, false, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::UniformMatrix2fvImmediate, 0>(true);
+ GLfloat temp[4 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, UniformMatrix3fvImmediateValidArgs) {
+ cmds::UniformMatrix3fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix3fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ UniformMatrix3fv(
+ 1, 2, false, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::UniformMatrix3fvImmediate, 0>(true);
+ GLfloat temp[9 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, UniformMatrix4fvImmediateValidArgs) {
+ cmds::UniformMatrix4fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix4fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ UniformMatrix4fv(
+ 1, 2, false, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::UniformMatrix4fvImmediate, 0>(true);
+ GLfloat temp[16 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, UseProgramValidArgs) {
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId));
+ SpecializedSetup<cmds::UseProgram, 0>(true);
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, UseProgramInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, UseProgram(_)).Times(0);
+ SpecializedSetup<cmds::UseProgram, 0>(false);
+ cmds::UseProgram cmd;
+ cmd.Init(kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, ValidateProgramValidArgs) {
+ EXPECT_CALL(*gl_, ValidateProgram(kServiceProgramId));
+ SpecializedSetup<cmds::ValidateProgram, 0>(true);
+ cmds::ValidateProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib1fValidArgs) {
+ EXPECT_CALL(*gl_, VertexAttrib1f(1, 2));
+ SpecializedSetup<cmds::VertexAttrib1f, 0>(true);
+ cmds::VertexAttrib1f cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib1fvImmediateValidArgs) {
+ cmds::VertexAttrib1fvImmediate& cmd =
+ *GetImmediateAs<cmds::VertexAttrib1fvImmediate>();
+ SpecializedSetup<cmds::VertexAttrib1fvImmediate, 0>(true);
+ GLfloat temp[1] = {
+ 0,
+ };
+ cmd.Init(1, &temp[0]);
+ EXPECT_CALL(*gl_,
+ VertexAttrib1fv(
+ 1, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib2fValidArgs) {
+ EXPECT_CALL(*gl_, VertexAttrib2f(1, 2, 3));
+ SpecializedSetup<cmds::VertexAttrib2f, 0>(true);
+ cmds::VertexAttrib2f cmd;
+ cmd.Init(1, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib2fvImmediateValidArgs) {
+ cmds::VertexAttrib2fvImmediate& cmd =
+ *GetImmediateAs<cmds::VertexAttrib2fvImmediate>();
+ SpecializedSetup<cmds::VertexAttrib2fvImmediate, 0>(true);
+ GLfloat temp[2] = {
+ 0,
+ };
+ cmd.Init(1, &temp[0]);
+ EXPECT_CALL(*gl_,
+ VertexAttrib2fv(
+ 1, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib3fValidArgs) {
+ EXPECT_CALL(*gl_, VertexAttrib3f(1, 2, 3, 4));
+ SpecializedSetup<cmds::VertexAttrib3f, 0>(true);
+ cmds::VertexAttrib3f cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib3fvImmediateValidArgs) {
+ cmds::VertexAttrib3fvImmediate& cmd =
+ *GetImmediateAs<cmds::VertexAttrib3fvImmediate>();
+ SpecializedSetup<cmds::VertexAttrib3fvImmediate, 0>(true);
+ GLfloat temp[3] = {
+ 0,
+ };
+ cmd.Init(1, &temp[0]);
+ EXPECT_CALL(*gl_,
+ VertexAttrib3fv(
+ 1, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib4fValidArgs) {
+ EXPECT_CALL(*gl_, VertexAttrib4f(1, 2, 3, 4, 5));
+ SpecializedSetup<cmds::VertexAttrib4f, 0>(true);
+ cmds::VertexAttrib4f cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib4fvImmediateValidArgs) {
+ cmds::VertexAttrib4fvImmediate& cmd =
+ *GetImmediateAs<cmds::VertexAttrib4fvImmediate>();
+ SpecializedSetup<cmds::VertexAttrib4fvImmediate, 0>(true);
+ GLfloat temp[4] = {
+ 0,
+ };
+ cmd.Init(1, &temp[0]);
+ EXPECT_CALL(*gl_,
+ VertexAttrib4fv(
+ 1, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): VertexAttribPointer
+
+TEST_P(GLES2DecoderTest2, ViewportValidArgs) {
+ EXPECT_CALL(*gl_, Viewport(1, 2, 3, 4));
+ SpecializedSetup<cmds::Viewport, 0>(true);
+ cmds::Viewport cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, ViewportInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, Viewport(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::Viewport, 0>(false);
+ cmds::Viewport cmd;
+ cmd.Init(1, 2, -1, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, ViewportInvalidArgs3_0) {
+ EXPECT_CALL(*gl_, Viewport(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::Viewport, 0>(false);
+ cmds::Viewport cmd;
+ cmd.Init(1, 2, 3, -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+// TODO(gman): TexStorage2DEXT
+// TODO(gman): GenQueriesEXTImmediate
+// TODO(gman): DeleteQueriesEXTImmediate
+// TODO(gman): BeginQueryEXT
+
+// TODO(gman): EndQueryEXT
+
+// TODO(gman): InsertEventMarkerEXT
+
+// TODO(gman): PushGroupMarkerEXT
+
+TEST_P(GLES2DecoderTest2, PopGroupMarkerEXTValidArgs) {
+ SpecializedSetup<cmds::PopGroupMarkerEXT, 0>(true);
+ cmds::PopGroupMarkerEXT cmd;
+ cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): GenVertexArraysOESImmediate
+// TODO(gman): DeleteVertexArraysOESImmediate
+// TODO(gman): IsVertexArrayOES
+// TODO(gman): BindVertexArrayOES
+// TODO(gman): SwapBuffers
+// TODO(gman): GetMaxValueInBufferCHROMIUM
+// TODO(gman): EnableFeatureCHROMIUM
+
+// TODO(gman): ResizeCHROMIUM
+// TODO(gman): GetRequestableExtensionsCHROMIUM
+
+// TODO(gman): RequestExtensionCHROMIUM
+
+// TODO(gman): GetMultipleIntegervCHROMIUM
+
+// TODO(gman): GetProgramInfoCHROMIUM
+
+// TODO(gman): GetTranslatedShaderSourceANGLE
+// TODO(gman): PostSubBufferCHROMIUM
+// TODO(gman): TexImageIOSurface2DCHROMIUM
+// TODO(gman): CopyTextureCHROMIUM
+// TODO(gman): DrawArraysInstancedANGLE
+// TODO(gman): DrawElementsInstancedANGLE
+// TODO(gman): VertexAttribDivisorANGLE
+// TODO(gman): GenMailboxCHROMIUM
+
+// TODO(gman): ProduceTextureCHROMIUMImmediate
+// TODO(gman): ProduceTextureDirectCHROMIUMImmediate
+// TODO(gman): ConsumeTextureCHROMIUMImmediate
+// TODO(gman): CreateAndConsumeTextureCHROMIUMImmediate
+// TODO(gman): BindUniformLocationCHROMIUMBucket
+// TODO(gman): BindTexImage2DCHROMIUM
+// TODO(gman): ReleaseTexImage2DCHROMIUM
+// TODO(gman): TraceBeginCHROMIUM
+
+// TODO(gman): TraceEndCHROMIUM
+// TODO(gman): AsyncTexSubImage2DCHROMIUM
+
+// TODO(gman): AsyncTexImage2DCHROMIUM
+
+// TODO(gman): WaitAsyncTexImage2DCHROMIUM
+
+// TODO(gman): WaitAllAsyncTexImage2DCHROMIUM
+
+// TODO(gman): LoseContextCHROMIUM
+// TODO(gman): InsertSyncPointCHROMIUM
+
+// TODO(gman): WaitSyncPointCHROMIUM
+
+// TODO(gman): DrawBuffersEXTImmediate
+// TODO(gman): DiscardBackbufferCHROMIUM
+
+// TODO(gman): ScheduleOverlayPlaneCHROMIUM
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
new file mode 100644
index 0000000..3fadaf0
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+class GLES2DecoderTest3 : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderTest3() { }
+};
+
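+// The bool test parameter maps to ignore_cached_state_for_test_ in
+// GLES2DecoderTestBase, so each test runs both with and without the decoder
+// ignoring its cached state.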
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest3, ::testing::Bool());
+
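+// TraceBeginCHROMIUM takes its trace name from a bucket, so the bucket is
+// filled via SetBucketAsCString before the command is executed.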
+TEST_P(GLES2DecoderTest3, TraceBeginCHROMIUM) {
+ const uint32 kBucketId = 123;
+ const char kName[] = "test_command";
+ SetBucketAsCString(kBucketId, kName);
+
+ TraceBeginCHROMIUM begin_cmd;
+ begin_cmd.Init(kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+}
+
+TEST_P(GLES2DecoderTest3, TraceEndCHROMIUM) {
+ // Test end fails if no begin.
+ TraceEndCHROMIUM end_cmd;
+ end_cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ const uint32 kBucketId = 123;
+ const char kName[] = "test_command";
+ SetBucketAsCString(kBucketId, kName);
+
+ TraceBeginCHROMIUM begin_cmd;
+ begin_cmd.Init(kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+
+ end_cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
new file mode 100644
index 0000000..7e93f36
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder_unittest_3.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_async_pixel.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_async_pixel.cc
new file mode 100644
index 0000000..d32870d
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_async_pixel.cc
@@ -0,0 +1,388 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+TEST_P(GLES2DecoderManualInitTest, AsyncPixelTransfers) {
+ InitState init;
+ init.extensions = "GL_CHROMIUM_async_pixel_transfers";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ // Set up the texture.
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ Texture* texture = texture_ref->texture();
+
+  // Set a mock Async delegate.
+ StrictMock<gpu::MockAsyncPixelTransferManager>* manager =
+ new StrictMock<gpu::MockAsyncPixelTransferManager>;
+ manager->Initialize(group().texture_manager());
+ decoder_->SetAsyncPixelTransferManagerForTest(manager);
+ StrictMock<gpu::MockAsyncPixelTransferDelegate>* delegate = NULL;
+
+ // Tex(Sub)Image2D upload commands.
+ AsyncTexImage2DCHROMIUM teximage_cmd;
+ teximage_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ 0,
+ 0,
+ 0);
+ AsyncTexSubImage2DCHROMIUM texsubimage_cmd;
+ texsubimage_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ 8,
+ 8,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ 0,
+ 0,
+ 0);
+ WaitAsyncTexImage2DCHROMIUM wait_cmd;
+ wait_cmd.Init(GL_TEXTURE_2D);
+ WaitAllAsyncTexImage2DCHROMIUM wait_all_cmd;
+ wait_all_cmd.Init();
+
+ // No transfer state exists initially.
+ EXPECT_FALSE(
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
+ base::Closure bind_callback;
+
+ // AsyncTexImage2D
+ {
+ // Create transfer state since it doesn't exist.
+ EXPECT_EQ(texture_ref->num_observers(), 0);
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexImage2D(_, _, _))
+ .WillOnce(SaveArg<2>(&bind_callback))
+ .RetiresOnSaturation();
+ // Command succeeds.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ EXPECT_TRUE(texture->IsImmutable());
+ // The texture is safe but the level has not been defined yet.
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ GLsizei width, height;
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(texture_ref->num_observers(), 1);
+ }
+ {
+ // Async redefinitions are not allowed!
+ // Command fails.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ EXPECT_TRUE(texture->IsImmutable());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ }
+
+ // Binding/defining of the async transfer
+ {
+ // TODO(epenner): We should check that the manager gets the
+ // BindCompletedAsyncTransfers() call, which is required to
+ // guarantee the delegate calls the bind callback.
+
+ // Simulate the bind callback from the delegate.
+ bind_callback.Run();
+
+    // After the bind callback is run, the texture is safe
+    // and has the right size, etc.
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ GLsizei width, height;
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(width, 8);
+ EXPECT_EQ(height, 8);
+ }
+
+ // AsyncTexSubImage2D
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ decoder_->GetAsyncPixelTransferManager()->ClearPixelTransferDelegateForTest(
+ texture_ref);
+ EXPECT_EQ(texture_ref->num_observers(), 0);
+ texture->SetImmutable(false);
+ {
+ // Create transfer state since it doesn't exist.
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexSubImage2D(_, _)).RetiresOnSaturation();
+ // Command succeeds.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(texsubimage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ EXPECT_TRUE(texture->IsImmutable());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ }
+ {
+ // No transfer is in progress.
+ EXPECT_CALL(*delegate, TransferIsInProgress())
+ .WillOnce(Return(false)) // texSubImage validation
+ .WillOnce(Return(false)) // async validation
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexSubImage2D(_, _)).RetiresOnSaturation();
+ // Command succeeds.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(texsubimage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ EXPECT_TRUE(texture->IsImmutable());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ }
+ {
+ // A transfer is still in progress!
+ EXPECT_CALL(*delegate, TransferIsInProgress())
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ // No async call, command fails.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(texsubimage_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ EXPECT_TRUE(texture->IsImmutable());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ }
+
+ // Delete delegate on DeleteTexture.
+ {
+ EXPECT_EQ(texture_ref->num_observers(), 1);
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ DoDeleteTexture(client_texture_id_, kServiceTextureId);
+ EXPECT_FALSE(
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ texture = NULL;
+ texture_ref = NULL;
+ delegate = NULL;
+ }
+
+ // WaitAsyncTexImage2D
+ {
+ // Get a fresh texture since the existing texture cannot be respecified
+ // asynchronously and AsyncTexSubImage2D does not involve binding.
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceTextureId));
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ texture_ref = GetTexture(client_texture_id_);
+ texture = texture_ref->texture();
+ texture->SetImmutable(false);
+ // Create transfer state since it doesn't exist.
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexImage2D(_, _, _)).RetiresOnSaturation();
+ // Start async transfer.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
+ EXPECT_TRUE(texture->IsImmutable());
+ // Wait for completion.
+ EXPECT_CALL(*delegate, WaitForTransferCompletion());
+ EXPECT_CALL(*manager, BindCompletedAsyncTransfers());
+ EXPECT_EQ(error::kNoError, ExecuteCmd(wait_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ // WaitAllAsyncTexImage2D
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ DoDeleteTexture(client_texture_id_, kServiceTextureId);
+ EXPECT_FALSE(
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ texture = NULL;
+ texture_ref = NULL;
+ delegate = NULL;
+ {
+ // Get a fresh texture since the existing texture cannot be respecified
+ // asynchronously and AsyncTexSubImage2D does not involve binding.
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceTextureId));
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ texture_ref = GetTexture(client_texture_id_);
+ texture = texture_ref->texture();
+ texture->SetImmutable(false);
+ // Create transfer state since it doesn't exist.
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexImage2D(_, _, _)).RetiresOnSaturation();
+ // Start async transfer.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
+ EXPECT_TRUE(texture->IsImmutable());
+ // Wait for completion of all uploads.
+ EXPECT_CALL(*manager, WaitAllAsyncTexImage2D()).RetiresOnSaturation();
+ EXPECT_CALL(*manager, BindCompletedAsyncTransfers());
+ EXPECT_EQ(error::kNoError, ExecuteCmd(wait_all_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+  // Remove the PixelTransferManager before the decoder is destroyed.
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ decoder_->ResetAsyncPixelTransferManagerForTest();
+ manager = NULL;
+}
+
+TEST_P(GLES2DecoderManualInitTest, AsyncPixelTransferManager) {
+ InitState init;
+ init.extensions = "GL_CHROMIUM_async_pixel_transfers";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ // Set up the texture.
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+
+ // Set a mock Async delegate.
+ StrictMock<gpu::MockAsyncPixelTransferManager>* manager =
+ new StrictMock<gpu::MockAsyncPixelTransferManager>;
+ manager->Initialize(group().texture_manager());
+ decoder_->SetAsyncPixelTransferManagerForTest(manager);
+ StrictMock<gpu::MockAsyncPixelTransferDelegate>* delegate = NULL;
+
+ AsyncTexImage2DCHROMIUM teximage_cmd;
+ teximage_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ 0,
+ 0,
+ 0);
+
+ // No transfer delegate exists initially.
+ EXPECT_FALSE(
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
+ // Create delegate on AsyncTexImage2D.
+ {
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexImage2D(_, _, _)).RetiresOnSaturation();
+
+ // Command succeeds.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ // Delegate is cached.
+ EXPECT_EQ(delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
+ // Delete delegate on manager teardown.
+ {
+ EXPECT_EQ(texture_ref->num_observers(), 1);
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ decoder_->ResetAsyncPixelTransferManagerForTest();
+ manager = NULL;
+
+ // Texture ref still valid.
+ EXPECT_EQ(texture_ref, GetTexture(client_texture_id_));
+ EXPECT_EQ(texture_ref->num_observers(), 0);
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
new file mode 100644
index 0000000..36b14e5
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
@@ -0,0 +1,484 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+TEST_P(GLES2DecoderWithShaderTest, GetVertexAttribPointervSucceeds) {
+ const float dummy = 0;
+ const GLuint kOffsetToTestFor = sizeof(dummy) * 4;
+ const GLuint kIndexToTest = 1;
+ GetVertexAttribPointerv::Result* result =
+ static_cast<GetVertexAttribPointerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ const GLuint* result_value = result->GetData();
+ // Test that initial value is 0.
+ GetVertexAttribPointerv cmd;
+ cmd.Init(kIndexToTest,
+ GL_VERTEX_ATTRIB_ARRAY_POINTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(sizeof(*result_value), result->size);
+ EXPECT_EQ(0u, *result_value);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Set the value and see that we get it.
+ SetupVertexBuffer();
+ DoVertexAttribPointer(kIndexToTest, 2, GL_FLOAT, 0, kOffsetToTestFor);
+ result->size = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(sizeof(*result_value), result->size);
+ EXPECT_EQ(kOffsetToTestFor, *result_value);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetVertexAttribPointervBadArgsFails) {
+ const GLuint kIndexToTest = 1;
+ GetVertexAttribPointerv::Result* result =
+ static_cast<GetVertexAttribPointerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ const GLuint* result_value = result->GetData();
+  // Test that an invalid pname fails.
+ GetVertexAttribPointerv cmd;
+ cmd.Init(kIndexToTest,
+ GL_VERTEX_ATTRIB_ARRAY_POINTER + 1,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(kInitialResult, *result_value);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+  // Test that an out-of-range index fails.
+ result->size = 0;
+ cmd.Init(kNumVertexAttribs,
+ GL_VERTEX_ATTRIB_ARRAY_POINTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(kInitialResult, *result_value);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
+  // Test that a bad shared memory id fails.
+ cmd.Init(kIndexToTest,
+ GL_VERTEX_ATTRIB_ARRAY_POINTER,
+ kInvalidSharedMemoryId,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+  // Test that a bad shared memory offset fails.
+ cmd.Init(kIndexToTest,
+ GL_VERTEX_ATTRIB_ARRAY_POINTER,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, BindBufferToDifferentTargetFails) {
+ // Bind the buffer to GL_ARRAY_BUFFER
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ // Attempt to rebind to GL_ELEMENT_ARRAY_BUFFER
+  // NOTE: Real GLES2 does not have this restriction, but WebGL does and so
+  // do we. This restriction can be removed at runtime.
+ EXPECT_CALL(*gl_, BindBuffer(_, _)).Times(0);
+ BindBuffer cmd;
+ cmd.Init(GL_ELEMENT_ARRAY_BUFFER, client_buffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, VertexAttribPointer) {
+ SetupVertexBuffer();
+ static const GLenum types[] = {
+ GL_BYTE, GL_UNSIGNED_BYTE, GL_SHORT, GL_UNSIGNED_SHORT,
+ GL_FLOAT, GL_FIXED, GL_INT, GL_UNSIGNED_INT,
+ };
+ static const GLsizei sizes[] = {
+ 1, 1, 2, 2, 4, 4, 4, 4,
+ };
+ static const GLuint indices[] = {
+ 0, 1, kNumVertexAttribs - 1, kNumVertexAttribs,
+ };
+ static const GLsizei offset_mult[] = {
+ 0, 0, 1, 1, 2, 1000,
+ };
+ static const GLsizei offset_offset[] = {
+ 0, 1, 0, 1, 0, 0,
+ };
+ static const GLsizei stride_mult[] = {
+ -1, 0, 0, 1, 1, 2, 1000,
+ };
+ static const GLsizei stride_offset[] = {
+ 0, 0, 1, 0, 1, 0, 0,
+ };
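+  // Exhaustively try every combination of type, index, size, offset, stride
+  // and normalize flag. Valid combinations are expected to reach the driver
+  // (except GL_FIXED, which is accepted without a direct driver call here);
+  // each invalid combination should raise the matching GL error instead.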
+ for (size_t tt = 0; tt < arraysize(types); ++tt) {
+ GLenum type = types[tt];
+ GLsizei num_bytes = sizes[tt];
+ for (size_t ii = 0; ii < arraysize(indices); ++ii) {
+ GLuint index = indices[ii];
+ for (GLint size = 0; size < 5; ++size) {
+ for (size_t oo = 0; oo < arraysize(offset_mult); ++oo) {
+ GLuint offset = num_bytes * offset_mult[oo] + offset_offset[oo];
+ for (size_t ss = 0; ss < arraysize(stride_mult); ++ss) {
+ GLsizei stride = num_bytes * stride_mult[ss] + stride_offset[ss];
+ for (int normalize = 0; normalize < 2; ++normalize) {
+ bool index_good = index < static_cast<GLuint>(kNumVertexAttribs);
+ bool size_good = (size > 0 && size < 5);
+ bool offset_good = (offset % num_bytes == 0);
+ bool stride_good =
+ (stride % num_bytes == 0) && stride >= 0 && stride <= 255;
+ bool type_good = (type != GL_INT && type != GL_UNSIGNED_INT &&
+ type != GL_FIXED);
+ bool good = size_good && offset_good && stride_good &&
+ type_good && index_good;
+ bool call = good && (type != GL_FIXED);
+ if (call) {
+ EXPECT_CALL(*gl_,
+ VertexAttribPointer(index,
+ size,
+ type,
+ normalize,
+ stride,
+ BufferOffset(offset)));
+ }
+ VertexAttribPointer cmd;
+ cmd.Init(index, size, type, normalize, stride, offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ if (good) {
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ } else if (size_good && offset_good && stride_good && type_good &&
+ !index_good) {
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ } else if (size_good && offset_good && stride_good &&
+ !type_good && index_good) {
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ } else if (size_good && offset_good && !stride_good &&
+ type_good && index_good) {
+ if (stride < 0 || stride > 255) {
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ } else {
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ }
+ } else if (size_good && !offset_good && stride_good &&
+ type_good && index_good) {
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ } else if (!size_good && offset_good && stride_good &&
+ type_good && index_good) {
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ } else {
+ EXPECT_NE(GL_NO_ERROR, GetGLError());
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+class GLES2DecoderVertexArraysOESTest : public GLES2DecoderWithShaderTest {
+ public:
+ GLES2DecoderVertexArraysOESTest() {}
+
+ bool vertex_array_deleted_manually_;
+
+ virtual void SetUp() {
+ InitState init;
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ SetupDefaultProgram();
+
+ AddExpectationsForGenVertexArraysOES();
+ GenHelper<GenVertexArraysOESImmediate>(client_vertexarray_id_);
+
+ vertex_array_deleted_manually_ = false;
+ }
+
+ virtual void TearDown() {
+    // vertex_array_deleted_manually_ should only be set if the test itself
+    // handled deletion of the vertex array. This is necessary because vertex
+    // array objects are not shareable, and thus are not managed in the
+    // ContextGroup, meaning they will be destroyed during test tear down.
+ if (!vertex_array_deleted_manually_) {
+ AddExpectationsForDeleteVertexArraysOES();
+ }
+
+ GLES2DecoderWithShaderTest::TearDown();
+ }
+
+ void GenVertexArraysOESImmediateValidArgs() {
+ AddExpectationsForGenVertexArraysOES();
+ GenVertexArraysOESImmediate* cmd =
+ GetImmediateAs<GenVertexArraysOESImmediate>();
+ GLuint temp = kNewClientId;
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetVertexArrayInfo(kNewClientId) != NULL);
+ AddExpectationsForDeleteVertexArraysOES();
+ }
+
+ void GenVertexArraysOESImmediateInvalidArgs() {
+ EXPECT_CALL(*gl_, GenVertexArraysOES(_, _)).Times(0);
+ GenVertexArraysOESImmediate* cmd =
+ GetImmediateAs<GenVertexArraysOESImmediate>();
+ cmd->Init(1, &client_vertexarray_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_vertexarray_id_)));
+ }
+
+ void DeleteVertexArraysOESImmediateValidArgs() {
+ AddExpectationsForDeleteVertexArraysOES();
+ DeleteVertexArraysOESImmediate& cmd =
+ *GetImmediateAs<DeleteVertexArraysOESImmediate>();
+ cmd.Init(1, &client_vertexarray_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_vertexarray_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetVertexArrayInfo(client_vertexarray_id_) == NULL);
+ vertex_array_deleted_manually_ = true;
+ }
+
+ void DeleteVertexArraysOESImmediateInvalidArgs() {
+ DeleteVertexArraysOESImmediate& cmd =
+ *GetImmediateAs<DeleteVertexArraysOESImmediate>();
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ }
+
+ void DeleteBoundVertexArraysOESImmediateValidArgs() {
+ BindVertexArrayOESValidArgs();
+
+ AddExpectationsForDeleteBoundVertexArraysOES();
+ DeleteVertexArraysOESImmediate& cmd =
+ *GetImmediateAs<DeleteVertexArraysOESImmediate>();
+ cmd.Init(1, &client_vertexarray_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_vertexarray_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetVertexArrayInfo(client_vertexarray_id_) == NULL);
+ vertex_array_deleted_manually_ = true;
+ }
+
+ void IsVertexArrayOESValidArgs() {
+ IsVertexArrayOES cmd;
+ cmd.Init(client_vertexarray_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ void IsVertexArrayOESInvalidArgsBadSharedMemoryId() {
+ IsVertexArrayOES cmd;
+ cmd.Init(
+ client_vertexarray_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(
+ client_vertexarray_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ }
+
+ void BindVertexArrayOESValidArgs() {
+ AddExpectationsForBindVertexArrayOES();
+ BindVertexArrayOES cmd;
+ cmd.Init(client_vertexarray_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ void BindVertexArrayOESValidArgsNewId() {
+ BindVertexArrayOES cmd;
+ cmd.Init(kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderVertexArraysOESTest,
+ ::testing::Bool());
+
+class GLES2DecoderEmulatedVertexArraysOESTest
+ : public GLES2DecoderVertexArraysOESTest {
+ public:
+ GLES2DecoderEmulatedVertexArraysOESTest() {}
+
+ virtual void SetUp() {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ init.use_native_vao = false;
+ InitDecoder(init);
+ SetupDefaultProgram();
+
+ AddExpectationsForGenVertexArraysOES();
+ GenHelper<GenVertexArraysOESImmediate>(client_vertexarray_id_);
+
+ vertex_array_deleted_manually_ = false;
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderEmulatedVertexArraysOESTest,
+ ::testing::Bool());
+
+// Run each vertex array object test with both native and emulated support.
+TEST_P(GLES2DecoderVertexArraysOESTest, GenVertexArraysOESImmediateValidArgs) {
+ GenVertexArraysOESImmediateValidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ GenVertexArraysOESImmediateValidArgs) {
+ GenVertexArraysOESImmediateValidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest,
+ GenVertexArraysOESImmediateInvalidArgs) {
+ GenVertexArraysOESImmediateInvalidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ GenVertexArraysOESImmediateInvalidArgs) {
+ GenVertexArraysOESImmediateInvalidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest,
+ DeleteVertexArraysOESImmediateValidArgs) {
+ DeleteVertexArraysOESImmediateValidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ DeleteVertexArraysOESImmediateValidArgs) {
+ DeleteVertexArraysOESImmediateValidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest,
+ DeleteVertexArraysOESImmediateInvalidArgs) {
+ DeleteVertexArraysOESImmediateInvalidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ DeleteVertexArraysOESImmediateInvalidArgs) {
+ DeleteVertexArraysOESImmediateInvalidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest,
+ DeleteBoundVertexArraysOESImmediateValidArgs) {
+ DeleteBoundVertexArraysOESImmediateValidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ DeleteBoundVertexArraysOESImmediateValidArgs) {
+ DeleteBoundVertexArraysOESImmediateValidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest, IsVertexArrayOESValidArgs) {
+ IsVertexArrayOESValidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest, IsVertexArrayOESValidArgs) {
+ IsVertexArrayOESValidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest,
+ IsVertexArrayOESInvalidArgsBadSharedMemoryId) {
+ IsVertexArrayOESInvalidArgsBadSharedMemoryId();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ IsVertexArrayOESInvalidArgsBadSharedMemoryId) {
+ IsVertexArrayOESInvalidArgsBadSharedMemoryId();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest, BindVertexArrayOESValidArgs) {
+ BindVertexArrayOESValidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest, BindVertexArrayOESValidArgs) {
+ BindVertexArrayOESValidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest, BindVertexArrayOESValidArgsNewId) {
+ BindVertexArrayOESValidArgsNewId();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ BindVertexArrayOESValidArgsNewId) {
+ BindVertexArrayOESValidArgsNewId();
+}
+
+TEST_P(GLES2DecoderTest, BufferDataGLError) {
+ GLenum target = GL_ARRAY_BUFFER;
+ GLsizeiptr size = 4;
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ BufferManager* manager = group().buffer_manager();
+ Buffer* buffer = manager->GetBuffer(client_buffer_id_);
+ ASSERT_TRUE(buffer != NULL);
+ EXPECT_EQ(0, buffer->size());
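+  // The decoder calls glGetError() before and after glBufferData; the second
+  // call simulates the driver reporting GL_OUT_OF_MEMORY, so the tracked
+  // buffer size must remain 0.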
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BufferData(target, size, _, GL_STREAM_DRAW))
+ .Times(1)
+ .RetiresOnSaturation();
+ BufferData cmd;
+ cmd.Init(target, size, 0, 0, GL_STREAM_DRAW);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_EQ(0, buffer->size());
+}
+
+// TODO(gman): BufferData
+
+// TODO(gman): BufferDataImmediate
+
+// TODO(gman): BufferSubData
+
+// TODO(gman): BufferSubDataImmediate
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
new file mode 100644
index 0000000..136834d
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -0,0 +1,1679 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/logger.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface.h"
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::InvokeWithoutArgs;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgPointee;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+using ::testing::WithArg;
+
+namespace {
+
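+// Makes an InitState self-consistent: if a native VAO is requested but no
+// vertex array object extension is listed, a platform-appropriate one is
+// appended; if native VAOs are off, no such extension may be present.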
+void NormalizeInitState(gpu::gles2::GLES2DecoderTestBase::InitState* init) {
+ CHECK(init);
+ const char* kVAOExtensions[] = {
+ "GL_OES_vertex_array_object",
+ "GL_ARB_vertex_array_object",
+ "GL_APPLE_vertex_array_object"
+ };
+ bool contains_vao_extension = false;
+ for (size_t ii = 0; ii < arraysize(kVAOExtensions); ++ii) {
+ if (init->extensions.find(kVAOExtensions[ii]) != std::string::npos) {
+ contains_vao_extension = true;
+ break;
+ }
+ }
+ if (init->use_native_vao) {
+ if (contains_vao_extension)
+ return;
+ if (!init->extensions.empty())
+ init->extensions += " ";
+ if (StartsWithASCII(init->gl_version, "opengl es", false)) {
+ init->extensions += kVAOExtensions[0];
+ } else {
+#if !defined(OS_MACOSX)
+ init->extensions += kVAOExtensions[1];
+#else
+ init->extensions += kVAOExtensions[2];
+#endif // OS_MACOSX
+ }
+ } else {
+ // Make sure we don't set up an invalid InitState.
+ CHECK(!contains_vao_extension);
+ }
+}
+
+}  // namespace
+
+namespace gpu {
+namespace gles2 {
+
+GLES2DecoderTestBase::GLES2DecoderTestBase()
+ : surface_(NULL),
+ context_(NULL),
+ memory_tracker_(NULL),
+ client_buffer_id_(100),
+ client_framebuffer_id_(101),
+ client_program_id_(102),
+ client_renderbuffer_id_(103),
+ client_shader_id_(104),
+ client_texture_id_(106),
+ client_element_buffer_id_(107),
+ client_vertex_shader_id_(121),
+ client_fragment_shader_id_(122),
+ client_query_id_(123),
+ client_vertexarray_id_(124),
+ service_renderbuffer_id_(0),
+ service_renderbuffer_valid_(false),
+ ignore_cached_state_for_test_(GetParam()),
+ cached_color_mask_red_(true),
+ cached_color_mask_green_(true),
+ cached_color_mask_blue_(true),
+ cached_color_mask_alpha_(true),
+ cached_depth_mask_(true),
+ cached_stencil_front_mask_(static_cast<GLuint>(-1)),
+ cached_stencil_back_mask_(static_cast<GLuint>(-1)) {
+ memset(immediate_buffer_, 0xEE, sizeof(immediate_buffer_));
+}
+
+GLES2DecoderTestBase::~GLES2DecoderTestBase() {}
+
+void GLES2DecoderTestBase::SetUp() {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+}
+
+void GLES2DecoderTestBase::AddExpectationsForVertexAttribManager() {
+ for (GLint ii = 0; ii < kNumVertexAttribs; ++ii) {
+ EXPECT_CALL(*gl_, VertexAttrib4f(ii, 0.0f, 0.0f, 0.0f, 1.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+GLES2DecoderTestBase::InitState::InitState()
+ : has_alpha(false),
+ has_depth(false),
+ has_stencil(false),
+ request_alpha(false),
+ request_depth(false),
+ request_stencil(false),
+ bind_generates_resource(false),
+ lose_context_when_out_of_memory(false),
+ use_native_vao(true) {
+}
+
+void GLES2DecoderTestBase::InitDecoder(const InitState& init) {
+ InitDecoderWithCommandLine(init, NULL);
+}
+
+void GLES2DecoderTestBase::InitDecoderWithCommandLine(
+ const InitState& init,
+ const base::CommandLine* command_line) {
+ InitState normalized_init = init;
+ NormalizeInitState(&normalized_init);
+ Framebuffer::ClearFramebufferCompleteComboMap();
+
+ gfx::SetGLGetProcAddressProc(gfx::MockGLInterface::GetGLProcAddress);
+ gfx::GLSurface::InitializeOneOffWithMockBindingsForTests();
+
+ gl_.reset(new StrictMock<MockGLInterface>());
+ ::gfx::MockGLInterface::SetGLInterface(gl_.get());
+
+ SetupMockGLBehaviors();
+
+ // Only create stream texture manager if extension is requested.
+ std::vector<std::string> list;
+ base::SplitString(normalized_init.extensions, ' ', &list);
+ scoped_refptr<FeatureInfo> feature_info;
+ if (command_line)
+ feature_info = new FeatureInfo(*command_line);
+ group_ = scoped_refptr<ContextGroup>(
+ new ContextGroup(NULL,
+ memory_tracker_,
+ new ShaderTranslatorCache,
+ feature_info.get(),
+ normalized_init.bind_generates_resource));
+ bool use_default_textures = normalized_init.bind_generates_resource;
+
+ InSequence sequence;
+
+ surface_ = new gfx::GLSurfaceStub;
+ surface_->SetSize(gfx::Size(kBackBufferWidth, kBackBufferHeight));
+
+ // Context needs to be created before initializing ContextGroup, which will
+ // in turn initialize FeatureInfo, which needs a context to determine
+ // extension support.
+ context_ = new gfx::GLContextStubWithExtensions;
+ context_->AddExtensionsString(normalized_init.extensions.c_str());
+ context_->SetGLVersionString(normalized_init.gl_version.c_str());
+
+ context_->MakeCurrent(surface_.get());
+ gfx::GLSurface::InitializeDynamicMockBindingsForTests(context_.get());
+
+ TestHelper::SetupContextGroupInitExpectations(
+ gl_.get(),
+ DisallowedFeatures(),
+ normalized_init.extensions.c_str(),
+ normalized_init.gl_version.c_str(),
+ normalized_init.bind_generates_resource);
+
+ // We initialize the ContextGroup with a MockGLES2Decoder so that
+ // we can use the ContextGroup to figure out how the real GLES2Decoder
+ // will initialize itself.
+ mock_decoder_.reset(new MockGLES2Decoder());
+
+ // Install FakeDoCommands handler so we can use individual DoCommand()
+ // expectations.
+ EXPECT_CALL(*mock_decoder_, DoCommands(_, _, _, _)).WillRepeatedly(
+ Invoke(mock_decoder_.get(), &MockGLES2Decoder::FakeDoCommands));
+
+ EXPECT_TRUE(
+ group_->Initialize(mock_decoder_.get(), DisallowedFeatures()));
+
+ if (group_->feature_info()->feature_flags().native_vertex_array_object) {
+ EXPECT_CALL(*gl_, GenVertexArraysOES(1, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceVertexArrayId))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindVertexArrayOES(_)).Times(1).RetiresOnSaturation();
+ }
+
+ if (group_->feature_info()->workarounds().init_vertex_attributes)
+ AddExpectationsForVertexAttribManager();
+
+ AddExpectationsForBindVertexArrayOES();
+
+ EXPECT_CALL(*gl_, EnableVertexAttribArray(0))
+ .Times(1)
+ .RetiresOnSaturation();
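+  // During initialization the decoder creates service-side buffers used for
+  // attrib 0 and GL_FIXED attribute emulation.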
+ static GLuint attrib_0_id[] = {
+ kServiceAttrib0BufferId,
+ };
+ static GLuint fixed_attrib_buffer_id[] = {
+ kServiceFixedAttribBufferId,
+ };
+ EXPECT_CALL(*gl_, GenBuffersARB(arraysize(attrib_0_id), _))
+ .WillOnce(SetArrayArgument<1>(attrib_0_id,
+ attrib_0_id + arraysize(attrib_0_id)))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, kServiceAttrib0BufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribPointer(0, 1, GL_FLOAT, GL_FALSE, 0, NULL))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GenBuffersARB(arraysize(fixed_attrib_buffer_id), _))
+ .WillOnce(SetArrayArgument<1>(
+ fixed_attrib_buffer_id,
+ fixed_attrib_buffer_id + arraysize(fixed_attrib_buffer_id)))
+ .RetiresOnSaturation();
+
+ for (GLint tt = 0; tt < TestHelper::kNumTextureUnits; ++tt) {
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0 + tt))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (group_->feature_info()->feature_flags().oes_egl_image_external) {
+ EXPECT_CALL(*gl_,
+ BindTexture(GL_TEXTURE_EXTERNAL_OES,
+ use_default_textures
+ ? TestHelper::kServiceDefaultExternalTextureId
+ : 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (group_->feature_info()->feature_flags().arb_texture_rectangle) {
+ EXPECT_CALL(
+ *gl_,
+ BindTexture(GL_TEXTURE_RECTANGLE_ARB,
+ use_default_textures
+ ? TestHelper::kServiceDefaultRectangleTextureId
+ : 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_,
+ BindTexture(GL_TEXTURE_CUBE_MAP,
+ use_default_textures
+ ? TestHelper::kServiceDefaultTextureCubemapId
+ : 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(
+ *gl_,
+ BindTexture(
+ GL_TEXTURE_2D,
+ use_default_textures ? TestHelper::kServiceDefaultTexture2dId : 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetIntegerv(GL_ALPHA_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(normalized_init.has_alpha ? 8 : 0))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(normalized_init.has_depth ? 24 : 0))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(normalized_init.has_stencil ? 8 : 0))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, Enable(GL_VERTEX_PROGRAM_POINT_SIZE))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, Enable(GL_POINT_SPRITE))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ static GLint max_viewport_dims[] = {
+ kMaxViewportWidth,
+ kMaxViewportHeight
+ };
+ EXPECT_CALL(*gl_, GetIntegerv(GL_MAX_VIEWPORT_DIMS, _))
+ .WillOnce(SetArrayArgument<1>(
+ max_viewport_dims, max_viewport_dims + arraysize(max_viewport_dims)))
+ .RetiresOnSaturation();
+
+ SetupInitCapabilitiesExpectations();
+ SetupInitStateExpectations();
+
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindRenderbufferEXT(GL_RENDERBUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ // TODO(boliu): Remove OS_ANDROID once crbug.com/259023 is fixed and the
+ // workaround has been reverted.
+#if !defined(OS_ANDROID)
+ EXPECT_CALL(*gl_, Clear(
+ GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT))
+ .Times(1)
+ .RetiresOnSaturation();
+#endif
+
+ engine_.reset(new StrictMock<MockCommandBufferEngine>());
+ scoped_refptr<gpu::Buffer> buffer =
+ engine_->GetSharedMemoryBuffer(kSharedMemoryId);
+ shared_memory_offset_ = kSharedMemoryOffset;
+ shared_memory_address_ =
+ reinterpret_cast<int8*>(buffer->memory()) + shared_memory_offset_;
+ shared_memory_id_ = kSharedMemoryId;
+ shared_memory_base_ = buffer->memory();
+
+ static const int32 kLoseContextWhenOutOfMemory = 0x10002;
+
+ int32 attributes[] = {
+ EGL_ALPHA_SIZE,
+ normalized_init.request_alpha ? 8 : 0,
+ EGL_DEPTH_SIZE,
+ normalized_init.request_depth ? 24 : 0,
+ EGL_STENCIL_SIZE,
+ normalized_init.request_stencil ? 8 : 0,
+ kLoseContextWhenOutOfMemory,
+ normalized_init.lose_context_when_out_of_memory ? 1 : 0, };
+ std::vector<int32> attribs(attributes, attributes + arraysize(attributes));
+
+ decoder_.reset(GLES2Decoder::Create(group_.get()));
+ decoder_->SetIgnoreCachedStateForTest(ignore_cached_state_for_test_);
+ decoder_->GetLogger()->set_log_synthesized_gl_errors(false);
+ decoder_->Initialize(surface_,
+ context_,
+ false,
+ surface_->GetSize(),
+ DisallowedFeatures(),
+ attribs);
+ decoder_->MakeCurrent();
+ decoder_->set_engine(engine_.get());
+ decoder_->BeginDecoding();
+
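+  // Pre-generate one client resource of each kind (buffer, framebuffer,
+  // renderbuffer, texture, element array buffer) plus a program and a vertex
+  // shader for the tests to use.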
+ EXPECT_CALL(*gl_, GenBuffersARB(_, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceBufferId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenBuffersImmediate>(client_buffer_id_);
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(_, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceFramebufferId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenFramebuffersImmediate>(client_framebuffer_id_);
+ EXPECT_CALL(*gl_, GenRenderbuffersEXT(_, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceRenderbufferId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenRenderbuffersImmediate>(client_renderbuffer_id_);
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenTexturesImmediate>(client_texture_id_);
+ EXPECT_CALL(*gl_, GenBuffersARB(_, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceElementBufferId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenBuffersImmediate>(client_element_buffer_id_);
+
+ DoCreateProgram(client_program_id_, kServiceProgramId);
+ DoCreateShader(GL_VERTEX_SHADER, client_shader_id_, kServiceShaderId);
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+void GLES2DecoderTestBase::ResetDecoder() {
+ if (!decoder_.get())
+ return;
+ // All Tests should have read all their GLErrors before getting here.
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, _))
+ .Times(2)
+ .RetiresOnSaturation();
+ if (group_->feature_info()->feature_flags().native_vertex_array_object) {
+ EXPECT_CALL(*gl_, DeleteVertexArraysOES(1, Pointee(kServiceVertexArrayId)))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+
+ decoder_->EndDecoding();
+ decoder_->Destroy(true);
+ decoder_.reset();
+ group_->Destroy(mock_decoder_.get(), false);
+ engine_.reset();
+ ::gfx::MockGLInterface::SetGLInterface(NULL);
+ gl_.reset();
+ gfx::ClearGLBindings();
+}
+
+void GLES2DecoderTestBase::TearDown() {
+ ResetDecoder();
+}
+
+void GLES2DecoderTestBase::ExpectEnableDisable(GLenum cap, bool enable) {
+ if (enable) {
+ EXPECT_CALL(*gl_, Enable(cap))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl_, Disable(cap))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+
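+// Reads the decoder's current error by executing a GetError command and
+// reading the result back from shared memory. The mock GL itself is expected
+// to report GL_NO_ERROR.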
+GLint GLES2DecoderTestBase::GetGLError() {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::GetError cmd;
+ cmd.Init(shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ return static_cast<GLint>(*GetSharedMemoryAs<GLenum*>());
+}
+
+void GLES2DecoderTestBase::DoCreateShader(
+ GLenum shader_type, GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, CreateShader(shader_type))
+ .Times(1)
+ .WillOnce(Return(service_id))
+ .RetiresOnSaturation();
+ cmds::CreateShader cmd;
+ cmd.Init(shader_type, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+bool GLES2DecoderTestBase::DoIsShader(GLuint client_id) {
+ return IsObjectHelper<cmds::IsShader, cmds::IsShader::Result>(client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteShader(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, DeleteShader(service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::DeleteShader cmd;
+ cmd.Init(client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoCreateProgram(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, CreateProgram())
+ .Times(1)
+ .WillOnce(Return(service_id))
+ .RetiresOnSaturation();
+ cmds::CreateProgram cmd;
+ cmd.Init(client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+bool GLES2DecoderTestBase::DoIsProgram(GLuint client_id) {
+ return IsObjectHelper<cmds::IsProgram, cmds::IsProgram::Result>(client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteProgram(
+ GLuint client_id, GLuint /* service_id */) {
+ cmds::DeleteProgram cmd;
+ cmd.Init(client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::SetBucketAsCString(
+ uint32 bucket_id, const char* str) {
+ uint32 size = str ? (strlen(str) + 1) : 0;
+ cmd::SetBucketSize cmd1;
+ cmd1.Init(bucket_id, size);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ if (str) {
+ memcpy(shared_memory_address_, str, size);
+ cmd::SetBucketData cmd2;
+ cmd2.Init(bucket_id, 0, size, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ ClearSharedMemory();
+ }
+}
+
+void GLES2DecoderTestBase::SetupClearTextureExpectations(
+ GLuint service_id,
+ GLuint old_service_id,
+ GLenum bind_target,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLenum format,
+ GLenum type,
+ GLsizei width,
+ GLsizei height) {
+ EXPECT_CALL(*gl_, BindTexture(bind_target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, TexImage2D(
+ target, level, internal_format, width, height, 0, format, type, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(bind_target, old_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForFramebufferClearing(
+ GLenum target,
+ GLuint clear_bits,
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test) {
+ SetupExpectationsForFramebufferClearingMulti(
+ 0,
+ 0,
+ target,
+ clear_bits,
+ restore_red,
+ restore_green,
+ restore_blue,
+ restore_alpha,
+ restore_stencil,
+ restore_depth,
+ restore_scissor_test);
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForRestoreClearState(
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test) {
+ EXPECT_CALL(*gl_, ClearColor(
+ restore_red, restore_green, restore_blue, restore_alpha))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearStencil(restore_stencil))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearDepth(restore_depth))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (restore_scissor_test) {
+ EXPECT_CALL(*gl_, Enable(GL_SCISSOR_TEST))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForFramebufferClearingMulti(
+ GLuint read_framebuffer_service_id,
+ GLuint draw_framebuffer_service_id,
+ GLenum target,
+ GLuint clear_bits,
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test) {
+ // TODO(gman): Figure out why InSequence stopped working.
+ // InSequence sequence;
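+  // Expected order: completeness check, optional rebind so the read
+  // framebuffer becomes the draw target, per-buffer clear-state setup,
+  // scissor disable, the Clear itself, then clear-state and binding restore.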
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(target))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ if (target == GL_READ_FRAMEBUFFER_EXT) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_READ_FRAMEBUFFER_EXT, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindFramebufferEXT(
+ GL_DRAW_FRAMEBUFFER_EXT, read_framebuffer_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if ((clear_bits & GL_COLOR_BUFFER_BIT) != 0) {
+ EXPECT_CALL(*gl_, ClearColor(0.0f, 0.0f, 0.0f, 0.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ SetupExpectationsForColorMask(true, true, true, true);
+ }
+ if ((clear_bits & GL_STENCIL_BUFFER_BIT) != 0) {
+ EXPECT_CALL(*gl_, ClearStencil(0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilMask(static_cast<GLuint>(-1)))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if ((clear_bits & GL_DEPTH_BUFFER_BIT) != 0) {
+ EXPECT_CALL(*gl_, ClearDepth(1.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ SetupExpectationsForDepthMask(true);
+ }
+ SetupExpectationsForEnableDisable(GL_SCISSOR_TEST, false);
+ EXPECT_CALL(*gl_, Clear(clear_bits))
+ .Times(1)
+ .RetiresOnSaturation();
+ SetupExpectationsForRestoreClearState(
+ restore_red, restore_green, restore_blue, restore_alpha,
+ restore_stencil, restore_depth, restore_scissor_test);
+ if (target == GL_READ_FRAMEBUFFER_EXT) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(
+ GL_READ_FRAMEBUFFER_EXT, read_framebuffer_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindFramebufferEXT(
+ GL_DRAW_FRAMEBUFFER_EXT, draw_framebuffer_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupShaderForUniform(GLenum uniform_type) {
+ static AttribInfo attribs[] = {
+ { "foo", 1, GL_FLOAT, 1, },
+ { "goo", 1, GL_FLOAT, 2, },
+ };
+ UniformInfo uniforms[] = {
+ { "bar", 1, uniform_type, 0, 2, -1, },
+ { "car", 4, uniform_type, 1, 1, -1, },
+ };
+ const GLuint kClientVertexShaderId = 5001;
+ const GLuint kServiceVertexShaderId = 6001;
+ const GLuint kClientFragmentShaderId = 5002;
+ const GLuint kServiceFragmentShaderId = 6002;
+ SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
+ client_program_id_, kServiceProgramId,
+ kClientVertexShaderId, kServiceVertexShaderId,
+ kClientFragmentShaderId, kServiceFragmentShaderId);
+
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoBindBuffer(
+ GLenum target, GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, BindBuffer(target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::BindBuffer cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+bool GLES2DecoderTestBase::DoIsBuffer(GLuint client_id) {
+ return IsObjectHelper<cmds::IsBuffer, cmds::IsBuffer::Result>(client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteBuffer(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ GenHelper<cmds::DeleteBuffersImmediate>(client_id);
+}
+
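+// The SetupExpectationsFor*Mask helpers model the decoder's state cache: a GL
+// call is expected only when the cached value changes (or when cached state is
+// ignored for the test), so redundant mask updates need no expectations.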
+void GLES2DecoderTestBase::SetupExpectationsForColorMask(bool red,
+ bool green,
+ bool blue,
+ bool alpha) {
+ if (ignore_cached_state_for_test_ || cached_color_mask_red_ != red ||
+ cached_color_mask_green_ != green || cached_color_mask_blue_ != blue ||
+ cached_color_mask_alpha_ != alpha) {
+ cached_color_mask_red_ = red;
+ cached_color_mask_green_ = green;
+ cached_color_mask_blue_ = blue;
+ cached_color_mask_alpha_ = alpha;
+ EXPECT_CALL(*gl_, ColorMask(red, green, blue, alpha))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForDepthMask(bool mask) {
+ if (ignore_cached_state_for_test_ || cached_depth_mask_ != mask) {
+ cached_depth_mask_ = mask;
+ EXPECT_CALL(*gl_, DepthMask(mask)).Times(1).RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForStencilMask(GLuint front_mask,
+ GLuint back_mask) {
+ if (ignore_cached_state_for_test_ ||
+ cached_stencil_front_mask_ != front_mask) {
+ cached_stencil_front_mask_ = front_mask;
+ EXPECT_CALL(*gl_, StencilMaskSeparate(GL_FRONT, front_mask))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+
+ if (ignore_cached_state_for_test_ ||
+ cached_stencil_back_mask_ != back_mask) {
+ cached_stencil_back_mask_ = back_mask;
+ EXPECT_CALL(*gl_, StencilMaskSeparate(GL_BACK, back_mask))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForEnableDisable(GLenum cap,
+ bool enable) {
+ switch (cap) {
+ case GL_BLEND:
+ if (enable_flags_.cached_blend == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_blend = enable;
+ break;
+ case GL_CULL_FACE:
+ if (enable_flags_.cached_cull_face == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_cull_face = enable;
+ break;
+ case GL_DEPTH_TEST:
+ if (enable_flags_.cached_depth_test == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_depth_test = enable;
+ break;
+ case GL_DITHER:
+ if (enable_flags_.cached_dither == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_dither = enable;
+ break;
+ case GL_POLYGON_OFFSET_FILL:
+ if (enable_flags_.cached_polygon_offset_fill == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_polygon_offset_fill = enable;
+ break;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ if (enable_flags_.cached_sample_alpha_to_coverage == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_sample_alpha_to_coverage = enable;
+ break;
+ case GL_SAMPLE_COVERAGE:
+ if (enable_flags_.cached_sample_coverage == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_sample_coverage = enable;
+ break;
+ case GL_SCISSOR_TEST:
+ if (enable_flags_.cached_scissor_test == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_scissor_test = enable;
+ break;
+ case GL_STENCIL_TEST:
+ if (enable_flags_.cached_stencil_test == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_stencil_test = enable;
+ break;
+ default:
+ NOTREACHED();
+ return;
+ }
+ if (enable) {
+ EXPECT_CALL(*gl_, Enable(cap)).Times(1).RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl_, Disable(cap)).Times(1).RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForApplyingDirtyState(
+ bool framebuffer_is_rgb,
+ bool framebuffer_has_depth,
+ bool framebuffer_has_stencil,
+ GLuint color_bits,
+ bool depth_mask,
+ bool depth_enabled,
+ GLuint front_stencil_mask,
+ GLuint back_stencil_mask,
+ bool stencil_enabled) {
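+  // color_bits packs one channel per nibble: 0x1000 = red, 0x0100 = green,
+  // 0x0010 = blue, 0x0001 = alpha (see the note in the header declaration).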
+ bool color_mask_red = (color_bits & 0x1000) != 0;
+ bool color_mask_green = (color_bits & 0x0100) != 0;
+ bool color_mask_blue = (color_bits & 0x0010) != 0;
+ bool color_mask_alpha = (color_bits & 0x0001) && !framebuffer_is_rgb;
+
+ SetupExpectationsForColorMask(
+ color_mask_red, color_mask_green, color_mask_blue, color_mask_alpha);
+ SetupExpectationsForDepthMask(depth_mask);
+ SetupExpectationsForStencilMask(front_stencil_mask, back_stencil_mask);
+ SetupExpectationsForEnableDisable(GL_DEPTH_TEST,
+ framebuffer_has_depth && depth_enabled);
+ SetupExpectationsForEnableDisable(GL_STENCIL_TEST,
+ framebuffer_has_stencil && stencil_enabled);
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForApplyingDefaultDirtyState() {
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ true, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+}
+
+GLES2DecoderTestBase::EnableFlags::EnableFlags()
+ : cached_blend(false),
+ cached_cull_face(false),
+ cached_depth_test(false),
+ cached_dither(true),
+ cached_polygon_offset_fill(false),
+ cached_sample_alpha_to_coverage(false),
+ cached_sample_coverage(false),
+ cached_scissor_test(false),
+ cached_stencil_test(false) {
+}
+
+void GLES2DecoderTestBase::DoBindFramebuffer(
+ GLenum target, GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::BindFramebuffer cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+bool GLES2DecoderTestBase::DoIsFramebuffer(GLuint client_id) {
+ return IsObjectHelper<cmds::IsFramebuffer, cmds::IsFramebuffer::Result>(
+ client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteFramebuffer(
+ GLuint client_id, GLuint service_id,
+ bool reset_draw, GLenum draw_target, GLuint draw_id,
+ bool reset_read, GLenum read_target, GLuint read_id) {
+ if (reset_draw) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(draw_target, draw_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (reset_read) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(read_target, read_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ GenHelper<cmds::DeleteFramebuffersImmediate>(client_id);
+}
+
+void GLES2DecoderTestBase::DoBindRenderbuffer(
+ GLenum target, GLuint client_id, GLuint service_id) {
+ service_renderbuffer_id_ = service_id;
+ service_renderbuffer_valid_ = true;
+ EXPECT_CALL(*gl_, BindRenderbufferEXT(target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::BindRenderbuffer cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoRenderbufferStorageMultisampleCHROMIUM(
+ GLenum target,
+ GLsizei samples,
+ GLenum internal_format,
+ GLenum gl_format,
+ GLsizei width,
+ GLsizei height) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ RenderbufferStorageMultisampleEXT(
+ target, samples, gl_format, width, height))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::RenderbufferStorageMultisampleCHROMIUM cmd;
+ cmd.Init(target, samples, internal_format, width, height);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+void GLES2DecoderTestBase::RestoreRenderbufferBindings() {
+ GetDecoder()->RestoreRenderbufferBindings();
+ service_renderbuffer_valid_ = false;
+}
+
+void GLES2DecoderTestBase::EnsureRenderbufferBound(bool expect_bind) {
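+  // A rebind should only be expected when the cached binding was invalidated,
+  // e.g. via RestoreRenderbufferBindings(); the EXPECT_NE below enforces that
+  // callers pass a consistent expectation.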
+ EXPECT_NE(expect_bind, service_renderbuffer_valid_);
+
+ if (expect_bind) {
+ service_renderbuffer_valid_ = true;
+ EXPECT_CALL(*gl_,
+ BindRenderbufferEXT(GL_RENDERBUFFER, service_renderbuffer_id_))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl_, BindRenderbufferEXT(_, _)).Times(0);
+ }
+}
+
+bool GLES2DecoderTestBase::DoIsRenderbuffer(GLuint client_id) {
+ return IsObjectHelper<cmds::IsRenderbuffer, cmds::IsRenderbuffer::Result>(
+ client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteRenderbuffer(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ GenHelper<cmds::DeleteRenderbuffersImmediate>(client_id);
+}
+
+void GLES2DecoderTestBase::DoBindTexture(
+ GLenum target, GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, BindTexture(target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::BindTexture cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+bool GLES2DecoderTestBase::DoIsTexture(GLuint client_id) {
+ return IsObjectHelper<cmds::IsTexture, cmds::IsTexture::Result>(client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteTexture(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, DeleteTextures(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ GenHelper<cmds::DeleteTexturesImmediate>(client_id);
+}
+
+void GLES2DecoderTestBase::DoTexImage2D(
+ GLenum target, GLint level, GLenum internal_format,
+ GLsizei width, GLsizei height, GLint border,
+ GLenum format, GLenum type,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, TexImage2D(target, level, internal_format,
+ width, height, border, format, type, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::TexImage2D cmd;
+ cmd.Init(target, level, internal_format, width, height, format,
+ type, shared_memory_id, shared_memory_offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoTexImage2DConvertInternalFormat(
+ GLenum target, GLint level, GLenum requested_internal_format,
+ GLsizei width, GLsizei height, GLint border,
+ GLenum format, GLenum type,
+ uint32 shared_memory_id, uint32 shared_memory_offset,
+ GLenum expected_internal_format) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, TexImage2D(target, level, expected_internal_format,
+ width, height, border, format, type, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::TexImage2D cmd;
+ cmd.Init(target, level, requested_internal_format, width, height,
+ format, type, shared_memory_id, shared_memory_offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoCompressedTexImage2D(
+ GLenum target, GLint level, GLenum format,
+ GLsizei width, GLsizei height, GLint border,
+ GLsizei size, uint32 bucket_id) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, CompressedTexImage2D(
+ target, level, format, width, height, border, size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ CommonDecoder::Bucket* bucket = decoder_->CreateBucket(bucket_id);
+ bucket->SetSize(size);
+ cmds::CompressedTexImage2DBucket cmd;
+ cmd.Init(
+ target, level, format, width, height,
+ bucket_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoRenderbufferStorage(
+ GLenum target, GLenum internal_format, GLenum actual_format,
+ GLsizei width, GLsizei height, GLenum error) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(
+ target, actual_format, width, height))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(error))
+ .RetiresOnSaturation();
+ cmds::RenderbufferStorage cmd;
+ cmd.Init(target, internal_format, width, height);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoFramebufferTexture2D(
+ GLenum target, GLenum attachment, GLenum textarget,
+ GLuint texture_client_id, GLuint texture_service_id, GLint level,
+ GLenum error) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, FramebufferTexture2DEXT(
+ target, attachment, textarget, texture_service_id, level))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(error))
+ .RetiresOnSaturation();
+ cmds::FramebufferTexture2D cmd;
+ cmd.Init(target, attachment, textarget, texture_client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoFramebufferRenderbuffer(
+ GLenum target,
+ GLenum attachment,
+ GLenum renderbuffer_target,
+ GLuint renderbuffer_client_id,
+ GLuint renderbuffer_service_id,
+ GLenum error) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(
+ target, attachment, renderbuffer_target, renderbuffer_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(error))
+ .RetiresOnSaturation();
+ cmds::FramebufferRenderbuffer cmd;
+ cmd.Init(target, attachment, renderbuffer_target, renderbuffer_client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoVertexAttribPointer(
+ GLuint index, GLint size, GLenum type, GLsizei stride, GLuint offset) {
+ EXPECT_CALL(*gl_,
+ VertexAttribPointer(index, size, type, GL_FALSE, stride,
+ BufferOffset(offset)))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::VertexAttribPointer cmd;
+ cmd.Init(index, size, GL_FLOAT, GL_FALSE, stride, offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoVertexAttribDivisorANGLE(
+ GLuint index, GLuint divisor) {
+ EXPECT_CALL(*gl_,
+ VertexAttribDivisorANGLE(index, divisor))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::VertexAttribDivisorANGLE cmd;
+ cmd.Init(index, divisor);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::AddExpectationsForGenVertexArraysOES() {
+ if (group_->feature_info()->feature_flags().native_vertex_array_object) {
+ EXPECT_CALL(*gl_, GenVertexArraysOES(1, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceVertexArrayId))
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::AddExpectationsForDeleteVertexArraysOES() {
+ if (group_->feature_info()->feature_flags().native_vertex_array_object) {
+ EXPECT_CALL(*gl_, DeleteVertexArraysOES(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::AddExpectationsForDeleteBoundVertexArraysOES() {
+ // Expectations are the same as a delete, followed by binding VAO 0.
+ AddExpectationsForDeleteVertexArraysOES();
+ AddExpectationsForBindVertexArrayOES();
+}
+
+void GLES2DecoderTestBase::AddExpectationsForBindVertexArrayOES() {
+ if (group_->feature_info()->feature_flags().native_vertex_array_object) {
+ EXPECT_CALL(*gl_, BindVertexArrayOES(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ for (uint32 vv = 0; vv < group_->max_vertex_attribs(); ++vv) {
+ AddExpectationsForRestoreAttribState(vv);
+ }
+
+ EXPECT_CALL(*gl_, BindBuffer(GL_ELEMENT_ARRAY_BUFFER, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::AddExpectationsForRestoreAttribState(GLuint attrib) {
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, _))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, VertexAttribPointer(attrib, _, _, _, _, _))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(attrib, _))
+ .Times(testing::AtMost(1))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, _))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ if (attrib != 0 ||
+ gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+    // TODO(bajones): Not sure if I can tell which of these will be called.
+ EXPECT_CALL(*gl_, EnableVertexAttribArray(attrib))
+ .Times(testing::AtMost(1))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, DisableVertexAttribArray(attrib))
+ .Times(testing::AtMost(1))
+ .RetiresOnSaturation();
+ }
+}
+
+// GCC requires these declarations, but MSVC requires that they not be present.
+#ifndef COMPILER_MSVC
+const int GLES2DecoderTestBase::kBackBufferWidth;
+const int GLES2DecoderTestBase::kBackBufferHeight;
+
+const GLint GLES2DecoderTestBase::kMaxTextureSize;
+const GLint GLES2DecoderTestBase::kMaxCubeMapTextureSize;
+const GLint GLES2DecoderTestBase::kNumVertexAttribs;
+const GLint GLES2DecoderTestBase::kNumTextureUnits;
+const GLint GLES2DecoderTestBase::kMaxTextureImageUnits;
+const GLint GLES2DecoderTestBase::kMaxVertexTextureImageUnits;
+const GLint GLES2DecoderTestBase::kMaxFragmentUniformVectors;
+const GLint GLES2DecoderTestBase::kMaxVaryingVectors;
+const GLint GLES2DecoderTestBase::kMaxVertexUniformVectors;
+const GLint GLES2DecoderTestBase::kMaxViewportWidth;
+const GLint GLES2DecoderTestBase::kMaxViewportHeight;
+
+const GLint GLES2DecoderTestBase::kViewportX;
+const GLint GLES2DecoderTestBase::kViewportY;
+const GLint GLES2DecoderTestBase::kViewportWidth;
+const GLint GLES2DecoderTestBase::kViewportHeight;
+
+const GLuint GLES2DecoderTestBase::kServiceAttrib0BufferId;
+const GLuint GLES2DecoderTestBase::kServiceFixedAttribBufferId;
+
+const GLuint GLES2DecoderTestBase::kServiceBufferId;
+const GLuint GLES2DecoderTestBase::kServiceFramebufferId;
+const GLuint GLES2DecoderTestBase::kServiceRenderbufferId;
+const GLuint GLES2DecoderTestBase::kServiceTextureId;
+const GLuint GLES2DecoderTestBase::kServiceProgramId;
+const GLuint GLES2DecoderTestBase::kServiceShaderId;
+const GLuint GLES2DecoderTestBase::kServiceElementBufferId;
+const GLuint GLES2DecoderTestBase::kServiceQueryId;
+const GLuint GLES2DecoderTestBase::kServiceVertexArrayId;
+
+const int32 GLES2DecoderTestBase::kSharedMemoryId;
+const size_t GLES2DecoderTestBase::kSharedBufferSize;
+const uint32 GLES2DecoderTestBase::kSharedMemoryOffset;
+const int32 GLES2DecoderTestBase::kInvalidSharedMemoryId;
+const uint32 GLES2DecoderTestBase::kInvalidSharedMemoryOffset;
+const uint32 GLES2DecoderTestBase::kInitialResult;
+const uint8 GLES2DecoderTestBase::kInitialMemoryValue;
+
+const uint32 GLES2DecoderTestBase::kNewClientId;
+const uint32 GLES2DecoderTestBase::kNewServiceId;
+const uint32 GLES2DecoderTestBase::kInvalidClientId;
+
+const GLuint GLES2DecoderTestBase::kServiceVertexShaderId;
+const GLuint GLES2DecoderTestBase::kServiceFragmentShaderId;
+
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumShaderId;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumProgramId;
+
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumTextureBufferId;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumVertexBufferId;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumFBOId;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumPositionAttrib;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumTexAttrib;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumSamplerLocation;
+
+const GLsizei GLES2DecoderTestBase::kNumVertices;
+const GLsizei GLES2DecoderTestBase::kNumIndices;
+const int GLES2DecoderTestBase::kValidIndexRangeStart;
+const int GLES2DecoderTestBase::kValidIndexRangeCount;
+const int GLES2DecoderTestBase::kInvalidIndexRangeStart;
+const int GLES2DecoderTestBase::kInvalidIndexRangeCount;
+const int GLES2DecoderTestBase::kOutOfRangeIndexRangeEnd;
+const GLuint GLES2DecoderTestBase::kMaxValidIndex;
+
+const GLint GLES2DecoderTestBase::kMaxAttribLength;
+const GLint GLES2DecoderTestBase::kAttrib1Size;
+const GLint GLES2DecoderTestBase::kAttrib2Size;
+const GLint GLES2DecoderTestBase::kAttrib3Size;
+const GLint GLES2DecoderTestBase::kAttrib1Location;
+const GLint GLES2DecoderTestBase::kAttrib2Location;
+const GLint GLES2DecoderTestBase::kAttrib3Location;
+const GLenum GLES2DecoderTestBase::kAttrib1Type;
+const GLenum GLES2DecoderTestBase::kAttrib2Type;
+const GLenum GLES2DecoderTestBase::kAttrib3Type;
+const GLint GLES2DecoderTestBase::kInvalidAttribLocation;
+const GLint GLES2DecoderTestBase::kBadAttribIndex;
+
+const GLint GLES2DecoderTestBase::kMaxUniformLength;
+const GLint GLES2DecoderTestBase::kUniform1Size;
+const GLint GLES2DecoderTestBase::kUniform2Size;
+const GLint GLES2DecoderTestBase::kUniform3Size;
+const GLint GLES2DecoderTestBase::kUniform1RealLocation;
+const GLint GLES2DecoderTestBase::kUniform2RealLocation;
+const GLint GLES2DecoderTestBase::kUniform2ElementRealLocation;
+const GLint GLES2DecoderTestBase::kUniform3RealLocation;
+const GLint GLES2DecoderTestBase::kUniform1FakeLocation;
+const GLint GLES2DecoderTestBase::kUniform2FakeLocation;
+const GLint GLES2DecoderTestBase::kUniform2ElementFakeLocation;
+const GLint GLES2DecoderTestBase::kUniform3FakeLocation;
+const GLint GLES2DecoderTestBase::kUniform1DesiredLocation;
+const GLint GLES2DecoderTestBase::kUniform2DesiredLocation;
+const GLint GLES2DecoderTestBase::kUniform3DesiredLocation;
+const GLenum GLES2DecoderTestBase::kUniform1Type;
+const GLenum GLES2DecoderTestBase::kUniform2Type;
+const GLenum GLES2DecoderTestBase::kUniform3Type;
+const GLenum GLES2DecoderTestBase::kUniformCubemapType;
+const GLint GLES2DecoderTestBase::kInvalidUniformLocation;
+const GLint GLES2DecoderTestBase::kBadUniformIndex;
+
+#endif
+
+const char* GLES2DecoderTestBase::kAttrib1Name = "attrib1";
+const char* GLES2DecoderTestBase::kAttrib2Name = "attrib2";
+const char* GLES2DecoderTestBase::kAttrib3Name = "attrib3";
+const char* GLES2DecoderTestBase::kUniform1Name = "uniform1";
+const char* GLES2DecoderTestBase::kUniform2Name = "uniform2[0]";
+const char* GLES2DecoderTestBase::kUniform3Name = "uniform3[0]";
+
+void GLES2DecoderTestBase::SetupDefaultProgram() {
+ {
+ static AttribInfo attribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ static UniformInfo uniforms[] = {
+ { kUniform1Name, kUniform1Size, kUniform1Type,
+ kUniform1FakeLocation, kUniform1RealLocation,
+ kUniform1DesiredLocation },
+ { kUniform2Name, kUniform2Size, kUniform2Type,
+ kUniform2FakeLocation, kUniform2RealLocation,
+ kUniform2DesiredLocation },
+ { kUniform3Name, kUniform3Size, kUniform3Type,
+ kUniform3FakeLocation, kUniform3RealLocation,
+ kUniform3DesiredLocation },
+ };
+ SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
+ client_program_id_, kServiceProgramId,
+ client_vertex_shader_id_, kServiceVertexShaderId,
+ client_fragment_shader_id_, kServiceFragmentShaderId);
+ }
+
+ {
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+void GLES2DecoderTestBase::SetupCubemapProgram() {
+ {
+ static AttribInfo attribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ static UniformInfo uniforms[] = {
+ { kUniform1Name, kUniform1Size, kUniformCubemapType,
+ kUniform1FakeLocation, kUniform1RealLocation,
+ kUniform1DesiredLocation, },
+ { kUniform2Name, kUniform2Size, kUniform2Type,
+ kUniform2FakeLocation, kUniform2RealLocation,
+ kUniform2DesiredLocation, },
+ { kUniform3Name, kUniform3Size, kUniform3Type,
+ kUniform3FakeLocation, kUniform3RealLocation,
+ kUniform3DesiredLocation, },
+ };
+ SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
+ client_program_id_, kServiceProgramId,
+ client_vertex_shader_id_, kServiceVertexShaderId,
+ client_fragment_shader_id_, kServiceFragmentShaderId);
+ }
+
+ {
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+void GLES2DecoderTestBase::SetupSamplerExternalProgram() {
+ {
+ static AttribInfo attribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ static UniformInfo uniforms[] = {
+ { kUniform1Name, kUniform1Size, kUniformSamplerExternalType,
+ kUniform1FakeLocation, kUniform1RealLocation,
+ kUniform1DesiredLocation, },
+ { kUniform2Name, kUniform2Size, kUniform2Type,
+ kUniform2FakeLocation, kUniform2RealLocation,
+ kUniform2DesiredLocation, },
+ { kUniform3Name, kUniform3Size, kUniform3Type,
+ kUniform3FakeLocation, kUniform3RealLocation,
+ kUniform3DesiredLocation, },
+ };
+ SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
+ client_program_id_, kServiceProgramId,
+ client_vertex_shader_id_, kServiceVertexShaderId,
+ client_fragment_shader_id_, kServiceFragmentShaderId);
+ }
+
+ {
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+void GLES2DecoderWithShaderTestBase::TearDown() {
+ GLES2DecoderTestBase::TearDown();
+}
+
+void GLES2DecoderTestBase::SetupShader(
+ GLES2DecoderTestBase::AttribInfo* attribs, size_t num_attribs,
+ GLES2DecoderTestBase::UniformInfo* uniforms, size_t num_uniforms,
+ GLuint program_client_id, GLuint program_service_id,
+ GLuint vertex_shader_client_id, GLuint vertex_shader_service_id,
+ GLuint fragment_shader_client_id, GLuint fragment_shader_service_id) {
+ {
+ InSequence s;
+
+ EXPECT_CALL(*gl_,
+ AttachShader(program_service_id, vertex_shader_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ AttachShader(program_service_id, fragment_shader_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ TestHelper::SetupShader(
+ gl_.get(), attribs, num_attribs, uniforms, num_uniforms,
+ program_service_id);
+ }
+
+ DoCreateShader(
+ GL_VERTEX_SHADER, vertex_shader_client_id, vertex_shader_service_id);
+ DoCreateShader(
+ GL_FRAGMENT_SHADER, fragment_shader_client_id,
+ fragment_shader_service_id);
+
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(vertex_shader_client_id), true);
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(fragment_shader_client_id), true);
+
+ cmds::AttachShader attach_cmd;
+ attach_cmd.Init(program_client_id, vertex_shader_client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+
+ attach_cmd.Init(program_client_id, fragment_shader_client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+
+ cmds::LinkProgram link_cmd;
+ link_cmd.Init(program_client_id);
+
+ EXPECT_EQ(error::kNoError, ExecuteCmd(link_cmd));
+}
+
+void GLES2DecoderTestBase::DoEnableDisable(GLenum cap, bool enable) {
+ SetupExpectationsForEnableDisable(cap, enable);
+ if (enable) {
+ cmds::Enable cmd;
+ cmd.Init(cap);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ } else {
+ cmds::Disable cmd;
+ cmd.Init(cap);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+void GLES2DecoderTestBase::DoEnableVertexAttribArray(GLint index) {
+ EXPECT_CALL(*gl_, EnableVertexAttribArray(index))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::EnableVertexAttribArray cmd;
+ cmd.Init(index);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoBufferData(GLenum target, GLsizei size) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BufferData(target, size, _, GL_STREAM_DRAW))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::BufferData cmd;
+ cmd.Init(target, size, 0, 0, GL_STREAM_DRAW);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoBufferSubData(
+ GLenum target, GLint offset, GLsizei size, const void* data) {
+ EXPECT_CALL(*gl_, BufferSubData(target, offset, size,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ memcpy(shared_memory_address_, data, size);
+ cmds::BufferSubData cmd;
+ cmd.Init(target, offset, size, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::SetupVertexBuffer() {
+ DoEnableVertexAttribArray(1);
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ GLfloat f = 0;
+ DoBufferData(GL_ARRAY_BUFFER, kNumVertices * 2 * sizeof(f));
+}
+
+void GLES2DecoderTestBase::SetupAllNeededVertexBuffers() {
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ DoBufferData(GL_ARRAY_BUFFER, kNumVertices * 16 * sizeof(float));
+ DoEnableVertexAttribArray(0);
+ DoEnableVertexAttribArray(1);
+ DoEnableVertexAttribArray(2);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribPointer(2, 2, GL_FLOAT, 0, 0);
+}
+
+void GLES2DecoderTestBase::SetupIndexBuffer() {
+ DoBindBuffer(GL_ELEMENT_ARRAY_BUFFER,
+ client_element_buffer_id_,
+ kServiceElementBufferId);
+ static const GLshort indices[] = {100, 1, 2, 3, 4, 5, 6, 7, 100, 9};
+ COMPILE_ASSERT(arraysize(indices) == kNumIndices, Indices_is_not_10);
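+  // The value 100 equals kNumVertices and is therefore out of range;
+  // presumably it bookends the data so the kInvalidIndexRange* constants
+  // cover at least one bad index.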
+ DoBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices));
+ DoBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, 2, indices);
+ DoBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 2, sizeof(indices) - 2, &indices[1]);
+}
+
+void GLES2DecoderTestBase::SetupTexture() {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ kSharedMemoryId, kSharedMemoryOffset);
+}
+
+void GLES2DecoderTestBase::DeleteVertexBuffer() {
+ DoDeleteBuffer(client_buffer_id_, kServiceBufferId);
+}
+
+void GLES2DecoderTestBase::DeleteIndexBuffer() {
+ DoDeleteBuffer(client_element_buffer_id_, kServiceElementBufferId);
+}
+
+void GLES2DecoderTestBase::AddExpectationsForSimulatedAttrib0WithError(
+ GLsizei num_vertices, GLuint buffer_id, GLenum error) {
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ return;
+ }
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(error))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, kServiceAttrib0BufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BufferData(GL_ARRAY_BUFFER,
+ num_vertices * sizeof(GLfloat) * 4,
+ _, GL_DYNAMIC_DRAW))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (error == GL_NO_ERROR) {
+ EXPECT_CALL(*gl_, BufferSubData(
+ GL_ARRAY_BUFFER, 0, num_vertices * sizeof(GLfloat) * 4, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, NULL))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, buffer_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::AddExpectationsForSimulatedAttrib0(
+ GLsizei num_vertices, GLuint buffer_id) {
+ AddExpectationsForSimulatedAttrib0WithError(
+ num_vertices, buffer_id, GL_NO_ERROR);
+}
+
+void GLES2DecoderTestBase::SetupMockGLBehaviors() {
+ ON_CALL(*gl_, BindVertexArrayOES(_))
+ .WillByDefault(Invoke(
+ &gl_states_,
+ &GLES2DecoderTestBase::MockGLStates::OnBindVertexArrayOES));
+ ON_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, _))
+ .WillByDefault(WithArg<1>(Invoke(
+ &gl_states_,
+ &GLES2DecoderTestBase::MockGLStates::OnBindArrayBuffer)));
+ ON_CALL(*gl_, VertexAttribPointer(_, _, _, _, _, NULL))
+ .WillByDefault(InvokeWithoutArgs(
+ &gl_states_,
+ &GLES2DecoderTestBase::MockGLStates::OnVertexAttribNullPointer));
+}
+
+GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::
+MockCommandBufferEngine() {
+ scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
+ shm->CreateAndMapAnonymous(kSharedBufferSize);
+ valid_buffer_ = MakeBufferFromSharedMemory(shm.Pass(), kSharedBufferSize);
+
+ ClearSharedMemory();
+}
+
+GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::
+~MockCommandBufferEngine() {}
+
+scoped_refptr<gpu::Buffer>
+GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::GetSharedMemoryBuffer(
+ int32 shm_id) {
+ return shm_id == kSharedMemoryId ? valid_buffer_ : invalid_buffer_;
+}
+
+void GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::set_token(
+ int32 token) {
+ DCHECK(false);
+}
+
+bool GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::SetGetBuffer(
+ int32 /* transfer_buffer_id */) {
+ DCHECK(false);
+ return false;
+}
+
+bool GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::SetGetOffset(
+ int32 offset) {
+ DCHECK(false);
+ return false;
+}
+
+int32 GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::GetGetOffset() {
+ DCHECK(false);
+ return 0;
+}
+
+void GLES2DecoderWithShaderTestBase::SetUp() {
+ GLES2DecoderTestBase::SetUp();
+ SetupDefaultProgram();
+}
+
+// Include the auto-generated part of this file. The file is split so the
+// hand-written parts can be edited here directly instead of in a template or
+// in the code generator.
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
new file mode 100644
index 0000000..0730752
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
@@ -0,0 +1,643 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_BASE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_BASE_H_
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_context_stub_with_extensions.h"
+#include "ui/gl/gl_surface_stub.h"
+#include "ui/gl/gl_mock.h"
+
+namespace base {
+class CommandLine;
+}
+
+namespace gpu {
+namespace gles2 {
+
+class MemoryTracker;
+
+class GLES2DecoderTestBase : public ::testing::TestWithParam<bool> {
+ public:
+ GLES2DecoderTestBase();
+ virtual ~GLES2DecoderTestBase();
+
+ // Template to call glGenXXX functions.
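+  // For example, GenHelper<cmds::GenBuffersImmediate>(client_buffer_id_)
+  // issues a GenBuffers command for a single client id.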
+ template <typename T>
+ void GenHelper(GLuint client_id) {
+ int8 buffer[sizeof(T) + sizeof(client_id)];
+ T& cmd = *reinterpret_cast<T*>(&buffer);
+ cmd.Init(1, &client_id);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_id)));
+ }
+
+ // This template exists solely so we can specialize it for
+ // certain commands.
+ template <typename T, int id>
+ void SpecializedSetup(bool valid) {
+ }
+
+ template <typename T>
+ T* GetImmediateAs() {
+ return reinterpret_cast<T*>(immediate_buffer_);
+ }
+
+ template <typename T, typename Command>
+ T GetImmediateDataAs(Command* cmd) {
+ return reinterpret_cast<T>(ImmediateDataAddress(cmd));
+ }
+
+ void ClearSharedMemory() {
+ engine_->ClearSharedMemory();
+ }
+
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ template <typename T>
+ error::Error ExecuteCmd(const T& cmd) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
+ return decoder_->DoCommands(
+ 1, (const void*)&cmd, ComputeNumEntries(sizeof(cmd)), 0);
+ }
+
+ template <typename T>
+ error::Error ExecuteImmediateCmd(const T& cmd, size_t data_size) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ return decoder_->DoCommands(
+ 1, (const void*)&cmd, ComputeNumEntries(sizeof(cmd) + data_size), 0);
+ }
+
+ template <typename T>
+ T GetSharedMemoryAs() {
+ return reinterpret_cast<T>(shared_memory_address_);
+ }
+
+ template <typename T>
+ T GetSharedMemoryAsWithOffset(uint32 offset) {
+ void* ptr = reinterpret_cast<int8*>(shared_memory_address_) + offset;
+ return reinterpret_cast<T>(ptr);
+ }
+
+ Buffer* GetBuffer(GLuint service_id) {
+ return group_->buffer_manager()->GetBuffer(service_id);
+ }
+
+ Framebuffer* GetFramebuffer(GLuint service_id) {
+ return group_->framebuffer_manager()->GetFramebuffer(service_id);
+ }
+
+ Renderbuffer* GetRenderbuffer(
+ GLuint service_id) {
+ return group_->renderbuffer_manager()->GetRenderbuffer(service_id);
+ }
+
+ TextureRef* GetTexture(GLuint client_id) {
+ return group_->texture_manager()->GetTexture(client_id);
+ }
+
+ Shader* GetShader(GLuint client_id) {
+ return group_->shader_manager()->GetShader(client_id);
+ }
+
+ Program* GetProgram(GLuint client_id) {
+ return group_->program_manager()->GetProgram(client_id);
+ }
+
+ QueryManager::Query* GetQueryInfo(GLuint client_id) {
+ return decoder_->GetQueryManager()->GetQuery(client_id);
+ }
+
+ // This name doesn't match the underlying function, but doing it this way
+  // prevents the need to special-case the unit test generation.
+ VertexAttribManager* GetVertexArrayInfo(GLuint client_id) {
+ return decoder_->GetVertexArrayManager()->GetVertexAttribManager(client_id);
+ }
+
+ ProgramManager* program_manager() {
+ return group_->program_manager();
+ }
+
+ ImageManager* GetImageManager() { return decoder_->GetImageManager(); }
+
+ void DoCreateProgram(GLuint client_id, GLuint service_id);
+ void DoCreateShader(GLenum shader_type, GLuint client_id, GLuint service_id);
+
+ void SetBucketAsCString(uint32 bucket_id, const char* str);
+
+ void set_memory_tracker(MemoryTracker* memory_tracker) {
+ memory_tracker_ = memory_tracker;
+ }
+
+ struct InitState {
+ InitState();
+
+ std::string extensions;
+ std::string gl_version;
+ bool has_alpha;
+ bool has_depth;
+ bool has_stencil;
+ bool request_alpha;
+ bool request_depth;
+ bool request_stencil;
+ bool bind_generates_resource;
+ bool lose_context_when_out_of_memory;
+ bool use_native_vao; // default is true.
+ };
+
+ void InitDecoder(const InitState& init);
+ void InitDecoderWithCommandLine(const InitState& init,
+ const base::CommandLine* command_line);
+
+ void ResetDecoder();
+
+ const ContextGroup& group() const {
+ return *group_.get();
+ }
+
+ ::testing::StrictMock< ::gfx::MockGLInterface>* GetGLMock() const {
+ return gl_.get();
+ }
+
+ GLES2Decoder* GetDecoder() const {
+ return decoder_.get();
+ }
+
+ typedef TestHelper::AttribInfo AttribInfo;
+ typedef TestHelper::UniformInfo UniformInfo;
+
+ void SetupShader(
+ AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint client_id, GLuint service_id,
+ GLuint vertex_shader_client_id, GLuint vertex_shader_service_id,
+ GLuint fragment_shader_client_id, GLuint fragment_shader_service_id);
+
+ void SetupInitCapabilitiesExpectations();
+ void SetupInitStateExpectations();
+ void ExpectEnableDisable(GLenum cap, bool enable);
+
+  // Sets up a shader for testing glUniform.
+ void SetupShaderForUniform(GLenum uniform_type);
+ void SetupDefaultProgram();
+ void SetupCubemapProgram();
+ void SetupSamplerExternalProgram();
+ void SetupTexture();
+
+ // Note that the error is returned as GLint instead of GLenum.
+ // This is because there is a mismatch in the types of GLenum and
+ // the error values GL_NO_ERROR, GL_INVALID_ENUM, etc. GLenum is
+ // typedef'd as unsigned int while the error values are defined as
+ // integers. This is problematic for template functions such as
+ // EXPECT_EQ that expect both types to be the same.
+ GLint GetGLError();
+
+ void DoBindBuffer(GLenum target, GLuint client_id, GLuint service_id);
+ void DoBindFramebuffer(GLenum target, GLuint client_id, GLuint service_id);
+ void DoBindRenderbuffer(GLenum target, GLuint client_id, GLuint service_id);
+ void DoRenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internal_format,
+ GLenum gl_format,
+ GLsizei width,
+ GLsizei height);
+ void RestoreRenderbufferBindings();
+ void EnsureRenderbufferBound(bool expect_bind);
+ void DoBindTexture(GLenum target, GLuint client_id, GLuint service_id);
+ void DoBindVertexArrayOES(GLuint client_id, GLuint service_id);
+
+ bool DoIsBuffer(GLuint client_id);
+ bool DoIsFramebuffer(GLuint client_id);
+ bool DoIsProgram(GLuint client_id);
+ bool DoIsRenderbuffer(GLuint client_id);
+ bool DoIsShader(GLuint client_id);
+ bool DoIsTexture(GLuint client_id);
+
+ void DoDeleteBuffer(GLuint client_id, GLuint service_id);
+ void DoDeleteFramebuffer(
+ GLuint client_id, GLuint service_id,
+ bool reset_draw, GLenum draw_target, GLuint draw_id,
+ bool reset_read, GLenum read_target, GLuint read_id);
+ void DoDeleteProgram(GLuint client_id, GLuint service_id);
+ void DoDeleteRenderbuffer(GLuint client_id, GLuint service_id);
+ void DoDeleteShader(GLuint client_id, GLuint service_id);
+ void DoDeleteTexture(GLuint client_id, GLuint service_id);
+
+ void DoCompressedTexImage2D(
+ GLenum target, GLint level, GLenum format,
+ GLsizei width, GLsizei height, GLint border,
+ GLsizei size, uint32 bucket_id);
+ void DoTexImage2D(
+ GLenum target, GLint level, GLenum internal_format,
+ GLsizei width, GLsizei height, GLint border,
+ GLenum format, GLenum type,
+ uint32 shared_memory_id, uint32 shared_memory_offset);
+ void DoTexImage2DConvertInternalFormat(
+ GLenum target, GLint level, GLenum requested_internal_format,
+ GLsizei width, GLsizei height, GLint border,
+ GLenum format, GLenum type,
+ uint32 shared_memory_id, uint32 shared_memory_offset,
+ GLenum expected_internal_format);
+ void DoRenderbufferStorage(
+ GLenum target, GLenum internal_format, GLenum actual_format,
+ GLsizei width, GLsizei height, GLenum error);
+ void DoFramebufferRenderbuffer(
+ GLenum target,
+ GLenum attachment,
+ GLenum renderbuffer_target,
+ GLuint renderbuffer_client_id,
+ GLuint renderbuffer_service_id,
+ GLenum error);
+ void DoFramebufferTexture2D(
+ GLenum target, GLenum attachment, GLenum tex_target,
+ GLuint texture_client_id, GLuint texture_service_id,
+ GLint level, GLenum error);
+ void DoVertexAttribPointer(
+ GLuint index, GLint size, GLenum type, GLsizei stride, GLuint offset);
+ void DoVertexAttribDivisorANGLE(GLuint index, GLuint divisor);
+
+ void DoEnableDisable(GLenum cap, bool enable);
+
+ void DoEnableVertexAttribArray(GLint index);
+
+ void DoBufferData(GLenum target, GLsizei size);
+
+ void DoBufferSubData(
+ GLenum target, GLint offset, GLsizei size, const void* data);
+
+ void SetupVertexBuffer();
+ void SetupAllNeededVertexBuffers();
+
+ void SetupIndexBuffer();
+
+ void DeleteVertexBuffer();
+
+ void DeleteIndexBuffer();
+
+ void SetupClearTextureExpectations(
+ GLuint service_id,
+ GLuint old_service_id,
+ GLenum bind_target,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLenum format,
+ GLenum type,
+ GLsizei width,
+ GLsizei height);
+
+ void SetupExpectationsForRestoreClearState(
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test);
+
+ void SetupExpectationsForFramebufferClearing(
+ GLenum target,
+ GLuint clear_bits,
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test);
+
+ void SetupExpectationsForFramebufferClearingMulti(
+ GLuint read_framebuffer_service_id,
+ GLuint draw_framebuffer_service_id,
+ GLenum target,
+ GLuint clear_bits,
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test);
+
+ void SetupExpectationsForDepthMask(bool mask);
+ void SetupExpectationsForEnableDisable(GLenum cap, bool enable);
+ void SetupExpectationsForColorMask(bool red,
+ bool green,
+ bool blue,
+ bool alpha);
+ void SetupExpectationsForStencilMask(GLuint front_mask, GLuint back_mask);
+
+ void SetupExpectationsForApplyingDirtyState(
+ bool framebuffer_is_rgb,
+ bool framebuffer_has_depth,
+ bool framebuffer_has_stencil,
+ GLuint color_bits, // NOTE! bits are 0x1000, 0x0100, 0x0010, and 0x0001
+ bool depth_mask,
+ bool depth_enabled,
+ GLuint front_stencil_mask,
+ GLuint back_stencil_mask,
+ bool stencil_enabled);
+
+ void SetupExpectationsForApplyingDefaultDirtyState();
+
+ void AddExpectationsForSimulatedAttrib0WithError(
+ GLsizei num_vertices, GLuint buffer_id, GLenum error);
+
+ void AddExpectationsForSimulatedAttrib0(
+ GLsizei num_vertices, GLuint buffer_id);
+
+ void AddExpectationsForGenVertexArraysOES();
+ void AddExpectationsForDeleteVertexArraysOES();
+ void AddExpectationsForDeleteBoundVertexArraysOES();
+ void AddExpectationsForBindVertexArrayOES();
+ void AddExpectationsForRestoreAttribState(GLuint attrib);
+
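+  // Converts a byte offset into the pointer value that buffer-relative
+  // attribute pointers expect.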
+ GLvoid* BufferOffset(unsigned i) {
+    return static_cast<int8*>(NULL) + i;
+ }
+
+ template <typename Command, typename Result>
+ bool IsObjectHelper(GLuint client_id) {
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ Command cmd;
+ cmd.Init(client_id, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ bool isObject = static_cast<bool>(*result);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ return isObject;
+ }
+
+ protected:
+ static const int kBackBufferWidth = 128;
+ static const int kBackBufferHeight = 64;
+
+ static const GLint kMaxTextureSize = 2048;
+ static const GLint kMaxCubeMapTextureSize = 256;
+ static const GLint kNumVertexAttribs = 16;
+ static const GLint kNumTextureUnits = 8;
+ static const GLint kMaxTextureImageUnits = 8;
+ static const GLint kMaxVertexTextureImageUnits = 2;
+ static const GLint kMaxFragmentUniformVectors = 16;
+ static const GLint kMaxVaryingVectors = 8;
+ static const GLint kMaxVertexUniformVectors = 128;
+ static const GLint kMaxViewportWidth = 8192;
+ static const GLint kMaxViewportHeight = 8192;
+
+ static const GLint kViewportX = 0;
+ static const GLint kViewportY = 0;
+ static const GLint kViewportWidth = kBackBufferWidth;
+ static const GLint kViewportHeight = kBackBufferHeight;
+
+ static const GLuint kServiceAttrib0BufferId = 801;
+ static const GLuint kServiceFixedAttribBufferId = 802;
+
+ static const GLuint kServiceBufferId = 301;
+ static const GLuint kServiceFramebufferId = 302;
+ static const GLuint kServiceRenderbufferId = 303;
+ static const GLuint kServiceTextureId = 304;
+ static const GLuint kServiceProgramId = 305;
+ static const GLuint kServiceShaderId = 306;
+ static const GLuint kServiceElementBufferId = 308;
+ static const GLuint kServiceQueryId = 309;
+ static const GLuint kServiceVertexArrayId = 310;
+
+ static const int32 kSharedMemoryId = 401;
+ static const size_t kSharedBufferSize = 2048;
+ static const uint32 kSharedMemoryOffset = 132;
+ static const int32 kInvalidSharedMemoryId = 402;
+ static const uint32 kInvalidSharedMemoryOffset = kSharedBufferSize + 1;
+ static const uint32 kInitialResult = 0xBDBDBDBDu;
+ static const uint8 kInitialMemoryValue = 0xBDu;
+
+ static const uint32 kNewClientId = 501;
+ static const uint32 kNewServiceId = 502;
+ static const uint32 kInvalidClientId = 601;
+
+ static const GLuint kServiceVertexShaderId = 321;
+ static const GLuint kServiceFragmentShaderId = 322;
+
+ static const GLuint kServiceCopyTextureChromiumShaderId = 701;
+ static const GLuint kServiceCopyTextureChromiumProgramId = 721;
+
+ static const GLuint kServiceCopyTextureChromiumTextureBufferId = 751;
+ static const GLuint kServiceCopyTextureChromiumVertexBufferId = 752;
+ static const GLuint kServiceCopyTextureChromiumFBOId = 753;
+ static const GLuint kServiceCopyTextureChromiumPositionAttrib = 761;
+ static const GLuint kServiceCopyTextureChromiumTexAttrib = 762;
+ static const GLuint kServiceCopyTextureChromiumSamplerLocation = 763;
+
+ static const GLsizei kNumVertices = 100;
+ static const GLsizei kNumIndices = 10;
+ static const int kValidIndexRangeStart = 1;
+ static const int kValidIndexRangeCount = 7;
+ static const int kInvalidIndexRangeStart = 0;
+ static const int kInvalidIndexRangeCount = 7;
+ static const int kOutOfRangeIndexRangeEnd = 10;
+ static const GLuint kMaxValidIndex = 7;
+
+ static const GLint kMaxAttribLength = 10;
+ static const char* kAttrib1Name;
+ static const char* kAttrib2Name;
+ static const char* kAttrib3Name;
+ static const GLint kAttrib1Size = 1;
+ static const GLint kAttrib2Size = 1;
+ static const GLint kAttrib3Size = 1;
+ static const GLint kAttrib1Location = 0;
+ static const GLint kAttrib2Location = 1;
+ static const GLint kAttrib3Location = 2;
+ static const GLenum kAttrib1Type = GL_FLOAT_VEC4;
+ static const GLenum kAttrib2Type = GL_FLOAT_VEC2;
+ static const GLenum kAttrib3Type = GL_FLOAT_VEC3;
+ static const GLint kInvalidAttribLocation = 30;
+ static const GLint kBadAttribIndex = kNumVertexAttribs;
+
+ static const GLint kMaxUniformLength = 12;
+ static const char* kUniform1Name;
+ static const char* kUniform2Name;
+ static const char* kUniform3Name;
+ static const GLint kUniform1Size = 1;
+ static const GLint kUniform2Size = 3;
+ static const GLint kUniform3Size = 2;
+ static const GLint kUniform1RealLocation = 3;
+ static const GLint kUniform2RealLocation = 10;
+ static const GLint kUniform2ElementRealLocation = 12;
+ static const GLint kUniform3RealLocation = 20;
+ static const GLint kUniform1FakeLocation = 0; // These are
+ static const GLint kUniform2FakeLocation = 1; // hardcoded
+ static const GLint kUniform2ElementFakeLocation = 0x10001; // to match
+ static const GLint kUniform3FakeLocation = 2; // ProgramManager.
+ static const GLint kUniform1DesiredLocation = -1;
+ static const GLint kUniform2DesiredLocation = -1;
+ static const GLint kUniform3DesiredLocation = -1;
+ static const GLenum kUniform1Type = GL_SAMPLER_2D;
+ static const GLenum kUniform2Type = GL_INT_VEC2;
+ static const GLenum kUniform3Type = GL_FLOAT_VEC3;
+ static const GLenum kUniformSamplerExternalType = GL_SAMPLER_EXTERNAL_OES;
+ static const GLenum kUniformCubemapType = GL_SAMPLER_CUBE;
+ static const GLint kInvalidUniformLocation = 30;
+ static const GLint kBadUniformIndex = 1000;
+
+ // Use StrictMock to make 100% sure we know how GL will be called.
+ scoped_ptr< ::testing::StrictMock< ::gfx::MockGLInterface> > gl_;
+ scoped_refptr<gfx::GLSurfaceStub> surface_;
+ scoped_refptr<gfx::GLContextStubWithExtensions> context_;
+ scoped_ptr<MockGLES2Decoder> mock_decoder_;
+ scoped_ptr<GLES2Decoder> decoder_;
+ MemoryTracker* memory_tracker_;
+
+ GLuint client_buffer_id_;
+ GLuint client_framebuffer_id_;
+ GLuint client_program_id_;
+ GLuint client_renderbuffer_id_;
+ GLuint client_shader_id_;
+ GLuint client_texture_id_;
+ GLuint client_element_buffer_id_;
+ GLuint client_vertex_shader_id_;
+ GLuint client_fragment_shader_id_;
+ GLuint client_query_id_;
+ GLuint client_vertexarray_id_;
+
+ uint32 shared_memory_id_;
+ uint32 shared_memory_offset_;
+ void* shared_memory_address_;
+ void* shared_memory_base_;
+
+ GLuint service_renderbuffer_id_;
+ bool service_renderbuffer_valid_;
+
+ uint32 immediate_buffer_[64];
+
+ const bool ignore_cached_state_for_test_;
+ bool cached_color_mask_red_;
+ bool cached_color_mask_green_;
+ bool cached_color_mask_blue_;
+ bool cached_color_mask_alpha_;
+ bool cached_depth_mask_;
+ GLuint cached_stencil_front_mask_;
+ GLuint cached_stencil_back_mask_;
+
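+ // Tracks the cached enable/disable state of capabilities such as GL_BLEND
+ // and GL_SCISSOR_TEST.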
+ struct EnableFlags {
+ EnableFlags();
+ bool cached_blend;
+ bool cached_cull_face;
+ bool cached_depth_test;
+ bool cached_dither;
+ bool cached_polygon_offset_fill;
+ bool cached_sample_alpha_to_coverage;
+ bool cached_sample_coverage;
+ bool cached_scissor_test;
+ bool cached_stencil_test;
+ };
+
+ EnableFlags enable_flags_;
+
+ private:
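+ // Supplies the decoder with a valid and an intentionally invalid shared
+ // memory buffer so tests can exercise both paths of shared memory
+ // validation.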
+ class MockCommandBufferEngine : public CommandBufferEngine {
+ public:
+ MockCommandBufferEngine();
+
+ virtual ~MockCommandBufferEngine();
+
+ virtual scoped_refptr<gpu::Buffer> GetSharedMemoryBuffer(int32 shm_id)
+ OVERRIDE;
+
+ void ClearSharedMemory() {
+ memset(valid_buffer_->memory(), kInitialMemoryValue, kSharedBufferSize);
+ }
+
+ virtual void set_token(int32 token) OVERRIDE;
+
+ virtual bool SetGetBuffer(int32 /* transfer_buffer_id */) OVERRIDE;
+
+ // Overridden from CommandBufferEngine.
+ virtual bool SetGetOffset(int32 offset) OVERRIDE;
+
+ // Overridden from CommandBufferEngine.
+ virtual int32 GetGetOffset() OVERRIDE;
+
+ private:
+ scoped_refptr<gpu::Buffer> valid_buffer_;
+ scoped_refptr<gpu::Buffer> invalid_buffer_;
+ };
+
+ // MockGLStates is used to track GL states and emulate driver
+ // behaviors on top of MockGLInterface.
+ class MockGLStates {
+ public:
+ MockGLStates()
+ : bound_array_buffer_object_(0),
+ bound_vertex_array_object_(0) {
+ }
+
+ ~MockGLStates() {
+ }
+
+ void OnBindArrayBuffer(GLuint id) {
+ bound_array_buffer_object_ = id;
+ }
+
+ void OnBindVertexArrayOES(GLuint id) {
+ bound_vertex_array_object_ = id;
+ }
+
+ void OnVertexAttribNullPointer() {
+ // When a vertex array object is bound, some drivers (AMD Linux,
+ // Qualcomm, etc.) have a bug where they incorrectly generate a
+ // GL_INVALID_OPERATION from glVertexAttribPointer() if the pointer
+ // is NULL and no buffer is bound to GL_ARRAY_BUFFER.
+ // Make sure we don't trigger this bug.
+ if (bound_vertex_array_object_ != 0)
+ EXPECT_TRUE(bound_array_buffer_object_ != 0);
+ }
+
+ private:
+ GLuint bound_array_buffer_object_;
+ GLuint bound_vertex_array_object_;
+ }; // class MockGLStates
+
+ void AddExpectationsForVertexAttribManager();
+ void SetupMockGLBehaviors();
+
+ scoped_ptr< ::testing::StrictMock<MockCommandBufferEngine> > engine_;
+ scoped_refptr<ContextGroup> group_;
+ MockGLStates gl_states_;
+};
+
+class GLES2DecoderWithShaderTestBase : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderWithShaderTestBase()
+ : GLES2DecoderTestBase() {
+ }
+
+ protected:
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+};
+
+// SpecializedSetup specializations that are needed in multiple unittest files.
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::LinkProgram, 0>(bool valid);
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_BASE_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc
new file mode 100644
index 0000000..74149ef
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc
@@ -0,0 +1,427 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
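+// Exercises GLES2Decoder::RestoreAllTextureUnitBindings(), both with and
+// without a previous ContextState to diff against.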
+class GLES2DecoderRestoreStateTest : public GLES2DecoderManualInitTest {
+ public:
+ GLES2DecoderRestoreStateTest() {}
+
+ protected:
+ void AddExpectationsForActiveTexture(GLenum unit);
+ void AddExpectationsForBindTexture(GLenum target, GLuint id);
+ void InitializeContextState(ContextState* state,
+ uint32 non_default_unit,
+ uint32 active_unit);
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderRestoreStateTest,
+ ::testing::Bool());
+
+void GLES2DecoderRestoreStateTest::AddExpectationsForActiveTexture(
+ GLenum unit) {
+ EXPECT_CALL(*gl_, ActiveTexture(unit)).Times(1).RetiresOnSaturation();
+}
+
+void GLES2DecoderRestoreStateTest::AddExpectationsForBindTexture(GLenum target,
+ GLuint id) {
+ EXPECT_CALL(*gl_, BindTexture(target, id)).Times(1).RetiresOnSaturation();
+}
+
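+// Builds a previous ContextState in which every texture unit is bound to the
+// default textures, except that unit |non_default_unit| gets the test's client
+// texture as its 2D binding; |active_unit| becomes the active texture unit.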
+void GLES2DecoderRestoreStateTest::InitializeContextState(
+ ContextState* state,
+ uint32 non_default_unit,
+ uint32 active_unit) {
+ state->texture_units.resize(group().max_texture_units());
+ for (uint32 tt = 0; tt < state->texture_units.size(); ++tt) {
+ TextureRef* ref_cube_map =
+ group().texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_CUBE_MAP);
+ state->texture_units[tt].bound_texture_cube_map = ref_cube_map;
+ TextureRef* ref_2d =
+ (tt == non_default_unit)
+ ? group().texture_manager()->GetTexture(client_texture_id_)
+ : group().texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_2D);
+ state->texture_units[tt].bound_texture_2d = ref_2d;
+ }
+ state->active_texture_unit = active_unit;
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, NullPreviousStateBGR) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ SetupTexture();
+
+ InSequence sequence;
+ // Expect to restore texture bindings for unit GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+ AddExpectationsForBindTexture(GL_TEXTURE_CUBE_MAP,
+ TestHelper::kServiceDefaultTextureCubemapId);
+
+ // Expect to restore texture bindings for remaining units.
+ for (uint32 i = 1; i < group().max_texture_units(); ++i) {
+ AddExpectationsForActiveTexture(GL_TEXTURE0 + i);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D,
+ TestHelper::kServiceDefaultTexture2dId);
+ AddExpectationsForBindTexture(GL_TEXTURE_CUBE_MAP,
+ TestHelper::kServiceDefaultTextureCubemapId);
+ }
+
+ // Expect to restore the active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(NULL);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, NullPreviousState) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+ SetupTexture();
+
+ InSequence sequence;
+ // Expect to restore texture bindings for unit GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+ AddExpectationsForBindTexture(GL_TEXTURE_CUBE_MAP, 0);
+
+ // Expect to restore texture bindings for remaining units.
+ for (uint32 i = 1; i < group().max_texture_units(); ++i) {
+ AddExpectationsForActiveTexture(GL_TEXTURE0 + i);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, 0);
+ AddExpectationsForBindTexture(GL_TEXTURE_CUBE_MAP, 0);
+ }
+
+ // Expect to restore the active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(NULL);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, WithPreviousStateBGR) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ SetupTexture();
+
+ // Construct a previous ContextState with all texture bindings
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, std::numeric_limits<uint32>::max(), 0);
+
+ InSequence sequence;
+ // Expect to restore only GL_TEXTURE_2D binding for GL_TEXTURE0 unit,
+ // since the rest of the bindings haven't changed between the current
+ // state and the |prev_state|.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, WithPreviousState) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+ SetupTexture();
+
+ // Construct a previous ContextState with all texture bindings
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, std::numeric_limits<uint32>::max(), 0);
+
+ InSequence sequence;
+ // Expect to restore only GL_TEXTURE_2D binding for GL_TEXTURE0 unit,
+ // since the rest of the bindings haven't changed between the current
+ // state and the |prev_state|.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, ActiveUnit1) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ // Bind a non-default texture to GL_TEXTURE1 unit.
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE1));
+ ActiveTexture cmd;
+ cmd.Init(GL_TEXTURE1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ SetupTexture();
+
+ // Construct a previous ContextState with all texture bindings
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, std::numeric_limits<uint32>::max(), 0);
+
+ InSequence sequence;
+ // Expect to restore only GL_TEXTURE_2D binding for GL_TEXTURE1 unit,
+ // since the rest of the bindings haven't changed between the current
+ // state and the |prev_state|.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore active texture unit to GL_TEXTURE1.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, NonDefaultUnit0BGR) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ // Bind a non-default texture to GL_TEXTURE1 unit.
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE1));
+ SpecializedSetup<ActiveTexture, 0>(true);
+ ActiveTexture cmd;
+ cmd.Init(GL_TEXTURE1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ SetupTexture();
+
+ // Construct a previous ContextState with GL_TEXTURE_2D target in
+ // GL_TEXTURE0 unit bound to a non-default texture and the rest
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, 0, kServiceTextureId);
+
+ InSequence sequence;
+ // Expect to restore GL_TEXTURE_2D binding for GL_TEXTURE0 unit to
+ // a default texture.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D,
+ TestHelper::kServiceDefaultTexture2dId);
+
+ // Expect to restore GL_TEXTURE_2D binding for GL_TEXTURE1 unit to
+ // non-default.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore active texture unit to GL_TEXTURE1.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, NonDefaultUnit1BGR) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ // Bind a non-default texture to GL_TEXTURE0 unit.
+ SetupTexture();
+
+ // Construct a previous ContextState with GL_TEXTURE_2D target in
+ // GL_TEXTURE1 unit bound to a non-default texture and the rest
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, 1, kServiceTextureId);
+
+ InSequence sequence;
+ // Expect to restore GL_TEXTURE_2D binding to the non-default texture
+ // for GL_TEXTURE0 unit.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore GL_TEXTURE_2D binding to the default texture
+ // for GL_TEXTURE1 unit.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D,
+ TestHelper::kServiceDefaultTexture2dId);
+
+ // Expect to restore active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, DefaultUnit0) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ // Bind a non-default texture to GL_TEXTURE1 unit.
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE1));
+ SpecializedSetup<ActiveTexture, 0>(true);
+ ActiveTexture cmd;
+ cmd.Init(GL_TEXTURE1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ SetupTexture();
+
+ // Construct a previous ContextState with GL_TEXTURE_2D target in
+ // GL_TEXTURE0 unit bound to a non-default texture and the rest
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, 0, kServiceTextureId);
+
+ InSequence sequence;
+ // Expect to restore GL_TEXTURE_2D binding for GL_TEXTURE0 unit to
+ // the 0 texture.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, 0);
+
+ // Expect to restore GL_TEXTURE_2D binding for GL_TEXTURE1 unit to
+ // non-default.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore active texture unit to GL_TEXTURE1.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, DefaultUnit1) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ // Bind a non-default texture to GL_TEXTURE0 unit.
+ SetupTexture();
+
+ // Construct a previous ContextState with GL_TEXTURE_2D target in
+ // GL_TEXTURE1 unit bound to a non-default texture and the rest
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, 1, kServiceTextureId);
+
+ InSequence sequence;
+ // Expect to restore GL_TEXTURE_2D binding to the non-default texture
+ // for GL_TEXTURE0 unit.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore GL_TEXTURE_2D binding to the 0 texture
+ // for GL_TEXTURE1 unit.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, 0);
+
+ // Expect to restore active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ContextStateCapabilityCaching) {
+ struct TestInfo {
+ GLenum gl_enum;
+ bool default_state;
+ bool expect_set;
+ };
+
+ // TODO(vmiura): Should autogen this to match build_gles2_cmd_buffer.py.
+ TestInfo test[] = {{GL_BLEND, false, true},
+ {GL_CULL_FACE, false, true},
+ {GL_DEPTH_TEST, false, false},
+ {GL_DITHER, true, true},
+ {GL_POLYGON_OFFSET_FILL, false, true},
+ {GL_SAMPLE_ALPHA_TO_COVERAGE, false, true},
+ {GL_SAMPLE_COVERAGE, false, true},
+ {GL_SCISSOR_TEST, false, true},
+ {GL_STENCIL_TEST, false, false},
+ {0, false, false}};
+
+ InitState init;
+ init.gl_version = "2.1";
+ InitDecoder(init);
+
+ for (int i = 0; test[i].gl_enum; i++) {
+ bool enable_state = test[i].default_state;
+
+ // Test that setting the default state initially is ignored.
+ EnableDisableTest(test[i].gl_enum, enable_state, test[i].expect_set);
+
+ // Test new and cached state changes.
+ for (int n = 0; n < 3; n++) {
+ enable_state = !enable_state;
+ EnableDisableTest(test[i].gl_enum, enable_state, test[i].expect_set);
+ EnableDisableTest(test[i].gl_enum, enable_state, test[i].expect_set);
+ }
+ }
+}
+
+// TODO(vmiura): Tests for VAO restore.
+
+// TODO(vmiura): Tests for ContextState::RestoreAttribute().
+
+// TODO(vmiura): Tests for ContextState::RestoreBufferBindings().
+
+// TODO(vmiura): Tests for ContextState::RestoreProgramBindings().
+
+// TODO(vmiura): Tests for ContextState::RestoreRenderbufferBindings().
+
+// TODO(vmiura): Tests for ContextState::RestoreGlobalState().
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
new file mode 100644
index 0000000..7c8e5ae
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
@@ -0,0 +1,2390 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
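+// Runs the shader-backed decoder tests with GL_ANGLE_instanced_arrays enabled
+// so the instanced drawing paths can be exercised.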
+class GLES2DecoderGeometryInstancingTest : public GLES2DecoderWithShaderTest {
+ public:
+ GLES2DecoderGeometryInstancingTest() : GLES2DecoderWithShaderTest() {}
+
+ virtual void SetUp() {
+ InitState init;
+ init.extensions = "GL_ANGLE_instanced_arrays";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ SetupDefaultProgram();
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderGeometryInstancingTest,
+ ::testing::Bool());
+
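+// |color_bits| packs the RGBA color write mask one hex nibble per channel:
+// 0x1000 = red, 0x0100 = green, 0x0010 = blue, 0x0001 = alpha.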
+void GLES2DecoderManualInitTest::DirtyStateMaskTest(GLuint color_bits,
+ bool depth_mask,
+ GLuint front_stencil_mask,
+ GLuint back_stencil_mask) {
+ ColorMask color_mask_cmd;
+ color_mask_cmd.Init((color_bits & 0x1000) != 0,
+ (color_bits & 0x0100) != 0,
+ (color_bits & 0x0010) != 0,
+ (color_bits & 0x0001) != 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(color_mask_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ DepthMask depth_mask_cmd;
+ depth_mask_cmd.Init(depth_mask);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(depth_mask_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ StencilMaskSeparate front_stencil_mask_cmd;
+ front_stencil_mask_cmd.Init(GL_FRONT, front_stencil_mask);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(front_stencil_mask_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ StencilMaskSeparate back_stencil_mask_cmd;
+ back_stencil_mask_cmd.Init(GL_BACK, back_stencil_mask);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(back_stencil_mask_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupExpectationsForApplyingDirtyState(
+ false, // Framebuffer is RGB
+ true, // Framebuffer has depth
+ true, // Framebuffer has stencil
+ color_bits, // color bits
+ depth_mask, // depth mask
+ false, // depth enabled
+ front_stencil_mask, // front stencil mask
+ back_stencil_mask, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Test that with an RGB backbuffer, setting the color mask to 1,1,1,1 results
+// in 1,1,1,0 at draw time but still reads back as 1,1,1,1 at query time.
+TEST_P(GLES2DecoderRGBBackbufferTest, RGBBackbufferColorMask) {
+ ColorMask cmd;
+ cmd.Init(true, true, true, true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
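+ // Query GL_COLOR_WRITEMASK through the command buffer. The decoder answers
+ // from its cached state (note the .Times(0) expectation on the underlying
+ // GetIntegerv call), so the client still sees the 1,1,1,1 it set above.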
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_COLOR_WRITEMASK, result->GetData()))
+ .Times(0);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_COLOR_WRITEMASK, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(
+ decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_COLOR_WRITEMASK),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+ EXPECT_EQ(1, result->GetData()[1]);
+ EXPECT_EQ(1, result->GetData()[2]);
+ EXPECT_EQ(1, result->GetData()[3]);
+}
+
+// Test that with no depth buffer, setting DepthMask to true still yields false
+// at draw time, but querying it returns true.
+TEST_P(GLES2DecoderRGBBackbufferTest, RGBBackbufferDepthMask) {
+ EXPECT_CALL(*gl_, DepthMask(true)).Times(0).RetiresOnSaturation();
+ DepthMask cmd;
+ cmd.Init(true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_WRITEMASK, result->GetData()))
+ .Times(0);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_DEPTH_WRITEMASK, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(
+ decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_WRITEMASK),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+}
+
+// Test that with no stencil buffer, setting the stencil mask still yields 0 at
+// draw time, but querying it returns our value.
+TEST_P(GLES2DecoderRGBBackbufferTest, RGBBackbufferStencilMask) {
+ const GLint kMask = 123;
+ EXPECT_CALL(*gl_, StencilMask(kMask)).Times(0).RetiresOnSaturation();
+ StencilMask cmd;
+ cmd.Init(kMask);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_WRITEMASK, result->GetData()))
+ .Times(0);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_WRITEMASK, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(
+ decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_WRITEMASK),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(kMask, result->GetData()[0]);
+}
+
+// Test that if an FBO is bound we get the correct masks.
+TEST_P(GLES2DecoderRGBBackbufferTest, RGBBackbufferColorMaskFBO) {
+ ColorMask cmd;
+ cmd.Init(true, true, true, true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ DoEnableVertexAttribArray(2);
+ DoVertexAttribPointer(2, 2, GL_FLOAT, 0, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Check that no extra calls are made on the next draw.
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Set up the framebuffer.
+ // It needs to be 1x1 or else it's not renderable.
+ const GLsizei kWidth = 1;
+ const GLsizei kHeight = 1;
+ const GLenum kFormat = GL_RGB;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Pass some data so the texture will be marked as cleared.
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ kFormat,
+ kWidth,
+ kHeight,
+ 0,
+ kFormat,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_,
+ kServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+
+ // This time state needs to be set.
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Check that no extra calls are made on the next draw.
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Unbind the framebuffer; the RGB backbuffer is current again.
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
+
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DepthEnableWithDepth) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_depth = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ Enable cmd;
+ cmd.Init(GL_DEPTH_TEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupDefaultProgram();
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ true, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ true, // depth mask
+ true, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_TEST, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_DEPTH_TEST, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_TEST),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, DepthEnableWithoutRequestedDepth) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ Enable cmd;
+ cmd.Init(GL_DEPTH_TEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupDefaultProgram();
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_TEST, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_DEPTH_TEST, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_TEST),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, StencilEnableWithStencil) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_stencil = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ Enable cmd;
+ cmd.Init(GL_STENCIL_TEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupDefaultProgram();
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(
+ true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ true, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ GLES2Decoder::kDefaultStencilMask, // front stencil mask
+ GLES2Decoder::kDefaultStencilMask, // back stencil mask
+ true); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_TEST, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_TEST, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_TEST),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, StencilEnableWithoutRequestedStencil) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ Enable cmd;
+ cmd.Init(GL_STENCIL_TEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupDefaultProgram();
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_TEST, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_TEST, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_TEST),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, CachedColorMask) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ SetupDefaultProgram();
+ SetupAllNeededVertexBuffers();
+ SetupTexture();
+
+ // Test all color_bits combinations twice.
+ for (int i = 0; i < 32; i++) {
+ GLuint color_bits = (i & 1 ? 0x0001 : 0x0000) | (i & 2 ? 0x0010 : 0x0000) |
+ (i & 4 ? 0x0100 : 0x0000) | (i & 8 ? 0x1000 : 0x0000);
+
+ // Toggle the depth mask to force ApplyDirtyState each time.
+ DirtyStateMaskTest(color_bits, false, 0xffffffff, 0xffffffff);
+ DirtyStateMaskTest(color_bits, true, 0xffffffff, 0xffffffff);
+ DirtyStateMaskTest(color_bits, false, 0xffffffff, 0xffffffff);
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, CachedDepthMask) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ SetupDefaultProgram();
+ SetupAllNeededVertexBuffers();
+ SetupTexture();
+
+ // Test all depth_mask combinations twice.
+ for (int i = 0; i < 4; i++) {
+ bool depth_mask = (i & 1) == 1;
+
+ // Toggle color masks to force ApplyDirtyState each time.
+ DirtyStateMaskTest(0x1010, depth_mask, 0xffffffff, 0xffffffff);
+ DirtyStateMaskTest(0x0101, depth_mask, 0xffffffff, 0xffffffff);
+ DirtyStateMaskTest(0x1010, depth_mask, 0xffffffff, 0xffffffff);
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, CachedStencilMask) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ SetupDefaultProgram();
+ SetupAllNeededVertexBuffers();
+ SetupTexture();
+
+ // Test all stencil_mask combinations twice.
+ for (int i = 0; i < 4; i++) {
+ GLuint stencil_mask = (i & 1) ? 0xf0f0f0f0 : 0x0f0f0f0f;
+
+ // Toggle color masks to force ApplyDirtyState each time.
+ DirtyStateMaskTest(0x1010, true, stencil_mask, 0xffffffff);
+ DirtyStateMaskTest(0x0101, true, stencil_mask, 0xffffffff);
+ DirtyStateMaskTest(0x1010, true, stencil_mask, 0xffffffff);
+ }
+
+ for (int i = 0; i < 4; i++) {
+ GLuint stencil_mask = (i & 1) ? 0xf0f0f0f0 : 0x0f0f0f0f;
+
+ // Toggle color masks to force ApplyDirtyState each time.
+ DirtyStateMaskTest(0x1010, true, 0xffffffff, stencil_mask);
+ DirtyStateMaskTest(0x0101, true, 0xffffffff, stencil_mask);
+ DirtyStateMaskTest(0x1010, true, 0xffffffff, stencil_mask);
+ }
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysNoAttributesSucceeds) {
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Tests when the math overflows (0x40000000 * sizeof GLfloat)
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysSimulatedAttrib0OverflowFails) {
+ const GLsizei kLargeCount = 0x40000000;
+ SetupTexture();
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0).RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kLargeCount);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(GetDecoder()->WasContextLost());
+}
+
+// Tests when the math overflows (0x7FFFFFFF + 1 = 0x80000000 verts)
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysSimulatedAttrib0PosToNegFails) {
+ const GLsizei kLargeCount = 0x7FFFFFFF;
+ SetupTexture();
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0).RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kLargeCount);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(GetDecoder()->WasContextLost());
+}
+
+// Tests when the driver returns an error (GL_OUT_OF_MEMORY)
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysSimulatedAttrib0OOMFails) {
+ const GLsizei kFakeLargeCount = 0x1234;
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0WithError(
+ kFakeLargeCount, 0, GL_OUT_OF_MEMORY);
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0).RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kFakeLargeCount);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(GetDecoder()->WasContextLost());
+}
+
+// Test that the context is lost when the driver reports out of memory.
+TEST_P(GLES2DecoderManualInitTest, LoseContextWhenOOM) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ init.lose_context_when_out_of_memory = true;
+ InitDecoder(init);
+ SetupDefaultProgram();
+
+ const GLsizei kFakeLargeCount = 0x1234;
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0WithError(
+ kFakeLargeCount, 0, GL_OUT_OF_MEMORY);
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0).RetiresOnSaturation();
+ // Other contexts in the group should be lost also.
+ EXPECT_CALL(*mock_decoder_, LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kFakeLargeCount);
+ // This context should be lost.
+ EXPECT_EQ(error::kLostContext, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_TRUE(decoder_->WasContextLost());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysBadTextureUsesBlack) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // This is an NPOT texture. As the default filtering requires mips,
+ // this should trigger replacing it with a black texture before rendering.
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 3,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ {
+ InSequence sequence;
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(
+ *gl_, BindTexture(GL_TEXTURE_2D, TestHelper::kServiceBlackTexture2dId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kServiceTextureId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysMissingAttributesFails) {
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0);
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ DrawArraysMissingAttributesZeroCountSucceeds) {
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0);
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysValidAttributesSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ AddExpectationsForSimulatedAttrib0(kNumVertices, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Same as DrawArraysValidAttributesSucceeds, but with workaround
+// |init_vertex_attributes|.
+TEST_P(GLES2DecoderManualInitTest, InitVertexAttributes) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::INIT_VERTEX_ATTRIBUTES));
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoderWithCommandLine(init, &command_line);
+ SetupDefaultProgram();
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ AddExpectationsForSimulatedAttrib0(kNumVertices, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysDeletedBufferFails) {
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ DeleteVertexBuffer();
+
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0);
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysDeletedProgramSucceeds) {
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoDeleteProgram(client_program_id_, kServiceProgramId);
+
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId)).Times(1);
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysWithInvalidModeFails) {
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0);
+ DrawArrays cmd;
+ cmd.Init(GL_QUADS, 0, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_POLYGON, 0, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysInvalidCountFails) {
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ // Try start > 0
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0);
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 1, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Try with count > size
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Try with attrib offset > 0
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Try with size > 2 (i.e., vec3 instead of vec2)
+ DoVertexAttribPointer(1, 3, GL_FLOAT, 0, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Try with stride > 8 (three floats per vertex instead of two)
+ DoVertexAttribPointer(1, 2, GL_FLOAT, sizeof(GLfloat) * 3, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysInstancedANGLEFails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, VertexAttribDivisorANGLEFails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(_, _))
+ .Times(0)
+ .RetiresOnSaturation();
+
+ VertexAttribDivisorANGLE cmd;
+ cmd.Init(0, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLENoAttributesFails) {
+ SetupTexture();
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
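+// Attrib 0 is simulated, so the decoder must temporarily reset its divisor to
+// 0 for the draw and restore it to 1 afterwards.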
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLESimulatedAttrib0) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ AddExpectationsForSimulatedAttrib0(kNumVertices, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(GL_TRIANGLES, 0, kNumVertices, 3))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(0, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(0, 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLEMissingAttributesFails) {
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _)).Times(0);
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLEMissingAttributesZeroCountSucceeds) {
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _)).Times(0);
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, 0, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLEValidAttributesSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ AddExpectationsForSimulatedAttrib0(kNumVertices, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(GL_TRIANGLES, 0, kNumVertices, 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLEWithInvalidModeFails) {
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _)).Times(0);
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_QUADS, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_POLYGON, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLEInvalidPrimcountFails) {
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _)).Times(0);
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, 1, -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+// Per-instance data is twice as large, but the number of instances is half
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLELargeInstanceSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 4, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawArraysInstancedANGLE(GL_TRIANGLES, 0, kNumVertices, kNumVertices / 2))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, kNumVertices / 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Regular drawArrays takes the divisor into account
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysWithDivisorSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ // Access the data right at the end of the buffer.
+ DoVertexAttribPointer(
+ 0, 2, GL_FLOAT, 0, (kNumVertices - 1) * 2 * sizeof(GLfloat));
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Per-instance data is twice as large, but divisor is twice
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLELargeDivisorSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 4, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 2);
+ EXPECT_CALL(
+ *gl_,
+ DrawArraysInstancedANGLE(GL_TRIANGLES, 0, kNumVertices, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest, DrawArraysInstancedANGLELargeFails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, kNumVertices + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices + 1, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
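+
+// Note (illustrative, not part of the original file): the failures above match
+// the instanced-draw range checks, assuming the decoder takes the highest
+// per-instance entry accessed by an attribute with divisor d to be
+// (primcount - 1) / d and the highest per-vertex entry to be first + count - 1.
+// With divisor 1 and kNumVertices entries available, primcount == kNumVertices
+// still fits, so kNumVertices + 1 is rejected with GL_INVALID_OPERATION before
+// any GL call is made; a count of kNumVertices + 1 likewise overruns the
+// non-instanced attribute.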
+
+// Per-vertex data is twice as large, but the vertex count is half
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLELargeIndexSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 4, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawArraysInstancedANGLE(GL_TRIANGLES, 0, kNumVertices / 2, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices / 2, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLENoDivisor0Fails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ DoVertexAttribDivisorANGLE(1, 1);
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysNoDivisor0Fails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ DoVertexAttribDivisorANGLE(1, 1);
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
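+
+// Note (illustrative): the two NoDivisor0 tests above reflect the
+// ANGLE_instanced_arrays requirement that at least one enabled attribute used
+// by the program has a divisor of zero; when every enabled attribute has a
+// non-zero divisor the decoder rejects the draw with GL_INVALID_OPERATION
+// without calling the driver, and it applies the same rule to plain
+// DrawArrays as well.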
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsNoAttributesSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ EXPECT_CALL(*gl_,
+ DrawElements(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2)))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsMissingAttributesFails) {
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ DrawElementsMissingAttributesZeroCountSucceeds) {
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES, 0, GL_UNSIGNED_SHORT, kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsExtraAttributesFails) {
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(6);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsValidAttributesSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_,
+ DrawElements(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2)))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsDeletedBufferFails) {
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ DeleteIndexBuffer();
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsDeletedProgramSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoDeleteProgram(client_program_id_, kServiceProgramId);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(1);
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId)).Times(1);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsWithInvalidModeFails) {
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_QUADS,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_POLYGON,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsInvalidCountFails) {
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ // Try start > 0
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES, kNumIndices, GL_UNSIGNED_SHORT, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Try with count > size
+ cmd.Init(GL_TRIANGLES, kNumIndices + 1, GL_UNSIGNED_SHORT, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsOutOfRangeIndicesFails) {
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kInvalidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kInvalidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsOddOffsetForUint16Fails) {
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES, kInvalidIndexRangeCount, GL_UNSIGNED_SHORT, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsInstancedANGLEFails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLENoAttributesFails) {
+ SetupTexture();
+ SetupIndexBuffer();
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLESimulatedAttrib0) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawElementsInstancedANGLE(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2),
+ 3))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(0, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(0, 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLEMissingAttributesFails) {
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _)).Times(0);
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLEMissingAttributesZeroCountSucceeds) {
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _)).Times(0);
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, GL_UNSIGNED_SHORT, kValidIndexRangeStart * 2, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLEValidAttributesSucceeds) {
+ SetupIndexBuffer();
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(
+ *gl_,
+ DrawElementsInstancedANGLE(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2),
+ 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLEWithInvalidModeFails) {
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _)).Times(0);
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_QUADS,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_INVALID_ENUM,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+// Per-instance data is twice as large, but number of instances is half
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLELargeInstanceSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ // Add offset so we're sure we're accessing data near the end of the buffer.
+ DoVertexAttribPointer(
+ 1,
+ 2,
+ GL_FLOAT,
+ 0,
+ (kNumVertices - kMaxValidIndex - 1) * 2 * sizeof(GLfloat));
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 4, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawElementsInstancedANGLE(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2),
+ kNumVertices / 2))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kNumVertices / 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Regular drawElements takes the divisor into account
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsWithDivisorSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ // Add offset so we're sure we're accessing data near the end of the buffer.
+ DoVertexAttribPointer(
+ 1,
+ 2,
+ GL_FLOAT,
+ 0,
+ (kNumVertices - kMaxValidIndex - 1) * 2 * sizeof(GLfloat));
+
+ DoEnableVertexAttribArray(0);
+ // Access the data right at the end of the buffer.
+ DoVertexAttribPointer(
+ 0, 2, GL_FLOAT, 0, (kNumVertices - 1) * 2 * sizeof(GLfloat));
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawElements(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2)))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Per-instance data is twice as large, but divisor is twice
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLELargeDivisorSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 4, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 2);
+ EXPECT_CALL(
+ *gl_,
+ DrawElementsInstancedANGLE(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2),
+ kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLELargeFails) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kNumVertices + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ cmd.Init(GL_TRIANGLES,
+ kInvalidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kInvalidIndexRangeStart * 2,
+ kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLEInvalidPrimcountFails) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Per-index data is twice as large, but values of indices are smaller
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLELargeIndexSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 4, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawElementsInstancedANGLE(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2),
+ kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLENoDivisor0Fails) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ DoVertexAttribDivisorANGLE(1, 1);
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsNoDivisor0Fails) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ DoVertexAttribDivisorANGLE(1, 1);
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysClearsAfterTexImage2DNULL) {
+ SetupAllNeededVertexBuffers();
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Create an uncleared texture with 2 levels.
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 1, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ // Expect 2 levels will be cleared.
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 1,
+ 1);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // But not again
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
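+
+// Note (illustrative): this test and the DrawElements variant below exercise
+// the decoder's lazy clearing of texture levels defined with a NULL pixel
+// pointer; the clear expectations fire only before the first draw that
+// samples the texture, and the second identical draw issues no further
+// clears.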
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsClearsAfterTexImage2DNULL) {
+ SetupAllNeededVertexBuffers();
+ SetupIndexBuffer();
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Create an uncleared texture with 2 levels.
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 1, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ // Expect 2 levels will be cleared.
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 1,
+ 1);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_,
+ DrawElements(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2)))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // But not again
+ EXPECT_CALL(*gl_,
+ DrawElements(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2)))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawClearsAfterTexImage2DNULLInFBO) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ SetupAllNeededVertexBuffers();
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // But not again.
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawWitFBOThatCantClearDoesNotDraw) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_UNSUPPORTED))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0).RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_FRAMEBUFFER_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawClearsAfterRenderbufferStorageInFBO) {
+ SetupTexture();
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoRenderbufferStorage(
+ GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 100, 50, GL_NO_ERROR);
+ DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_,
+ kServiceRenderbufferId,
+ GL_NO_ERROR);
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DrawArraysClearsAfterTexImage2DNULLCubemap) {
+ InitState init;
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ InitDecoder(init);
+
+ static const GLenum faces[] = {
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ };
+ SetupCubemapProgram();
+ DoBindTexture(GL_TEXTURE_CUBE_MAP, client_texture_id_, kServiceTextureId);
+ // Fill out all the faces for 2 levels, leave 2 uncleared.
+ for (int ii = 0; ii < 6; ++ii) {
+ GLenum face = faces[ii];
+ int32 shm_id =
+ (face == GL_TEXTURE_CUBE_MAP_NEGATIVE_Y) ? 0 : kSharedMemoryId;
+ uint32 shm_offset =
+ (face == GL_TEXTURE_CUBE_MAP_NEGATIVE_Y) ? 0 : kSharedMemoryOffset;
+ DoTexImage2D(face,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shm_id,
+ shm_offset);
+ DoTexImage2D(face,
+ 1,
+ GL_RGBA,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shm_id,
+ shm_offset);
+ }
+ // Expect 2 levels will be cleared.
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ 1,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 1,
+ 1);
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ DrawClearsAfterRenderbuffersWithMultipleAttachments) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoRenderbufferStorage(GL_RENDERBUFFER,
+ GL_DEPTH_COMPONENT16,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ GL_NO_ERROR);
+ DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_,
+ kServiceRenderbufferId,
+ GL_NO_ERROR);
+
+ SetupTexture();
+ SetupExpectationsForFramebufferClearing(
+ GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ true, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ true, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ DrawingWithFBOTwiceChecksForFBOCompleteOnce) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ SetupAllNeededVertexBuffers();
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture that is cleared.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ // Make sure we check for framebuffer complete.
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // But not again.
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DrawClearsDepthTexture) {
+ InitState init;
+ init.extensions = "GL_ANGLE_depth_texture";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ SetupDefaultProgram();
+ SetupAllNeededVertexBuffers();
+ const GLenum attachment = GL_DEPTH_ATTACHMENT;
+ const GLenum target = GL_TEXTURE_2D;
+ const GLint level = 0;
+ DoBindTexture(target, client_texture_id_, kServiceTextureId);
+
+ // Create a depth texture.
+ DoTexImage2D(target,
+ level,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ 0,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ 0,
+ 0);
+
+ // Enable GL_SCISSOR_TEST to make sure we disable it in the clear,
+ // then re-enable it.
+ DoEnableDisable(GL_SCISSOR_TEST, true);
+
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(1, _)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, _))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_,
+ FramebufferTexture2DEXT(GL_DRAW_FRAMEBUFFER_EXT,
+ attachment,
+ target,
+ kServiceTextureId,
+ level))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_DRAW_FRAMEBUFFER_EXT))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, ClearStencil(0)).Times(1).RetiresOnSaturation();
+ SetupExpectationsForStencilMask(GLES2Decoder::kDefaultStencilMask,
+ GLES2Decoder::kDefaultStencilMask);
+ EXPECT_CALL(*gl_, ClearDepth(1.0f)).Times(1).RetiresOnSaturation();
+ SetupExpectationsForDepthMask(true);
+ SetupExpectationsForEnableDisable(GL_SCISSOR_TEST, false);
+
+ EXPECT_CALL(*gl_, Clear(GL_DEPTH_BUFFER_BIT)).Times(1).RetiresOnSaturation();
+
+ SetupExpectationsForRestoreClearState(0.0f, 0.0f, 0.0f, 0.0f, 0, 1.0f, true);
+
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, _)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ SetupExpectationsForApplyingDefaultDirtyState();
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
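+
+// Note (illustrative): the expectations above show how an uncleared depth
+// texture is initialized before it can be sampled. The decoder attaches the
+// texture to a temporary framebuffer, clears GL_DEPTH_BUFFER_BIT with the
+// scissor test disabled, restores the previous clear state, and deletes the
+// temporary framebuffer before issuing the actual draw.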
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
new file mode 100644
index 0000000..28e24de
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
@@ -0,0 +1,42 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2DecoderTestWithCHROMIUMPathRendering : public GLES2DecoderTest {
+ public:
+ GLES2DecoderTestWithCHROMIUMPathRendering() {}
+ virtual void SetUp() OVERRIDE {
+ InitState init;
+ init.gl_version = "opengl es 3.1";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ init.extensions = "GL_NV_path_rendering";
+ InitDecoder(init);
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderTestWithCHROMIUMPathRendering,
+ ::testing::Bool());
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h
new file mode 100644
index 0000000..a81be2f
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder_unittest_extensions.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_EXTENSIONS_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_EXTENSIONS_AUTOGEN_H_
+
+// TODO(gman): BlitFramebufferCHROMIUM
+// TODO(gman): RenderbufferStorageMultisampleCHROMIUM
+// TODO(gman): RenderbufferStorageMultisampleEXT
+// TODO(gman): FramebufferTexture2DMultisampleEXT
+// TODO(gman): DiscardFramebufferEXTImmediate
+
+TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
+ MatrixLoadfCHROMIUMImmediateValidArgs) {
+ cmds::MatrixLoadfCHROMIUMImmediate& cmd =
+ *GetImmediateAs<cmds::MatrixLoadfCHROMIUMImmediate>();
+ SpecializedSetup<cmds::MatrixLoadfCHROMIUMImmediate, 0>(true);
+ GLfloat temp[16] = {
+ 0,
+ };
+ cmd.Init(GL_PATH_PROJECTION_CHROMIUM, &temp[0]);
+ EXPECT_CALL(
+ *gl_,
+ MatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM,
+ reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
+ MatrixLoadIdentityCHROMIUMValidArgs) {
+ EXPECT_CALL(*gl_, MatrixLoadIdentityEXT(GL_PATH_PROJECTION_CHROMIUM));
+ SpecializedSetup<cmds::MatrixLoadIdentityCHROMIUM, 0>(true);
+ cmds::MatrixLoadIdentityCHROMIUM cmd;
+ cmd.Init(GL_PATH_PROJECTION_CHROMIUM);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_EXTENSIONS_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
new file mode 100644
index 0000000..32ba98d
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
@@ -0,0 +1,2395 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+class GLES2DecoderTestWithExtensionsOnGLES2 : public GLES2DecoderTest {
+ public:
+ GLES2DecoderTestWithExtensionsOnGLES2() {}
+
+ virtual void SetUp() {}
+ void Init(const char* extensions) {
+ InitState init;
+ init.extensions = extensions;
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ InitDecoder(init);
+ }
+};
+
+TEST_P(GLES2DecoderTest, CheckFramebufferStatusWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(_)).Times(0);
+ CheckFramebufferStatus::Result* result =
+ static_cast<CheckFramebufferStatus::Result*>(shared_memory_address_);
+ *result = 0;
+ CheckFramebufferStatus cmd;
+ cmd.Init(GL_FRAMEBUFFER, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE), *result);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, BindAndDeleteFramebuffer) {
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoDeleteFramebuffer(client_framebuffer_id_,
+ kServiceFramebufferId,
+ true,
+ GL_FRAMEBUFFER,
+ 0,
+ true,
+ GL_FRAMEBUFFER,
+ 0);
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(_, _, _, _)).Times(0);
+ FramebufferRenderbuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, FramebufferTexture2DWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, FramebufferTexture2DEXT(_, _, _, _, _)).Times(0);
+ FramebufferTexture2D cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, GetFramebufferAttachmentParameterivWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
+ .Times(0);
+ GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, GetFramebufferAttachmentParameterivWithRenderbuffer) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ const GLint* result_value = result->GetData();
+ FramebufferRenderbuffer fbrb_cmd;
+ GetFramebufferAttachmentParameteriv cmd;
+ fbrb_cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(static_cast<GLuint>(*result_value), client_renderbuffer_id_);
+}
+
+TEST_P(GLES2DecoderTest, GetFramebufferAttachmentParameterivWithTexture) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kServiceTextureId,
+ 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->SetNumResults(0);
+ const GLint* result_value = result->GetData();
+ FramebufferTexture2D fbtex_cmd;
+ GetFramebufferAttachmentParameteriv cmd;
+ fbtex_cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbtex_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(static_cast<GLuint>(*result_value), client_texture_id_);
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ GetRenderbufferParameterivRebindRenderbuffer) {
+ SetupTexture();
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
+
+ GetRenderbufferParameteriv cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ GL_RENDERBUFFER_RED_SIZE,
+ shared_memory_id_,
+ shared_memory_offset_);
+
+ RestoreRenderbufferBindings();
+ EnsureRenderbufferBound(true);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetRenderbufferParameterivEXT(
+ GL_RENDERBUFFER, GL_RENDERBUFFER_RED_SIZE, _));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, GetRenderbufferParameterivWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetRenderbufferParameterivEXT(_, _, _)).Times(0);
+ GetRenderbufferParameteriv cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ GL_RENDERBUFFER_WIDTH,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, RenderbufferStorageRebindRenderbuffer) {
+ SetupTexture();
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ RestoreRenderbufferBindings();
+ EnsureRenderbufferBound(true);
+ DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
+}
+
+TEST_P(GLES2DecoderTest, RenderbufferStorageWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _)).Times(0);
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+namespace {
+
+// A class to emulate glReadPixels
+class ReadPixelsEmulator {
+ public:
+  // pack_alignment is the alignment ReadPixels should use when packing rows.
+  // The src_pixels and expected_pixels data passed in must be contiguous
+  // (tightly packed).
+ ReadPixelsEmulator(GLsizei width,
+ GLsizei height,
+ GLint bytes_per_pixel,
+ const void* src_pixels,
+ const void* expected_pixels,
+ GLint pack_alignment)
+ : width_(width),
+ height_(height),
+ pack_alignment_(pack_alignment),
+ bytes_per_pixel_(bytes_per_pixel),
+ src_pixels_(reinterpret_cast<const int8*>(src_pixels)),
+ expected_pixels_(reinterpret_cast<const int8*>(expected_pixels)) {}
+
+ void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) const {
+ DCHECK_GE(x, 0);
+ DCHECK_GE(y, 0);
+ DCHECK_LE(x + width, width_);
+ DCHECK_LE(y + height, height_);
+ for (GLint yy = 0; yy < height; ++yy) {
+ const int8* src = GetPixelAddress(src_pixels_, x, y + yy);
+ const void* dst = ComputePackAlignmentAddress(0, yy, width, pixels);
+ memcpy(const_cast<void*>(dst), src, width * bytes_per_pixel_);
+ }
+ }
+
+ bool CompareRowSegment(GLint x,
+ GLint y,
+ GLsizei width,
+ const void* data) const {
+ DCHECK(x + width <= width_ || width == 0);
+ return memcmp(data,
+ GetPixelAddress(expected_pixels_, x, y),
+ width * bytes_per_pixel_) == 0;
+ }
+
+  // Helper to compute the address of a pixel in pack-aligned data.
+ const void* ComputePackAlignmentAddress(GLint x,
+ GLint y,
+ GLsizei width,
+ const void* address) const {
+ GLint unpadded_row_size = ComputeImageDataSize(width, 1);
+ GLint two_rows_size = ComputeImageDataSize(width, 2);
+ GLsizei padded_row_size = two_rows_size - unpadded_row_size;
+ GLint offset = y * padded_row_size + x * bytes_per_pixel_;
+ return static_cast<const int8*>(address) + offset;
+ }
+
+ GLint ComputeImageDataSize(GLint width, GLint height) const {
+ GLint row_size = width * bytes_per_pixel_;
+ if (height > 1) {
+ GLint temp = row_size + pack_alignment_ - 1;
+ GLint padded_row_size = (temp / pack_alignment_) * pack_alignment_;
+ GLint size_of_all_but_last_row = (height - 1) * padded_row_size;
+ return size_of_all_but_last_row + row_size;
+ } else {
+ return height * row_size;
+ }
+ }
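+
+  // Worked example (illustrative, not part of the original code): with
+  // width == 5, bytes_per_pixel_ == 3 and pack_alignment_ == 4, row_size is
+  // 15 and the padded row size rounds up to 16, so a 3-row image occupies
+  // 2 * 16 + 15 == 47 bytes; only the final row is left unpadded.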
+
+ private:
+ const int8* GetPixelAddress(const int8* base, GLint x, GLint y) const {
+ return base + (width_ * y + x) * bytes_per_pixel_;
+ }
+
+ GLsizei width_;
+ GLsizei height_;
+ GLint pack_alignment_;
+ GLint bytes_per_pixel_;
+ const int8* src_pixels_;
+ const int8* expected_pixels_;
+};
+
+} // anonymous namespace
+
+void GLES2DecoderTest::CheckReadPixelsOutOfRange(GLint in_read_x,
+ GLint in_read_y,
+ GLsizei in_read_width,
+ GLsizei in_read_height,
+ bool init) {
+ const GLsizei kWidth = 5;
+ const GLsizei kHeight = 3;
+ const GLint kBytesPerPixel = 3;
+ const GLint kPackAlignment = 4;
+ const GLenum kFormat = GL_RGB;
+ static const int8 kSrcPixels[kWidth * kHeight * kBytesPerPixel] = {
+ 12, 13, 14, 18, 19, 18, 19, 12, 13, 14, 18, 19, 18, 19, 13,
+ 29, 28, 23, 22, 21, 22, 21, 29, 28, 23, 22, 21, 22, 21, 28,
+ 31, 34, 39, 37, 32, 37, 32, 31, 34, 39, 37, 32, 37, 32, 34,
+ };
+
+ ClearSharedMemory();
+
+  // We need to set up an FBO so we know the maximum area that ReadPixels can
+  // access.
+ if (init) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ kFormat,
+ kWidth,
+ kHeight,
+ 0,
+ kFormat,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_,
+ kServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+
+ ReadPixelsEmulator emu(
+ kWidth, kHeight, kBytesPerPixel, kSrcPixels, kSrcPixels, kPackAlignment);
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ void* dest = &result[1];
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+  // ReadPixels will only be called for the valid region, even though the
+  // command requests a larger one.
+ GLint read_x = std::max(0, in_read_x);
+ GLint read_y = std::max(0, in_read_y);
+ GLint read_end_x = std::max(0, std::min(kWidth, in_read_x + in_read_width));
+ GLint read_end_y = std::max(0, std::min(kHeight, in_read_y + in_read_height));
+ GLint read_width = read_end_x - read_x;
+ GLint read_height = read_end_y - read_y;
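+  // For example (illustrative): with kWidth == 5, a request with
+  // in_read_x == -1 and in_read_width == 7 clamps to read_x == 0 and
+  // read_end_x == 5, so only a 5-pixel-wide strip is actually read back; the
+  // checks below expect the out-of-range columns to be zero-filled.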
+ if (read_width > 0 && read_height > 0) {
+ for (GLint yy = read_y; yy < read_end_y; ++yy) {
+ EXPECT_CALL(
+ *gl_,
+ ReadPixels(read_x, yy, read_width, 1, kFormat, GL_UNSIGNED_BYTE, _))
+ .WillOnce(Invoke(&emu, &ReadPixelsEmulator::ReadPixels))
+ .RetiresOnSaturation();
+ }
+ }
+ ReadPixels cmd;
+ cmd.Init(in_read_x,
+ in_read_y,
+ in_read_width,
+ in_read_height,
+ kFormat,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+
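+ // Verify the result row by row: rows entirely outside the source must be all
+ // zeros, in-range segments must match the emulator's data, and the
+ // pack-alignment padding between rows must keep its initial value.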
+ GLint unpadded_row_size = emu.ComputeImageDataSize(in_read_width, 1);
+ scoped_ptr<int8[]> zero(new int8[unpadded_row_size]);
+ scoped_ptr<int8[]> pack(new int8[kPackAlignment]);
+ memset(zero.get(), 0, unpadded_row_size);
+ memset(pack.get(), kInitialMemoryValue, kPackAlignment);
+ for (GLint yy = 0; yy < in_read_height; ++yy) {
+ const int8* row = static_cast<const int8*>(
+ emu.ComputePackAlignmentAddress(0, yy, in_read_width, dest));
+ GLint y = in_read_y + yy;
+ if (y < 0 || y >= kHeight) {
+ EXPECT_EQ(0, memcmp(zero.get(), row, unpadded_row_size));
+ } else {
+ // check off left.
+ GLint num_left_pixels = std::max(-in_read_x, 0);
+ GLint num_left_bytes = num_left_pixels * kBytesPerPixel;
+ EXPECT_EQ(0, memcmp(zero.get(), row, num_left_bytes));
+
+ // check off right.
+ GLint num_right_pixels = std::max(in_read_x + in_read_width - kWidth, 0);
+ GLint num_right_bytes = num_right_pixels * kBytesPerPixel;
+ EXPECT_EQ(0,
+ memcmp(zero.get(),
+ row + unpadded_row_size - num_right_bytes,
+ num_right_bytes));
+
+ // check middle.
+ GLint x = std::max(in_read_x, 0);
+ GLint num_middle_pixels =
+ std::max(in_read_width - num_left_pixels - num_right_pixels, 0);
+ EXPECT_TRUE(
+ emu.CompareRowSegment(x, y, num_middle_pixels, row + num_left_bytes));
+ }
+
+ // check padding
+ if (yy != in_read_height - 1) {
+ GLint num_padding_bytes =
+ (kPackAlignment - 1) - (unpadded_row_size % kPackAlignment);
+ EXPECT_EQ(0,
+ memcmp(pack.get(), row + unpadded_row_size, num_padding_bytes));
+ }
+ }
+}
+
+TEST_P(GLES2DecoderTest, ReadPixels) {
+ const GLsizei kWidth = 5;
+ const GLsizei kHeight = 3;
+ const GLint kBytesPerPixel = 3;
+ const GLint kPackAlignment = 4;
+ static const int8 kSrcPixels[kWidth * kHeight * kBytesPerPixel] = {
+ 12, 13, 14, 18, 19, 18, 19, 12, 13, 14, 18, 19, 18, 19, 13,
+ 29, 28, 23, 22, 21, 22, 21, 29, 28, 23, 22, 21, 22, 21, 28,
+ 31, 34, 39, 37, 32, 37, 32, 31, 34, 39, 37, 32, 37, 32, 34,
+ };
+
+ surface_->SetSize(gfx::Size(INT_MAX, INT_MAX));
+
+ ReadPixelsEmulator emu(
+ kWidth, kHeight, kBytesPerPixel, kSrcPixels, kSrcPixels, kPackAlignment);
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ void* dest = &result[1];
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ ReadPixels(0, 0, kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, _))
+ .WillOnce(Invoke(&emu, &ReadPixelsEmulator::ReadPixels));
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ for (GLint yy = 0; yy < kHeight; ++yy) {
+ EXPECT_TRUE(emu.CompareRowSegment(
+ 0, yy, kWidth, emu.ComputePackAlignmentAddress(0, yy, kWidth, dest)));
+ }
+}
+
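+// When the back buffer has no alpha channel, a GL_RGBA ReadPixels must report
+// an alpha of 255 for every pixel regardless of what the driver returns.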
+TEST_P(GLES2DecoderRGBBackbufferTest, ReadPixelsNoAlphaBackbuffer) {
+ const GLsizei kWidth = 3;
+ const GLsizei kHeight = 3;
+ const GLint kBytesPerPixel = 4;
+ const GLint kPackAlignment = 4;
+ static const uint8 kExpectedPixels[kWidth * kHeight * kBytesPerPixel] = {
+ 12, 13, 14, 255, 19, 18, 19, 255, 13, 14, 18, 255,
+ 29, 28, 23, 255, 21, 22, 21, 255, 28, 23, 22, 255,
+ 31, 34, 39, 255, 32, 37, 32, 255, 34, 39, 37, 255,
+ };
+ static const uint8 kSrcPixels[kWidth * kHeight * kBytesPerPixel] = {
+ 12, 13, 14, 18, 19, 18, 19, 12, 13, 14, 18, 19, 29, 28, 23, 22, 21, 22,
+ 21, 29, 28, 23, 22, 21, 31, 34, 39, 37, 32, 37, 32, 31, 34, 39, 37, 32,
+ };
+
+ surface_->SetSize(gfx::Size(INT_MAX, INT_MAX));
+
+ ReadPixelsEmulator emu(kWidth,
+ kHeight,
+ kBytesPerPixel,
+ kSrcPixels,
+ kExpectedPixels,
+ kPackAlignment);
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ void* dest = &result[1];
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ ReadPixels(0, 0, kWidth, kHeight, GL_RGBA, GL_UNSIGNED_BYTE, _))
+ .WillOnce(Invoke(&emu, &ReadPixelsEmulator::ReadPixels));
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ for (GLint yy = 0; yy < kHeight; ++yy) {
+ EXPECT_TRUE(emu.CompareRowSegment(
+ 0, yy, kWidth, emu.ComputePackAlignmentAddress(0, yy, kWidth, dest)));
+ }
+}
+
+TEST_P(GLES2DecoderTest, ReadPixelsOutOfRange) {
+ static GLint tests[][4] = {
+ {
+ -2, -1, 9, 5,
+ }, // out of range on all sides
+ {
+ 2, 1, 9, 5,
+ }, // out of range on right, bottom
+ {
+ -7, -4, 9, 5,
+ }, // out of range on left, top
+ {
+ 0, -5, 9, 5,
+ }, // completely off top
+ {
+ 0, 3, 9, 5,
+ }, // completely off bottom
+ {
+ -9, 0, 9, 5,
+ }, // completely off left
+ {
+ 5, 0, 9, 5,
+ }, // completely off right
+ };
+
+ for (size_t tt = 0; tt < arraysize(tests); ++tt) {
+ CheckReadPixelsOutOfRange(
+ tests[tt][0], tests[tt][1], tests[tt][2], tests[tt][3], tt == 0);
+ }
+}
+
+TEST_P(GLES2DecoderTest, ReadPixelsInvalidArgs) {
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ EXPECT_CALL(*gl_, ReadPixels(_, _, _, _, _, _, _)).Times(0);
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ -1,
+ 1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(0,
+ 0,
+ 1,
+ -1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGB,
+ GL_INT,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ kInvalidSharedMemoryId,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ kInvalidSharedMemoryOffset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ kInvalidSharedMemoryId,
+ result_shm_offset,
+ false);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ kInvalidSharedMemoryOffset,
+ false);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderManualInitTest, ReadPixelsAsyncError) {
+ InitState init;
+ init.extensions = "GL_ARB_sync";
+ init.gl_version = "opengl es 3.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+
+ const GLsizei kWidth = 4;
+ const GLsizei kHeight = 4;
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+
+ EXPECT_CALL(*gl_, GetError())
+ // First error check must pass to get to the test.
+ .WillOnce(Return(GL_NO_ERROR))
+ // Second check is after BufferData; simulate a failure here.
+ .WillOnce(Return(GL_INVALID_OPERATION))
+ // Third error check is fall-through call to sync ReadPixels.
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
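+ // The async path allocates a pixel pack buffer; when BufferData fails the
+ // decoder falls back to a synchronous ReadPixels, so both paths are expected
+ // below.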
+ EXPECT_CALL(*gl_,
+ ReadPixels(0, 0, kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, _))
+ .Times(1);
+ EXPECT_CALL(*gl_, GenBuffersARB(1, _)).Times(1);
+ EXPECT_CALL(*gl_, BindBuffer(GL_PIXEL_PACK_BUFFER_ARB, _)).Times(2);
+ EXPECT_CALL(*gl_,
+ BufferData(GL_PIXEL_PACK_BUFFER_ARB, _, NULL, GL_STREAM_READ))
+ .Times(1);
+
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+// Check that if a renderbuffer is attached and GL reports
+// GL_FRAMEBUFFER_COMPLETE, the buffer is cleared and the state is restored.
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearColor) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ ClearColor color_cmd;
+ ColorMask color_mask_cmd;
+ Enable enable_cmd;
+ FramebufferRenderbuffer cmd;
+ color_cmd.Init(0.1f, 0.2f, 0.3f, 0.4f);
+ color_mask_cmd.Init(0, 1, 0, 1);
+ enable_cmd.Init(GL_SCISSOR_TEST);
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ InSequence sequence;
+ EXPECT_CALL(*gl_, ClearColor(0.1f, 0.2f, 0.3f, 0.4f))
+ .Times(1)
+ .RetiresOnSaturation();
+ SetupExpectationsForEnableDisable(GL_SCISSOR_TEST, true);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(color_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(color_mask_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(enable_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
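+// The next two tests exercise the same flow for the depth and stencil
+// attachments.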
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearDepth) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ ClearDepthf depth_cmd;
+ DepthMask depth_mask_cmd;
+ FramebufferRenderbuffer cmd;
+ depth_cmd.Init(0.5f);
+ depth_mask_cmd.Init(false);
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ InSequence sequence;
+ EXPECT_CALL(*gl_, ClearDepth(0.5f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(depth_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(depth_mask_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearStencil) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ ClearStencil stencil_cmd;
+ StencilMaskSeparate stencil_mask_separate_cmd;
+ FramebufferRenderbuffer cmd;
+ stencil_cmd.Init(123);
+ stencil_mask_separate_cmd.Init(GL_BACK, 0x1234u);
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_STENCIL_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ InSequence sequence;
+ EXPECT_CALL(*gl_, ClearStencil(123)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_STENCIL_ATTACHMENT,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(stencil_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(stencil_mask_separate_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+#if 0 // Turn this test on once we allow GL_DEPTH_STENCIL_ATTACHMENT
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearDepthStencil) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+ ClearDepthf depth_cmd;
+ ClearStencil stencil_cmd;
+ FramebufferRenderbuffer cmd;
+ depth_cmd.Init(0.5f);
+ stencil_cmd.Init(123);
+ cmd.Init(
+ GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ InSequence sequence;
+ EXPECT_CALL(*gl_, ClearDepth(0.5f))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearStencil(123))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(
+ GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(depth_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(stencil_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+#endif
+
+TEST_P(GLES2DecoderManualInitTest, ActualAlphaMatchesRequestedAlpha) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_ALPHA_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_ALPHA_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ALPHA_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(8, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ActualAlphaDoesNotMatchRequestedAlpha) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_ALPHA_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_ALPHA_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ALPHA_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ActualDepthMatchesRequestedDepth) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_depth = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(24, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ActualDepthDoesNotMatchRequestedDepth) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ActualStencilMatchesRequestedStencil) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_stencil = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(8, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ActualStencilDoesNotMatchRequestedStencil) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilReportsCorrectValues) {
+ InitState init;
+ init.extensions = "GL_OES_packed_depth_stencil";
+ init.gl_version = "opengl es 2.0";
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(8, result->GetData()[0]);
+ result->size = 0;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(24, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilNoRequestedStencil) {
+ InitState init;
+ init.extensions = "GL_OES_packed_depth_stencil";
+ init.gl_version = "opengl es 2.0";
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+ result->size = 0;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(24, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilRenderbufferDepth) {
+ InitState init;
+ init.extensions = "GL_OES_packed_depth_stencil";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR)) // for RenderbufferStorage
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for FramebufferRenderbuffer
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(
+ *gl_,
+ RenderbufferStorageEXT(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50))
+ .Times(1)
+ .RetiresOnSaturation();
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ FramebufferRenderbuffer fbrb_cmd;
+ fbrb_cmd.Init(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
+
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+ result->size = 0;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(24, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilRenderbufferStencil) {
+ InitState init;
+ init.extensions = "GL_OES_packed_depth_stencil";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR)) // for RenderbufferStorage
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for FramebufferRenderbuffer
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(
+ *gl_,
+ RenderbufferStorageEXT(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50))
+ .Times(1)
+ .RetiresOnSaturation();
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_STENCIL_ATTACHMENT,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ FramebufferRenderbuffer fbrb_cmd;
+ fbrb_cmd.Init(GL_FRAMEBUFFER,
+ GL_STENCIL_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
+
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(8, result->GetData()[0]);
+ result->size = 0;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferGLError) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ FramebufferRenderbuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, FramebufferTexture2DGLError) {
+ const GLsizei kWidth = 5;
+ const GLsizei kHeight = 3;
+ const GLenum kFormat = GL_RGB;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ kFormat,
+ kWidth,
+ kHeight,
+ 0,
+ kFormat,
+ GL_UNSIGNED_BYTE,
+ 0,
+ 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kServiceTextureId,
+ 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ FramebufferTexture2D fbtex_cmd;
+ fbtex_cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbtex_cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, RenderbufferStorageGLError) {
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(GL_RENDERBUFFER, GL_RGBA, 100, 50))
+ .Times(1)
+ .RetiresOnSaturation();
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 100, 50);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, RenderbufferStorageBadArgs) {
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, TestHelper::kMaxRenderbufferSize + 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 1, TestHelper::kMaxRenderbufferSize + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ RenderbufferStorageMultisampleCHROMIUMGLError) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(
+ *gl_,
+ RenderbufferStorageMultisampleEXT(GL_RENDERBUFFER, 1, GL_RGBA, 100, 50))
+ .Times(1)
+ .RetiresOnSaturation();
+ RenderbufferStorageMultisampleCHROMIUM cmd;
+ cmd.Init(GL_RENDERBUFFER, 1, GL_RGBA4, 100, 50);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ RenderbufferStorageMultisampleCHROMIUMBadArgs) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EXPECT_CALL(*gl_, RenderbufferStorageMultisampleEXT(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ RenderbufferStorageMultisampleCHROMIUM cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples + 1,
+ GL_RGBA4,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ TestHelper::kMaxRenderbufferSize + 1,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ 1,
+ TestHelper::kMaxRenderbufferSize + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, RenderbufferStorageMultisampleCHROMIUM) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ InSequence sequence;
+ EnsureRenderbufferBound(false);
+ DoRenderbufferStorageMultisampleCHROMIUM(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ GL_RGBA,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ RenderbufferStorageMultisampleCHROMIUMRebindRenderbuffer) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ RestoreRenderbufferBindings();
+ InSequence sequence;
+ EnsureRenderbufferBound(true);
+ DoRenderbufferStorageMultisampleCHROMIUM(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ GL_RGBA,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ RenderbufferStorageMultisampleEXTNotSupported) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ InSequence sequence;
+ // GL_EXT_framebuffer_multisample is exposed through
+ // RenderbufferStorageMultisampleCHROMIUM, so the EXT command is rejected.
+ RenderbufferStorageMultisampleEXT cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
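+// Covers GL_EXT_multisampled_render_to_texture and
+// GL_IMG_multisampled_render_to_texture: with either extension,
+// RenderbufferStorageMultisampleCHROMIUM is rejected and the EXT command is
+// routed to the matching driver entry point (EXT or IMG).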
+class GLES2DecoderMultisampledRenderToTextureTest
+ : public GLES2DecoderTestWithExtensionsOnGLES2 {
+ public:
+ void TestNotCompatibleWithRenderbufferStorageMultisampleCHROMIUM() {
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ RenderbufferStorageMultisampleCHROMIUM cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ }
+
+ void TestRenderbufferStorageMultisampleEXT(const char* extension,
+ bool rb_rebind) {
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ InSequence sequence;
+ if (rb_rebind) {
+ RestoreRenderbufferBindings();
+ EnsureRenderbufferBound(true);
+ } else {
+ EnsureRenderbufferBound(false);
+ }
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ if (strstr(extension, "GL_IMG_multisampled_render_to_texture")) {
+ EXPECT_CALL(
+ *gl_,
+ RenderbufferStorageMultisampleIMG(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA,
+ TestHelper::kMaxRenderbufferSize,
+ 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(
+ *gl_,
+ RenderbufferStorageMultisampleEXT(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA,
+ TestHelper::kMaxRenderbufferSize,
+ 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ RenderbufferStorageMultisampleEXT cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderMultisampledRenderToTextureTest,
+ ::testing::Bool());
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ NotCompatibleWithRenderbufferStorageMultisampleCHROMIUM_EXT) {
+ Init("GL_EXT_multisampled_render_to_texture");
+ TestNotCompatibleWithRenderbufferStorageMultisampleCHROMIUM();
+}
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ NotCompatibleWithRenderbufferStorageMultisampleCHROMIUM_IMG) {
+ Init("GL_IMG_multisampled_render_to_texture");
+ TestNotCompatibleWithRenderbufferStorageMultisampleCHROMIUM();
+}
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ RenderbufferStorageMultisampleEXT_EXT) {
+ Init("GL_EXT_multisampled_render_to_texture");
+ TestRenderbufferStorageMultisampleEXT("GL_EXT_multisampled_render_to_texture",
+ false);
+}
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ RenderbufferStorageMultisampleEXT_IMG) {
+ Init("GL_IMG_multisampled_render_to_texture");
+ TestRenderbufferStorageMultisampleEXT("GL_IMG_multisampled_render_to_texture",
+ false);
+}
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ RenderbufferStorageMultisampleEXT_EXT_RebindRenderbuffer) {
+ Init("GL_EXT_multisampled_render_to_texture");
+ TestRenderbufferStorageMultisampleEXT("GL_EXT_multisampled_render_to_texture",
+ true);
+}
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ RenderbufferStorageMultisampleEXT_IMG_RebindRenderbuffer) {
+ Init("GL_IMG_multisampled_render_to_texture");
+ TestRenderbufferStorageMultisampleEXT("GL_IMG_multisampled_render_to_texture",
+ true);
+}
+
+TEST_P(GLES2DecoderTest, ReadPixelsGLError) {
+ GLenum kFormat = GL_RGBA;
+ GLint x = 0;
+ GLint y = 0;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ ReadPixels(x, y, width, height, kFormat, GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ ReadPixels cmd;
+ cmd.Init(x,
+ y,
+ width,
+ height,
+ kFormat,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
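+// An FBO attachment whose contents have never been cleared must be lazily
+// cleared before it is used, whether by Clear or by ReadPixels.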
+TEST_P(GLES2DecoderWithShaderTest, UnClearedAttachmentsGetClearedOnClear) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, Clear(GL_COLOR_BUFFER_BIT)).Times(1).RetiresOnSaturation();
+
+ Clear cmd;
+ cmd.Init(GL_COLOR_BUFFER_BIT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, UnClearedAttachmentsGetClearedOnReadPixels) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ UnClearedAttachmentsGetClearedOnReadPixelsAndDrawBufferGetsRestored) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render from" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_READ_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_READ_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Enable GL_SCISSOR_TEST to make sure we disable it in the clear,
+ // then re-enable after.
+ DoEnableDisable(GL_SCISSOR_TEST, true);
+
+ SetupExpectationsForFramebufferClearingMulti(
+ kServiceFramebufferId, // read framebuffer service id
+ 0, // backbuffer service id
+ GL_READ_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ true); // scissor test
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ typedef ReadPixels::Result Result;
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(Result);
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, CopyTexImageWithInCompleteFBOFails) {
+ GLenum target = GL_TEXTURE_2D;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ SetupTexture();
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 0, 0, GL_NO_ERROR);
+ DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_,
+ kServiceRenderbufferId,
+ GL_NO_ERROR);
+
+ EXPECT_CALL(*gl_, CopyTexImage2D(_, _, _, _, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ CopyTexImage2D cmd;
+ cmd.Init(target, level, internal_format, 0, 0, width, height);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_FRAMEBUFFER_OPERATION, GetGLError());
+}
+
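+// Verifies that changing or deleting a renderbuffer attachment marks the FBO
+// as no longer complete. |bound_fbo| controls whether the FBO is still bound
+// when the change happens.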
+void GLES2DecoderWithShaderTest::CheckRenderbufferChangesMarkFBOAsNotComplete(
+ bool bound_fbo) {
+ FramebufferManager* framebuffer_manager = group().framebuffer_manager();
+ SetupTexture();
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
+ DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_,
+ kServiceRenderbufferId,
+ GL_NO_ERROR);
+
+ if (!bound_fbo) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
+ }
+
+ Framebuffer* framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
+ ASSERT_TRUE(framebuffer != NULL);
+ framebuffer_manager->MarkAsComplete(framebuffer);
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+
+ // Test that RenderbufferStorage marks the FBO as not complete.
+ DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+ framebuffer_manager->MarkAsComplete(framebuffer);
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+
+ // Test deleting renderbuffer marks fbo as not complete.
+ DoDeleteRenderbuffer(client_renderbuffer_id_, kServiceRenderbufferId);
+ if (bound_fbo) {
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+ } else {
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+ }
+ // Cleanup
+ DoDeleteFramebuffer(client_framebuffer_id_,
+ kServiceFramebufferId,
+ bound_fbo,
+ GL_FRAMEBUFFER,
+ 0,
+ bound_fbo,
+ GL_FRAMEBUFFER,
+ 0);
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ RenderbufferChangesMarkFBOAsNotCompleteBoundFBO) {
+ CheckRenderbufferChangesMarkFBOAsNotComplete(true);
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ RenderbufferChangesMarkFBOAsNotCompleteUnboundFBO) {
+ CheckRenderbufferChangesMarkFBOAsNotComplete(false);
+}
+
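+// Verifies that TexImage2D, CopyTexImage2D, and texture deletion mark an FBO
+// with that texture attached as no longer complete.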
+void GLES2DecoderWithShaderTest::CheckTextureChangesMarkFBOAsNotComplete(
+ bool bound_fbo) {
+ FramebufferManager* framebuffer_manager = group().framebuffer_manager();
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ SetupTexture();
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoRenderbufferStorage(GL_RENDERBUFFER,
+ GL_DEPTH_COMPONENT16,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ GL_NO_ERROR);
+ DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_,
+ kServiceRenderbufferId,
+ GL_NO_ERROR);
+
+ if (!bound_fbo) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
+ }
+
+ Framebuffer* framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
+ ASSERT_TRUE(framebuffer != NULL);
+ framebuffer_manager->MarkAsComplete(framebuffer);
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+
+ // Test TexImage2D marks fbo as not complete.
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGB, 1, 1, 0, GL_RGB, GL_UNSIGNED_BYTE, 0, 0);
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+ framebuffer_manager->MarkAsComplete(framebuffer);
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+
+ // Test that CopyTexImage2D marks the FBO as not complete.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, CopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 0, 0, 1, 1, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ CopyTexImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 0, GL_RGB, 0, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+
+ // Test deleting texture marks fbo as not complete.
+ framebuffer_manager->MarkAsComplete(framebuffer);
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+ DoDeleteTexture(kFBOClientTextureId, kFBOServiceTextureId);
+
+ if (bound_fbo) {
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+ } else {
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+ }
+ // Cleanup
+ DoDeleteFramebuffer(client_framebuffer_id_,
+ kServiceFramebufferId,
+ bound_fbo,
+ GL_FRAMEBUFFER,
+ 0,
+ bound_fbo,
+ GL_FRAMEBUFFER,
+ 0);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, TextureChangesMarkFBOAsNotCompleteBoundFBO) {
+ CheckTextureChangesMarkFBOAsNotComplete(true);
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ TextureChangesMarkFBOAsNotCompleteUnboundFBO) {
+ CheckTextureChangesMarkFBOAsNotComplete(false);
+}
+
+TEST_P(GLES2DecoderTest, CanChangeSurface) {
+ scoped_refptr<GLSurfaceMock> other_surface(new GLSurfaceMock);
+ EXPECT_CALL(*other_surface.get(), GetBackingFrameBufferObject())
+ .WillOnce(Return(7));
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER_EXT, 7));
+
+ decoder_->SetSurface(other_surface);
+}
+
+TEST_P(GLES2DecoderTest, DrawBuffersEXTImmediateSuccceeds) {
+ const GLsizei count = 1;
+ const GLenum bufs[] = {GL_COLOR_ATTACHMENT0};
+ DrawBuffersEXTImmediate& cmd = *GetImmediateAs<DrawBuffersEXTImmediate>();
+ cmd.Init(count, bufs);
+
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_CALL(*gl_, DrawBuffersARB(count, _)).Times(1).RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, DrawBuffersEXTImmediateFails) {
+ const GLsizei count = 1;
+ const GLenum bufs[] = {GL_COLOR_ATTACHMENT1_EXT};
+ DrawBuffersEXTImmediate& cmd = *GetImmediateAs<DrawBuffersEXTImmediate>();
+ cmd.Init(count, bufs);
+
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, DrawBuffersEXTImmediateBackbuffer) {
+ const GLsizei count = 1;
+ const GLenum bufs[] = {GL_BACK};
+ DrawBuffersEXTImmediate& cmd = *GetImmediateAs<DrawBuffersEXTImmediate>();
+ cmd.Init(count, bufs);
+
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0); // unbind
+
+ EXPECT_CALL(*gl_, DrawBuffersARB(count, _)).Times(1).RetiresOnSaturation();
+
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
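+// On an ES 3.0 context the glDiscardFramebufferEXT binding should resolve to
+// glInvalidateFramebuffer rather than to the EXT entry point.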
+TEST_P(GLES2DecoderManualInitTest, InvalidateFramebufferBinding) {
+ InitState init;
+ init.gl_version = "opengl es 3.0";
+ InitDecoder(init);
+
+ // EXPECT_EQ can't be used to compare function pointers
+ EXPECT_TRUE(
+ gfx::MockGLInterface::GetGLProcAddress("glInvalidateFramebuffer") ==
+ gfx::g_driver_gl.fn.glDiscardFramebufferEXTFn);
+ EXPECT_TRUE(
+ gfx::MockGLInterface::GetGLProcAddress("glInvalidateFramebuffer") !=
+ gfx::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT"));
+}
+
+TEST_P(GLES2DecoderManualInitTest, DiscardFramebufferEXT) {
+ InitState init;
+ init.extensions = "GL_EXT_discard_framebuffer";
+ init.gl_version = "opengl es 2.0";
+ InitDecoder(init);
+
+ // EXPECT_EQ can't be used to compare function pointers
+ EXPECT_TRUE(
+ gfx::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT") ==
+ gfx::g_driver_gl.fn.glDiscardFramebufferEXTFn);
+
+ const GLenum target = GL_FRAMEBUFFER;
+ const GLsizei count = 1;
+ const GLenum attachments[] = {GL_COLOR_ATTACHMENT0};
+
+ SetupTexture();
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_,
+ kServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+ FramebufferManager* framebuffer_manager = group().framebuffer_manager();
+ Framebuffer* framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
+ EXPECT_TRUE(framebuffer->IsCleared());
+
+ EXPECT_CALL(*gl_, DiscardFramebufferEXT(target, count, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ DiscardFramebufferEXTImmediate& cmd =
+ *GetImmediateAs<DiscardFramebufferEXTImmediate>();
+ cmd.Init(target, count, attachments);
+
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(attachments)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_FALSE(framebuffer->IsCleared());
+}
+
+TEST_P(GLES2DecoderTest, DiscardFramebufferEXTUnsupported) {
+ const GLenum target = GL_FRAMEBUFFER;
+ const GLsizei count = 1;
+ const GLenum attachments[] = {GL_COLOR_EXT};
+ DiscardFramebufferEXTImmediate& cmd =
+ *GetImmediateAs<DiscardFramebufferEXTImmediate>();
+ cmd.Init(target, count, attachments);
+
+ // Should not result in a call into GL.
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(attachments)));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ DiscardedAttachmentsEXTMarksFramebufferIncomplete) {
+ InitState init;
+ init.extensions = "GL_EXT_discard_framebuffer";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, Clear(GL_COLOR_BUFFER_BIT)).Times(1).RetiresOnSaturation();
+
+ Clear clear_cmd;
+ clear_cmd.Init(GL_COLOR_BUFFER_BIT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(clear_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Check that framebuffer is cleared and complete.
+ FramebufferManager* framebuffer_manager = group().framebuffer_manager();
+ Framebuffer* framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
+ EXPECT_TRUE(framebuffer->IsCleared());
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+
+ // Check that discarding GL_COLOR_ATTACHMENT0 marks the attachment as
+ // uncleared and the framebuffer as incomplete.
+ EXPECT_TRUE(
+ gfx::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT") ==
+ gfx::g_driver_gl.fn.glDiscardFramebufferEXTFn);
+
+ const GLenum target = GL_FRAMEBUFFER;
+ const GLsizei count = 1;
+ const GLenum attachments[] = {GL_COLOR_ATTACHMENT0};
+
+ DiscardFramebufferEXTImmediate& discard_cmd =
+ *GetImmediateAs<DiscardFramebufferEXTImmediate>();
+ discard_cmd.Init(target, count, attachments);
+
+ EXPECT_CALL(*gl_, DiscardFramebufferEXT(target, count, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(discard_cmd, sizeof(attachments)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_FALSE(framebuffer->IsCleared());
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+}
+
+TEST_P(GLES2DecoderManualInitTest, ReadFormatExtension) {
+ InitState init;
+ init.extensions = "GL_OES_read_format";
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError()).Times(6).RetiresOnSaturation();
+
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ GetIntegerv cmd;
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(1).RetiresOnSaturation();
+ cmd.Init(GL_IMPLEMENTATION_COLOR_READ_FORMAT,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(1).RetiresOnSaturation();
+ cmd.Init(GL_IMPLEMENTATION_COLOR_READ_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoReadFormatExtension) {
+ InitState init;
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ GetIntegerv cmd;
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0).RetiresOnSaturation();
+ cmd.Init(GL_IMPLEMENTATION_COLOR_READ_FORMAT,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0).RetiresOnSaturation();
+ cmd.Init(GL_IMPLEMENTATION_COLOR_READ_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// TODO(gman): PixelStorei
+
+// TODO(gman): SwapBuffers
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
new file mode 100644
index 0000000..05cb9ff
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
@@ -0,0 +1,1045 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+TEST_P(GLES2DecoderWithShaderTest, GetProgramInfoCHROMIUMValidArgs) {
+ const uint32 kBucketId = 123;
+ GetProgramInfoCHROMIUM cmd;
+ cmd.Init(client_program_id_, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ EXPECT_GT(bucket->size(), 0u);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetProgramInfoCHROMIUMInvalidArgs) {
+ const uint32 kBucketId = 123;
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ EXPECT_TRUE(bucket == NULL);
+ GetProgramInfoCHROMIUM cmd;
+ cmd.Init(kInvalidClientId, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(sizeof(ProgramInfoHeader), bucket->size());
+ ProgramInfoHeader* info =
+ bucket->GetDataAs<ProgramInfoHeader*>(0, sizeof(ProgramInfoHeader));
+ ASSERT_TRUE(info != 0);
+ EXPECT_EQ(0u, info->link_status);
+ EXPECT_EQ(0u, info->num_attribs);
+ EXPECT_EQ(0u, info->num_uniforms);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformivSucceeds) {
+ GetUniformiv::Result* result =
+ static_cast<GetUniformiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformiv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformiv(kServiceProgramId, kUniform2RealLocation, _))
+ .Times(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GLES2Util::GetGLDataTypeSizeForUniforms(kUniform2Type),
+ result->size);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformivArrayElementSucceeds) {
+ GetUniformiv::Result* result =
+ static_cast<GetUniformiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformiv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2ElementFakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_,
+ GetUniformiv(kServiceProgramId, kUniform2ElementRealLocation, _))
+ .Times(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GLES2Util::GetGLDataTypeSizeForUniforms(kUniform2Type),
+ result->size);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformivBadProgramFails) {
+ GetUniformiv::Result* result =
+ static_cast<GetUniformiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformiv cmd;
+ // non-existent program
+ cmd.Init(kInvalidClientId,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformiv(_, _, _)).Times(0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+// Valid id that is not a program. The GL spec requires a different error for
+// this case.
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ result->size = kInitialResult;
+ cmd.Init(client_shader_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ // Unlinked program
+ EXPECT_CALL(*gl_, CreateProgram())
+ .Times(1)
+ .WillOnce(Return(kNewServiceId))
+ .RetiresOnSaturation();
+ CreateProgram cmd2;
+ cmd2.Init(kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ result->size = kInitialResult;
+ cmd.Init(kNewClientId,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformivBadLocationFails) {
+ GetUniformiv::Result* result =
+ static_cast<GetUniformiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformiv cmd;
+ // invalid location
+ cmd.Init(client_program_id_,
+ kInvalidUniformLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformiv(_, _, _)).Times(0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformivBadSharedMemoryFails) {
+ GetUniformiv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformiv(_, _, _)).Times(0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformfvSucceeds) {
+ GetUniformfv::Result* result =
+ static_cast<GetUniformfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformfv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformfv(kServiceProgramId, kUniform2RealLocation, _))
+ .Times(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GLES2Util::GetGLDataTypeSizeForUniforms(kUniform2Type),
+ result->size);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformfvArrayElementSucceeds) {
+ GetUniformfv::Result* result =
+ static_cast<GetUniformfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformfv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2ElementFakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_,
+ GetUniformfv(kServiceProgramId, kUniform2ElementRealLocation, _))
+ .Times(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GLES2Util::GetGLDataTypeSizeForUniforms(kUniform2Type),
+ result->size);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformfvBadProgramFails) {
+ GetUniformfv::Result* result =
+ static_cast<GetUniformfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformfv cmd;
+ // non-existent program
+ cmd.Init(kInvalidClientId,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformfv(_, _, _)).Times(0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+// Valid id that is not a program. The GL spec requires a different error for
+// this case.
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ result->size = kInitialResult;
+ cmd.Init(client_shader_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ // Unlinked program
+ EXPECT_CALL(*gl_, CreateProgram())
+ .Times(1)
+ .WillOnce(Return(kNewServiceId))
+ .RetiresOnSaturation();
+ CreateProgram cmd2;
+ cmd2.Init(kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ result->size = kInitialResult;
+ cmd.Init(kNewClientId,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformfvBadLocationFails) {
+ GetUniformfv::Result* result =
+ static_cast<GetUniformfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformfv cmd;
+ // invalid location
+ cmd.Init(client_program_id_,
+ kInvalidUniformLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformfv(_, _, _)).Times(0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformfvBadSharedMemoryFails) {
+ GetUniformfv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformfv(_, _, _)).Times(0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttachedShadersSucceeds) {
+ GetAttachedShaders cmd;
+ typedef GetAttachedShaders::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetAttachedShaders(kServiceProgramId, 1, _, _)).WillOnce(
+ DoAll(SetArgumentPointee<2>(1), SetArgumentPointee<3>(kServiceShaderId)));
+ cmd.Init(client_program_id_,
+ shared_memory_id_,
+ shared_memory_offset_,
+ Result::ComputeSize(1));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(client_shader_id_, result->GetData()[0]);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttachedShadersResultNotInitFail) {
+ GetAttachedShaders cmd;
+ typedef GetAttachedShaders::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 1;
+ EXPECT_CALL(*gl_, GetAttachedShaders(_, _, _, _)).Times(0);
+ cmd.Init(client_program_id_,
+ shared_memory_id_,
+ shared_memory_offset_,
+ Result::ComputeSize(1));
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttachedShadersBadProgramFails) {
+ GetAttachedShaders cmd;
+ typedef GetAttachedShaders::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetAttachedShaders(_, _, _, _)).Times(0);
+ cmd.Init(kInvalidClientId,
+ shared_memory_id_,
+ shared_memory_offset_,
+ Result::ComputeSize(1));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttachedShadersBadSharedMemoryFails) {
+ GetAttachedShaders cmd;
+ typedef GetAttachedShaders::Result Result;
+ cmd.Init(client_program_id_,
+ kInvalidSharedMemoryId,
+ shared_memory_offset_,
+ Result::ComputeSize(1));
+ EXPECT_CALL(*gl_, GetAttachedShaders(_, _, _, _)).Times(0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset,
+ Result::ComputeSize(1));
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetShaderPrecisionFormatSucceeds) {
+ ScopedGLImplementationSetter gl_impl(::gfx::kGLImplementationEGLGLES2);
+ GetShaderPrecisionFormat cmd;
+ typedef GetShaderPrecisionFormat::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ const GLint range[2] = {62, 62};
+ const GLint precision = 16;
+ EXPECT_CALL(*gl_, GetShaderPrecisionFormat(_, _, _, _))
+ .WillOnce(DoAll(SetArrayArgument<2>(range, range + 2),
+ SetArgumentPointee<3>(precision)))
+ .RetiresOnSaturation();
+ cmd.Init(GL_VERTEX_SHADER,
+ GL_HIGH_FLOAT,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_NE(0, result->success);
+ EXPECT_EQ(range[0], result->min_range);
+ EXPECT_EQ(range[1], result->max_range);
+ EXPECT_EQ(precision, result->precision);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetShaderPrecisionFormatResultNotInitFails) {
+ GetShaderPrecisionFormat cmd;
+ typedef GetShaderPrecisionFormat::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 1;
+ // NOTE: GL might not be called. There is no Desktop OpenGL equivalent.
+ cmd.Init(GL_VERTEX_SHADER,
+ GL_HIGH_FLOAT,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetShaderPrecisionFormatBadArgsFails) {
+ typedef GetShaderPrecisionFormat::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ GetShaderPrecisionFormat cmd;
+ cmd.Init(
+ GL_TEXTURE_2D, GL_HIGH_FLOAT, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ result->success = 0;
+ cmd.Init(GL_VERTEX_SHADER,
+ GL_TEXTURE_2D,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ GetShaderPrecisionFormatBadSharedMemoryFails) {
+ GetShaderPrecisionFormat cmd;
+ cmd.Init(GL_VERTEX_SHADER,
+ GL_HIGH_FLOAT,
+ kInvalidSharedMemoryId,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(GL_VERTEX_SHADER,
+ GL_HIGH_FLOAT,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveUniformSucceeds) {
+ const GLuint kUniformIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveUniform cmd;
+ typedef GetActiveUniform::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(client_program_id_,
+ kUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_NE(0, result->success);
+ EXPECT_EQ(kUniform2Size, result->size);
+ EXPECT_EQ(kUniform2Type, result->type);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(
+ 0,
+ memcmp(
+ bucket->GetData(0, bucket->size()), kUniform2Name, bucket->size()));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveUniformResultNotInitFails) {
+ const GLuint kUniformIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveUniform cmd;
+ typedef GetActiveUniform::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 1;
+ cmd.Init(client_program_id_,
+ kUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveUniformBadProgramFails) {
+ const GLuint kUniformIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveUniform cmd;
+ typedef GetActiveUniform::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(kInvalidClientId,
+ kUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ result->success = 0;
+ cmd.Init(client_shader_id_,
+ kUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveUniformBadIndexFails) {
+ const uint32 kBucketId = 123;
+ GetActiveUniform cmd;
+ typedef GetActiveUniform::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(client_program_id_,
+ kBadUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveUniformBadSharedMemoryFails) {
+ const GLuint kUniformIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveUniform cmd;
+ cmd.Init(client_program_id_,
+ kUniformIndex,
+ kBucketId,
+ kInvalidSharedMemoryId,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveAttribSucceeds) {
+ const GLuint kAttribIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveAttrib cmd;
+ typedef GetActiveAttrib::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(client_program_id_,
+ kAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_NE(0, result->success);
+ EXPECT_EQ(kAttrib2Size, result->size);
+ EXPECT_EQ(kAttrib2Type, result->type);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(
+ 0,
+ memcmp(bucket->GetData(0, bucket->size()), kAttrib2Name, bucket->size()));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveAttribResultNotInitFails) {
+ const GLuint kAttribIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveAttrib cmd;
+ typedef GetActiveAttrib::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 1;
+ cmd.Init(client_program_id_,
+ kAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveAttribBadProgramFails) {
+ const GLuint kAttribIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveAttrib cmd;
+ typedef GetActiveAttrib::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(kInvalidClientId,
+ kAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ result->success = 0;
+ cmd.Init(client_shader_id_,
+ kAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveAttribBadIndexFails) {
+ const uint32 kBucketId = 123;
+ GetActiveAttrib cmd;
+ typedef GetActiveAttrib::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(client_program_id_,
+ kBadAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveAttribBadSharedMemoryFails) {
+ const GLuint kAttribIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveAttrib cmd;
+ cmd.Init(client_program_id_,
+ kAttribIndex,
+ kBucketId,
+ kInvalidSharedMemoryId,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetShaderInfoLogValidArgs) {
+ const char* kInfo = "hello";
+ const uint32 kBucketId = 123;
+ CompileShader compile_cmd;
+ GetShaderInfoLog cmd;
+ EXPECT_CALL(*gl_, ShaderSource(kServiceShaderId, 1, _, _));
+ EXPECT_CALL(*gl_, CompileShader(kServiceShaderId));
+ EXPECT_CALL(*gl_, GetShaderiv(kServiceShaderId, GL_COMPILE_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(GL_FALSE))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetShaderiv(kServiceShaderId, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(strlen(kInfo) + 1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetShaderInfoLog(kServiceShaderId, strlen(kInfo) + 1, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<2>(strlen(kInfo)),
+ SetArrayArgument<3>(kInfo, kInfo + strlen(kInfo) + 1)));
+ compile_cmd.Init(client_shader_id_);
+ cmd.Init(client_shader_id_, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(compile_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(strlen(kInfo) + 1, bucket->size());
+ EXPECT_EQ(0,
+ memcmp(bucket->GetData(0, bucket->size()), kInfo, bucket->size()));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetShaderInfoLogInvalidArgs) {
+ const uint32 kBucketId = 123;
+ GetShaderInfoLog cmd;
+ cmd.Init(kInvalidClientId, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, CompileShaderValidArgs) {
+ EXPECT_CALL(*gl_, ShaderSource(kServiceShaderId, 1, _, _));
+ EXPECT_CALL(*gl_, CompileShader(kServiceShaderId));
+ EXPECT_CALL(*gl_, GetShaderiv(kServiceShaderId, GL_COMPILE_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(GL_TRUE))
+ .RetiresOnSaturation();
+ CompileShader cmd;
+ cmd.Init(client_shader_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, CompileShaderInvalidArgs) {
+ CompileShader cmd;
+ cmd.Init(kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+}
+
+TEST_P(GLES2DecoderTest, ShaderSourceBucketAndGetShaderSourceValidArgs) {
+ const uint32 kInBucketId = 123;
+ const uint32 kOutBucketId = 125;
+ const char kSource[] = "hello";
+ const uint32 kSourceSize = sizeof(kSource) - 1;
+ SetBucketAsCString(kInBucketId, kSource);
+ ShaderSourceBucket cmd;
+ cmd.Init(client_shader_id_, kInBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ ClearSharedMemory();
+ GetShaderSource get_cmd;
+ get_cmd.Init(client_shader_id_, kOutBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(get_cmd));
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kOutBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(kSourceSize + 1, bucket->size());
+ EXPECT_EQ(
+ 0, memcmp(bucket->GetData(0, bucket->size()), kSource, bucket->size()));
+}
+
+TEST_P(GLES2DecoderTest, ShaderSourceBucketInvalidArgs) {
+ const uint32 kBucketId = 123;
+ const char kSource[] = "hello";
+ const uint32 kSourceSize = sizeof(kSource) - 1;
+ memcpy(shared_memory_address_, kSource, kSourceSize);
+ ShaderSourceBucket cmd;
+ // Test no bucket.
+ cmd.Init(client_texture_id_, kBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ // Test invalid client.
+ SetBucketAsCString(kBucketId, kSource);
+ cmd.Init(kInvalidClientId, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ SetBucketAsCString(kBucketId, kSource);
+ cmd.Init(
+ client_program_id_, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+}
+
+TEST_P(GLES2DecoderTest, ShaderSourceStripComments) {
+ const uint32 kInBucketId = 123;
+ const char kSource[] = "hello/*te\ast*/world//a\ab";
+ SetBucketAsCString(kInBucketId, kSource);
+ ShaderSourceBucket cmd;
+ cmd.Init(client_shader_id_, kInBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1iValidArgs) {
+ EXPECT_CALL(*gl_, Uniform1i(kUniform1RealLocation, 2));
+ Uniform1i cmd;
+ cmd.Init(kUniform1FakeLocation, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1ivImmediateValidArgs) {
+ Uniform1ivImmediate& cmd = *GetImmediateAs<Uniform1ivImmediate>();
+ EXPECT_CALL(*gl_,
+ Uniform1iv(kUniform1RealLocation,
+ 1,
+ reinterpret_cast<GLint*>(ImmediateDataAddress(&cmd))));
+ GLint temp[1 * 2] = {
+ 0,
+ };
+ cmd.Init(kUniform1FakeLocation, 1, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1ivImmediateInvalidValidArgs) {
+ EXPECT_CALL(*gl_, Uniform1iv(_, _, _)).Times(0);
+ Uniform1ivImmediate& cmd = *GetImmediateAs<Uniform1ivImmediate>();
+ GLint temp[1 * 2] = {
+ 0,
+ };
+ cmd.Init(kUniform1FakeLocation, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1ivZeroCount) {
+ EXPECT_CALL(*gl_, Uniform1iv(_, _, _)).Times(0);
+ Uniform1ivImmediate& cmd = *GetImmediateAs<Uniform1ivImmediate>();
+ GLint temp = 0;
+ cmd.Init(kUniform1FakeLocation, 0, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1iSamplerIsLimited) {
+ EXPECT_CALL(*gl_, Uniform1i(_, _)).Times(0);
+ Uniform1i cmd;
+ cmd.Init(kUniform1FakeLocation, kNumTextureUnits);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1ivSamplerIsLimited) {
+ EXPECT_CALL(*gl_, Uniform1iv(_, _, _)).Times(0);
+ Uniform1ivImmediate& cmd = *GetImmediateAs<Uniform1ivImmediate>();
+ GLint temp[] = {kNumTextureUnits};
+ cmd.Init(kUniform1FakeLocation, 1, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, BindAttribLocationBucket) {
+ const uint32 kBucketId = 123;
+ const GLint kLocation = 2;
+ const char* kName = "testing";
+ EXPECT_CALL(*gl_,
+ BindAttribLocation(kServiceProgramId, kLocation, StrEq(kName)))
+ .Times(1);
+ SetBucketAsCString(kBucketId, kName);
+ BindAttribLocationBucket cmd;
+ cmd.Init(client_program_id_, kLocation, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, BindAttribLocationBucketInvalidArgs) {
+ const uint32 kBucketId = 123;
+ const GLint kLocation = 2;
+ const char* kName = "testing";
+ EXPECT_CALL(*gl_, BindAttribLocation(_, _, _)).Times(0);
+ BindAttribLocationBucket cmd;
+ // check bucket does not exist.
+ cmd.Init(client_program_id_, kLocation, kBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ // check bucket is empty.
+ SetBucketAsCString(kBucketId, NULL);
+ cmd.Init(client_program_id_, kLocation, kBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ // Check bad program id.
+ SetBucketAsCString(kBucketId, kName);
+ cmd.Init(kInvalidClientId, kLocation, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttribLocation) {
+ const uint32 kBucketId = 123;
+ const char* kNonExistentName = "foobar";
+ typedef GetAttribLocation::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ SetBucketAsCString(kBucketId, kAttrib2Name);
+ *result = -1;
+ GetAttribLocation cmd;
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kAttrib2Location, *result);
+ SetBucketAsCString(kBucketId, kNonExistentName);
+ *result = -1;
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttribLocationInvalidArgs) {
+ const uint32 kBucketId = 123;
+ typedef GetAttribLocation::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ *result = -1;
+ GetAttribLocation cmd;
+ // Check no bucket
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+ // Check bad program id.
+ SetBucketAsCString(kBucketId, kAttrib2Name);
+ cmd.Init(kInvalidClientId, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ *result = -1;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // Check bad memory
+ cmd.Init(client_program_id_,
+ kBucketId,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kBucketId,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformLocation) {
+ const uint32 kBucketId = 123;
+ const char* kNonExistentName = "foobar";
+ typedef GetUniformLocation::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ SetBucketAsCString(kBucketId, kUniform2Name);
+ *result = -1;
+ GetUniformLocation cmd;
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kUniform2FakeLocation, *result);
+ SetBucketAsCString(kBucketId, kNonExistentName);
+ *result = -1;
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformLocationInvalidArgs) {
+ const uint32 kBucketId = 123;
+ typedef GetUniformLocation::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ *result = -1;
+ GetUniformLocation cmd;
+ // Check no bucket
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+ // Check bad program id.
+ SetBucketAsCString(kBucketId, kUniform2Name);
+ cmd.Init(kInvalidClientId, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ *result = -1;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // Check bad memory
+ cmd.Init(client_program_id_,
+ kBucketId,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kBucketId,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, BindUniformLocationCHROMIUMBucket) {
+ const uint32 kBucketId = 123;
+ const GLint kLocation = 2;
+ const char* kName = "testing";
+ const char* kBadName1 = "gl_testing";
+ const char* kBadName2 = "testing[1]";
+
+ SetBucketAsCString(kBucketId, kName);
+ BindUniformLocationCHROMIUMBucket cmd;
+ cmd.Init(client_program_id_,
+ kLocation,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // check negative location
+ SetBucketAsCString(kBucketId, kName);
+ cmd.Init(client_program_id_, -1, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // check highest location
+ SetBucketAsCString(kBucketId, kName);
+ GLint kMaxLocation =
+ (kMaxFragmentUniformVectors + kMaxVertexUniformVectors) * 4 - 1;
+ cmd.Init(client_program_id_,
+ kMaxLocation,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // check too high location
+ SetBucketAsCString(kBucketId, kName);
+ cmd.Init(client_program_id_,
+ kMaxLocation + 1,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // check bad name "gl_..."
+ SetBucketAsCString(kBucketId, kBadName1);
+ cmd.Init(client_program_id_,
+ kLocation,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ // check bad name "name[1]" with a non-zero array index
+ SetBucketAsCString(kBucketId, kBadName2);
+ cmd.Init(client_program_id_,
+ kLocation,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, ClearUniformsBeforeFirstProgramUse) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::CLEAR_UNIFORMS_BEFORE_FIRST_PROGRAM_USE));
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoderWithCommandLine(init, &command_line);
+ {
+ static AttribInfo attribs[] = {
+ {
+ kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location,
+ },
+ {
+ kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location,
+ },
+ {
+ kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location,
+ },
+ };
+ static UniformInfo uniforms[] = {
+ {kUniform1Name, kUniform1Size, kUniform1Type, kUniform1FakeLocation,
+ kUniform1RealLocation, kUniform1DesiredLocation},
+ {kUniform2Name, kUniform2Size, kUniform2Type, kUniform2FakeLocation,
+ kUniform2RealLocation, kUniform2DesiredLocation},
+ {kUniform3Name, kUniform3Size, kUniform3Type, kUniform3FakeLocation,
+ kUniform3RealLocation, kUniform3DesiredLocation},
+ };
+ SetupShader(attribs,
+ arraysize(attribs),
+ uniforms,
+ arraysize(uniforms),
+ client_program_id_,
+ kServiceProgramId,
+ client_vertex_shader_id_,
+ kServiceVertexShaderId,
+ client_fragment_shader_id_,
+ kServiceFragmentShaderId);
+ TestHelper::SetupExpectationsForClearingUniforms(
+ gl_.get(), uniforms, arraysize(uniforms));
+ }
+
+ {
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+// TODO(gman): DeleteProgram
+
+// TODO(gman): UseProgram
+
+// TODO(gman): DeleteShader
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
new file mode 100644
index 0000000..82d5653
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -0,0 +1,2842 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_image_stub.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+TEST_P(GLES2DecoderTest, GenerateMipmapWrongFormatsFails) {
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 16, 17, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, GenerateMipmapHandlesOutOfMemory) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ GLint width = 0;
+ GLint height = 0;
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 2, &width, &height));
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 16,
+ 16,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(GL_TEXTURE_2D)).Times(1);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 2, &width, &height));
+}
+
+TEST_P(GLES2DecoderTest, GenerateMipmapClearsUnclearedTexture) {
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(GL_TEXTURE_2D));
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Same as GenerateMipmapClearsUnclearedTexture, but with workaround
+// |set_texture_filters_before_generating_mipmap|.
+TEST_P(GLES2DecoderManualInitTest, SetTextureFiltersBeforeGenerateMipmap) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::SET_TEXTURE_FILTER_BEFORE_GENERATING_MIPMAP));
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoderWithCommandLine(init, &command_line);
+
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(GL_TEXTURE_2D));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_LINEAR))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, ActiveTextureValidArgs) {
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE1));
+ SpecializedSetup<ActiveTexture, 0>(true);
+ ActiveTexture cmd;
+ cmd.Init(GL_TEXTURE1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, ActiveTextureInvalidArgs) {
+ EXPECT_CALL(*gl_, ActiveTexture(_)).Times(0);
+ SpecializedSetup<ActiveTexture, 0>(false);
+ ActiveTexture cmd;
+ cmd.Init(GL_TEXTURE0 - 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(kNumTextureUnits);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, TexSubImage2DValidArgs) {
+ const int kWidth = 16;
+ const int kHeight = 8;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 1,
+ 1,
+ 0,
+ kWidth - 1,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 1,
+ 0,
+ kWidth - 1,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, TexSubImage2DBadArgs) {
+ const int kWidth = 16;
+ const int kHeight = 8;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 0,
+ 0);
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE0,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_TRUE,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_INT,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ -1,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 1,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ -1,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 1,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth + 1,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight + 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, CopyTexSubImage2DValidArgs) {
+ const int kWidth = 16;
+ const int kHeight = 8;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_,
+ CopyTexSubImage2D(GL_TEXTURE_2D, 1, 0, 0, 0, 0, kWidth, kHeight))
+ .Times(1)
+ .RetiresOnSaturation();
+ CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 1, 0, 0, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, CopyTexSubImage2DBadArgs) {
+ const int kWidth = 16;
+ const int kHeight = 8;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 0,
+ 0);
+ CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE0, 1, 0, 0, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, -1, 0, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, 1, 0, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, 0, -1, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, 0, 1, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, 0, 0, 0, 0, kWidth + 1, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, 0, 0, 0, 0, kWidth, kHeight + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, TexImage2DRedefinitionSucceeds) {
+ const int kWidth = 16;
+ const int kHeight = 8;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_NO_ERROR));
+ for (int ii = 0; ii < 2; ++ii) {
+ TexImage2D cmd;
+ if (ii == 0) {
+ EXPECT_CALL(*gl_,
+ TexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ _))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ } else {
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kWidth,
+ kHeight);
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 0,
+ 0);
+ }
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ kWidth,
+ kHeight - 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Consider this TexSubImage2D command part of the previous TexImage2D
+ // (last GL_TRUE argument). It will be skipped if there are bugs in the
+ // redefinition case.
+ TexSubImage2D cmd2;
+ cmd2.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ kWidth,
+ kHeight - 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_TRUE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ }
+}
+
+TEST_P(GLES2DecoderTest, TexImage2DGLError) {
+ GLenum target = GL_TEXTURE_2D;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ GLint border = 0;
+ GLenum format = GL_RGBA;
+ GLenum type = GL_UNSIGNED_BYTE;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, level, &width, &height));
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ TexImage2D(target,
+ level,
+ internal_format,
+ width,
+ height,
+ border,
+ format,
+ type,
+ _))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexImage2D cmd;
+ cmd.Init(target,
+ level,
+ internal_format,
+ width,
+ height,
+ format,
+ type,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, level, &width, &height));
+}
+
+TEST_P(GLES2DecoderTest, CopyTexImage2DGLError) {
+ GLenum target = GL_TEXTURE_2D;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ GLint border = 0;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, level, &width, &height));
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ CopyTexImage2D(
+ target, level, internal_format, 0, 0, width, height, border))
+ .Times(1)
+ .RetiresOnSaturation();
+ CopyTexImage2D cmd;
+ cmd.Init(target, level, internal_format, 0, 0, width, height);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, level, &width, &height));
+}
+
+TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DBucketBadBucket) {
+ InitState init;
+ init.extensions = "GL_EXT_texture_compression_s3tc";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ const uint32 kBadBucketId = 123;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ CompressedTexImage2DBucket cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,
+ 4,
+ 4,
+ kBadBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ CompressedTexSubImage2DBucket cmd2;
+ cmd2.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ 4,
+ 4,
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,
+ kBadBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd2));
+}
+
+namespace {
+
+struct S3TCTestData {
+ GLenum format;
+ size_t block_size;
+};
+
+} // anonymous namespace.
+
+TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DS3TC) {
+ InitState init;
+ init.extensions = "GL_EXT_texture_compression_s3tc";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ const uint32 kBucketId = 123;
+ CommonDecoder::Bucket* bucket = decoder_->CreateBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+ static const S3TCTestData test_data[] = {
+ {
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT, 8,
+ },
+ {
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, 8,
+ },
+ {
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, 16,
+ },
+ {
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, 16,
+ },
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_data); ++ii) {
+ const S3TCTestData& test = test_data[ii];
+ CompressedTexImage2DBucket cmd;
+ // test small width.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, test.format, 2, 4, 0, test.block_size, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test bad width.
+ cmd.Init(GL_TEXTURE_2D, 0, test.format, 5, 4, kBucketId);
+ bucket->SetSize(test.block_size * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // test small height.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, test.format, 4, 2, 0, test.block_size, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test bad height.
+ cmd.Init(GL_TEXTURE_2D, 0, test.format, 4, 5, kBucketId);
+ bucket->SetSize(test.block_size * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // test 1x1 (small) for level 0.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, test.format, 1, 1, 0, test.block_size, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test 2x2 (small) for level 0.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, test.format, 2, 2, 0, test.block_size, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test size too large.
+ cmd.Init(GL_TEXTURE_2D, 0, test.format, 4, 4, kBucketId);
+ bucket->SetSize(test.block_size * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
+ // test size too small.
+ cmd.Init(GL_TEXTURE_2D, 0, test.format, 4, 4, kBucketId);
+ bucket->SetSize(test.block_size / 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
+ // test with 3 mips.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, test.format, 4, 4, 0, test.block_size, kBucketId);
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 1, test.format, 2, 2, 0, test.block_size, kBucketId);
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 2, test.format, 1, 1, 0, test.block_size, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Test a 16x16 image.
+ DoCompressedTexImage2D(GL_TEXTURE_2D,
+ 0,
+ test.format,
+ 16,
+ 16,
+ 0,
+ test.block_size * 4 * 4,
+ kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ CompressedTexSubImage2DBucket sub_cmd;
+ bucket->SetSize(test.block_size);
+ // Test sub image bad xoffset
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 1, 0, 4, 4, test.format, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test sub image bad yoffset
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 2, 4, 4, test.format, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test sub image bad width
+ bucket->SetSize(test.block_size * 2);
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 5, 4, test.format, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test sub image bad height
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 4, 5, test.format, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test sub image bad size
+ bucket->SetSize(test.block_size + 1);
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 4, 4, test.format, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
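+ // Exercise every 4-pixel-aligned sub-region of the 16x16 image. Each S3TC
+ // block covers a 4x4 texel tile and takes block_size bytes, so the valid
+ // data size is block_size * (width / 4) * (height / 4).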
+ for (GLint yoffset = 0; yoffset <= 8; yoffset += 4) {
+ for (GLint xoffset = 0; xoffset <= 8; xoffset += 4) {
+ for (GLsizei height = 4; height <= 8; height += 4) {
+ for (GLsizei width = 4; width <= 8; width += 4) {
+ GLsizei size = test.block_size * (width / 4) * (height / 4);
+ bucket->SetSize(size);
+ EXPECT_CALL(*gl_,
+ CompressedTexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ xoffset,
+ yoffset,
+ width,
+ height,
+ test.format,
+ size,
+ _))
+ .Times(1)
+ .RetiresOnSaturation();
+ sub_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ xoffset,
+ yoffset,
+ width,
+ height,
+ test.format,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DETC1) {
+ InitState init;
+ init.extensions = "GL_OES_compressed_ETC1_RGB8_texture";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ const uint32 kBucketId = 123;
+ CommonDecoder::Bucket* bucket = decoder_->CreateBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+ const GLenum kFormat = GL_ETC1_RGB8_OES;
+ const size_t kBlockSize = 8;
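+ // ETC1 encodes each 4x4 texel block in 8 bytes, so for example the 16x16
+ // image below needs 16 blocks (kBlockSize * 16 bytes).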
+
+ CompressedTexImage2DBucket cmd;
+ // test small width.
+ DoCompressedTexImage2D(GL_TEXTURE_2D, 0, kFormat, 4, 8, 0, 16, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test small height.
+ DoCompressedTexImage2D(GL_TEXTURE_2D, 0, kFormat, 8, 4, 0, 16, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test size too large.
+ cmd.Init(GL_TEXTURE_2D, 0, kFormat, 4, 4, kBucketId);
+ bucket->SetSize(kBlockSize * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
+ // test size too small.
+ cmd.Init(GL_TEXTURE_2D, 0, kFormat, 4, 4, kBucketId);
+ bucket->SetSize(kBlockSize / 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
+ // Test a 16x16 image.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, kFormat, 16, 16, 0, kBlockSize * 16, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Test CompressedTexSubImage not allowed
+ CompressedTexSubImage2DBucket sub_cmd;
+ bucket->SetSize(kBlockSize);
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 4, 4, kFormat, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test TexSubImage not allowed for ETC1 compressed texture
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ GLenum type, internal_format;
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(kFormat, internal_format);
+ TexSubImage2D texsub_cmd;
+ texsub_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ 4,
+ 4,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(texsub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test CopyTexSubImage not allowed for ETC1 compressed texture
+ CopyTexSubImage2D copy_cmd;
+ copy_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 0, 0, 4, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(copy_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalBindTexture) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_EXTERNAL_OES, kNewServiceId));
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
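+ // With bind_generates_resource enabled, binding an unused client id is
+ // expected to generate a new service texture, hence the expectations above.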
+ BindTexture cmd;
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ TextureRef* texture_ref = GetTexture(kNewClientId);
+ EXPECT_TRUE(texture_ref != NULL);
+ EXPECT_TRUE(texture_ref->texture()->target() == GL_TEXTURE_EXTERNAL_OES);
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalGetBinding) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, result->GetData()))
+ .Times(0);
+ result->size = 0;
+ GetIntegerv cmd;
+ cmd.Init(GL_TEXTURE_BINDING_EXTERNAL_OES,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_TEXTURE_BINDING_EXTERNAL_OES),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(client_texture_id_, (uint32)result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalTextureDefaults) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ EXPECT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_EXTERNAL_OES);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalTextureParam) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+
+ EXPECT_CALL(*gl_,
+ TexParameteri(
+ GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ EXPECT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_EXTERNAL_OES);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalTextureParamInvalid) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES,
+ GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST_MIPMAP_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ EXPECT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_EXTERNAL_OES);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalTexImage2DError) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ GLenum target = GL_TEXTURE_EXTERNAL_OES;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ GLenum format = GL_RGBA;
+ GLenum type = GL_UNSIGNED_BYTE;
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+ ASSERT_TRUE(GetTexture(client_texture_id_) != NULL);
+ TexImage2D cmd;
+ cmd.Init(target,
+ level,
+ internal_format,
+ width,
+ height,
+ format,
+ type,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+
+ // TexImage2D is not allowed with GL_TEXTURE_EXTERNAL_OES targets.
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DefaultTextureZero) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ BindTexture cmd2;
+ cmd2.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_CUBE_MAP, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DefaultTextureBGR) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(
+ *gl_, BindTexture(GL_TEXTURE_2D, TestHelper::kServiceDefaultTexture2dId));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ BindTexture cmd2;
+ cmd2.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_,
+ BindTexture(GL_TEXTURE_CUBE_MAP,
+ TestHelper::kServiceDefaultTextureCubemapId));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Test that default texture 0 is immutable.
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexParameterf) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexParameterf cmd2;
+ cmd2.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_CUBE_MAP, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexParameterf cmd2;
+ cmd2.Init(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexParameteri) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexParameteri cmd2;
+ cmd2.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_CUBE_MAP, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexParameteri cmd2;
+ cmd2.Init(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexParameterfv) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ GLfloat data = GL_NEAREST;
+ TexParameterfvImmediate& cmd2 =
+ *GetImmediateAs<TexParameterfvImmediate>();
+ cmd2.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ &data);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd2, sizeof(data)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_CUBE_MAP, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ GLfloat data = GL_NEAREST;
+ TexParameterfvImmediate& cmd2 =
+ *GetImmediateAs<TexParameterfvImmediate>();
+ cmd2.Init(GL_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_MAG_FILTER,
+ &data);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd2, sizeof(data)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexParameteriv) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ GLint data = GL_NEAREST;
+ TexParameterivImmediate& cmd2 =
+ *GetImmediateAs<TexParameterivImmediate>();
+ cmd2.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ &data);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd2, sizeof(data)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_CUBE_MAP, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ GLint data = GL_NEAREST;
+ TexParameterivImmediate& cmd2 =
+ *GetImmediateAs<TexParameterivImmediate>();
+ cmd2.Init(GL_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_MAG_FILTER,
+ &data);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd2, sizeof(data)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexImage2D) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexImage2D cmd2;
+ cmd2.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexSubImage2D) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexSubImage2D cmd2;
+ cmd2.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleBindTexture) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_RECTANGLE_ARB, kNewServiceId));
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ BindTexture cmd;
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ Texture* texture = GetTexture(kNewClientId)->texture();
+ EXPECT_TRUE(texture != NULL);
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_RECTANGLE_ARB);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleGetBinding) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(
+ GL_TEXTURE_RECTANGLE_ARB, client_texture_id_, kServiceTextureId);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetIntegerv(GL_TEXTURE_BINDING_RECTANGLE_ARB, result->GetData()))
+ .Times(0);
+ result->size = 0;
+ GetIntegerv cmd;
+ cmd.Init(GL_TEXTURE_BINDING_RECTANGLE_ARB,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_TEXTURE_BINDING_RECTANGLE_ARB),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(client_texture_id_, (uint32)result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleTextureDefaults) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(
+ GL_TEXTURE_RECTANGLE_ARB, client_texture_id_, kServiceTextureId);
+
+ Texture* texture = GetTexture(client_texture_id_)->texture();
+ EXPECT_TRUE(texture != NULL);
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_RECTANGLE_ARB);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleTextureParam) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ DoBindTexture(
+ GL_TEXTURE_RECTANGLE_ARB, client_texture_id_, kServiceTextureId);
+
+ EXPECT_CALL(*gl_,
+ TexParameteri(
+ GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
+ EXPECT_CALL(*gl_,
+ TexParameteri(
+ GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ Texture* texture = GetTexture(client_texture_id_)->texture();
+ EXPECT_TRUE(texture != NULL);
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_RECTANGLE_ARB);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleTextureParamInvalid) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ DoBindTexture(
+ GL_TEXTURE_RECTANGLE_ARB, client_texture_id_, kServiceTextureId);
+
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB,
+ GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST_MIPMAP_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ Texture* texture = GetTexture(client_texture_id_)->texture();
+ EXPECT_TRUE(texture != NULL);
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_RECTANGLE_ARB);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleTexImage2DError) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ GLenum target = GL_TEXTURE_RECTANGLE_ARB;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ GLenum format = GL_RGBA;
+ GLenum type = GL_UNSIGNED_BYTE;
+ DoBindTexture(
+ GL_TEXTURE_RECTANGLE_ARB, client_texture_id_, kServiceTextureId);
+ ASSERT_TRUE(GetTexture(client_texture_id_) != NULL);
+ TexImage2D cmd;
+ cmd.Init(target,
+ level,
+ internal_format,
+ width,
+ height,
+ format,
+ type,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+
+ // TexImage2D is not allowed with GL_TEXTURE_RECTANGLE_ARB targets.
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexSubImage2DClearsAfterTexImage2DNULL) {
+ InitState init;
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ InitDecoder(init);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
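+ // The NULL upload above leaves level 0 uncleared, so the partial
+ // TexSubImage2D below must first trigger a clear of the whole level.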
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ // Test that if we call it again it does not clear.
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, TexSubImage2DDoesNotClearAfterTexImage2DNULLThenData) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
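+ // Re-defining the level with actual data marks it as cleared, so no clear
+ // expectations are needed before the TexSubImage2D calls below.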
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ // Test that if we call it again it does not clear.
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(
+ GLES2DecoderManualInitTest,
+ TexSubImage2DDoesNotClearAfterTexImage2DNULLThenDataWithTexImage2DIsFaster) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::TEXSUBIMAGE2D_FASTER_THAN_TEXIMAGE2D));
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoderWithCommandLine(init, &command_line);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+
+ {
+ // Uses TexSubImage2D internally because the above workaround is active and
+ // the update covers the full size of the texture.
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(
+ GL_TEXTURE_2D, 0, 0, 0, 2, 2, GL_RGBA, GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::TexImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ // Test that if we call it again it does not clear.
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, TexSubImage2DClearsAfterTexImage2DWithDataThenNULL) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Put in data (so it should be marked as cleared)
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ // Put in no data.
+ TexImage2D tex_cmd;
+ tex_cmd.Init(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ // It won't actually call TexImage2D, just mark it as uncleared.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(tex_cmd));
+ // The next call to TexSubImage2D should clear the texture.
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, CopyTexImage2DMarksTextureAsCleared) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, CopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 1, 1, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ CopyTexImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+}
+
+TEST_P(GLES2DecoderTest, CopyTexSubImage2DClearsUnclearedTexture) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ EXPECT_CALL(*gl_, CopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 0, 0, 1, 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 0, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderManualInitTest, CompressedImage2DMarksTextureAsCleared) {
+ InitState init;
+ init.extensions = "GL_EXT_texture_compression_s3tc";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(
+ *gl_,
+ CompressedTexImage2D(
+ GL_TEXTURE_2D, 0, GL_COMPRESSED_RGB_S3TC_DXT1_EXT, 4, 4, 0, 8, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ CompressedTexImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT,
+ 4,
+ 4,
+ 8,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ EXPECT_TRUE(texture_ref->texture()->SafeToRenderFrom());
+}
+
+TEST_P(GLES2DecoderTest, TextureUsageAngleExtNotEnabledByDefault) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+ TexParameteri cmd;
+ cmd.Init(
+ GL_TEXTURE_2D, GL_TEXTURE_USAGE_ANGLE, GL_FRAMEBUFFER_ATTACHMENT_ANGLE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, ProduceAndConsumeTextureCHROMIUM) {
+ Mailbox mailbox = Mailbox::Generate();
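+ // Produce publishes the currently bound texture under the mailbox name; the
+ // later Consume on the same name should reattach the original service
+ // texture.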
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 1, GL_RGBA, 2, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ ProduceTextureCHROMIUMImmediate& produce_cmd =
+ *GetImmediateAs<ProduceTextureCHROMIUMImmediate>();
+ produce_cmd.Init(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(produce_cmd, sizeof(mailbox.name)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Texture didn't change.
+ GLsizei width;
+ GLsizei height;
+ GLenum type;
+ GLenum internal_format;
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(2, width);
+ EXPECT_EQ(4, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ // Service ID has not changed.
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ // Create new texture for consume.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId))
+ .RetiresOnSaturation();
+ DoBindTexture(GL_TEXTURE_2D, kNewClientId, kNewServiceId);
+
+ // Assigns and binds the original service-side texture ID.
+ EXPECT_CALL(*gl_, DeleteTextures(1, _)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kServiceTextureId))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ ConsumeTextureCHROMIUMImmediate& consume_cmd =
+ *GetImmediateAs<ConsumeTextureCHROMIUMImmediate>();
+ consume_cmd.Init(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(consume_cmd, sizeof(mailbox.name)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Texture is redefined.
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(2, width);
+ EXPECT_EQ(4, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ // Service ID is restored.
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+}
+
+TEST_P(GLES2DecoderTest, ProduceAndConsumeDirectTextureCHROMIUM) {
+ Mailbox mailbox = Mailbox::Generate();
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 1, GL_RGBA, 2, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ ProduceTextureDirectCHROMIUMImmediate& produce_cmd =
+ *GetImmediateAs<ProduceTextureDirectCHROMIUMImmediate>();
+ produce_cmd.Init(client_texture_id_, GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(produce_cmd, sizeof(mailbox.name)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Texture didn't change.
+ GLsizei width;
+ GLsizei height;
+ GLenum type;
+ GLenum internal_format;
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(2, width);
+ EXPECT_EQ(4, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ // Service ID has not changed.
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ // Consume the texture into a new client ID.
+ GLuint new_texture_id = kNewClientId;
+ CreateAndConsumeTextureCHROMIUMImmediate& consume_cmd =
+ *GetImmediateAs<CreateAndConsumeTextureCHROMIUMImmediate>();
+ consume_cmd.Init(GL_TEXTURE_2D, new_texture_id, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(consume_cmd, sizeof(mailbox.name)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Make sure the new client ID is associated with the produced service ID.
+ texture_ref = group().texture_manager()->GetTexture(new_texture_id);
+ ASSERT_TRUE(texture_ref != NULL);
+ texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ DoBindTexture(GL_TEXTURE_2D, kNewClientId, kServiceTextureId);
+
+ // Texture is redefined.
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(2, width);
+ EXPECT_EQ(4, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+}
+
+TEST_P(GLES2DecoderTest, ProduceTextureCHROMIUMInvalidTarget) {
+ Mailbox mailbox = Mailbox::Generate();
+
+ DoBindTexture(GL_TEXTURE_CUBE_MAP, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGBA, 3, 1, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ ProduceTextureDirectCHROMIUMImmediate& produce_cmd =
+ *GetImmediateAs<ProduceTextureDirectCHROMIUMImmediate>();
+ produce_cmd.Init(client_texture_id_, GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(produce_cmd, sizeof(mailbox.name)));
+
+ // ProduceTexture should fail if the texture and produce targets don't match.
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DepthTextureBadArgs) {
+ InitState init;
+ init.extensions = "GL_ANGLE_depth_texture";
+ init.gl_version = "opengl es 2.0";
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Check trying to upload data fails.
+ TexImage2D tex_cmd;
+ tex_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(tex_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ // Try level > 0.
+ tex_cmd.Init(GL_TEXTURE_2D,
+ 1,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ 0,
+ 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(tex_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ // Make a 1 pixel depth texture.
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ 0,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ 0,
+ 0);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Check that trying to update it fails.
+ TexSubImage2D tex_sub_cmd;
+ tex_sub_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ 1,
+ 1,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(tex_sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Check that trying to CopyTexImage2D fails
+ CopyTexImage2D copy_tex_cmd;
+ copy_tex_cmd.Init(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, 0, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(copy_tex_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Check that trying to CopyTexSubImage2D fails
+ CopyTexSubImage2D copy_sub_cmd;
+ copy_sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 0, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(copy_sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, GenerateMipmapDepthTexture) {
+ InitState init;
+ init.extensions = "GL_ANGLE_depth_texture";
+ init.gl_version = "opengl es 2.0";
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_DEPTH_COMPONENT,
+ 2,
+ 2,
+ 0,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ 0,
+ 0);
+ GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, BindTexImage2DCHROMIUM) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ GetImageManager()->AddImage(image.get(), 1);
+ EXPECT_FALSE(GetImageManager()->LookupImage(1) == NULL);
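+ // A stub GLImage is registered under image id 1 so that the
+ // BindTexImage2DCHROMIUM command below can attach it to level 0 of the
+ // bound texture.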
+
+ GLsizei width;
+ GLsizei height;
+ GLenum type;
+ GLenum internal_format;
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+
+ // Bind image to texture.
+ // ScopedGLErrorSuppressor calls GetError in its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_2D, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ // Image should now be set.
+ EXPECT_FALSE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+
+ // Define new texture image.
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ // Image should no longer be set.
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+}
+
+TEST_P(GLES2DecoderTest, BindTexImage2DCHROMIUMCubeMapNotAllowed) {
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ GetImageManager()->AddImage(image.get(), 1);
+ DoBindTexture(GL_TEXTURE_CUBE_MAP, client_texture_id_, kServiceTextureId);
+
+ BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_CUBE_MAP, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, OrphanGLImageWithTexImage2D) {
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ GetImageManager()->AddImage(image.get(), 1);
+ DoBindTexture(GL_TEXTURE_CUBE_MAP, client_texture_id_, kServiceTextureId);
+
+ BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_CUBE_MAP, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+}
+
+TEST_P(GLES2DecoderTest, ReleaseTexImage2DCHROMIUM) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ GetImageManager()->AddImage(image.get(), 1);
+ EXPECT_FALSE(GetImageManager()->LookupImage(1) == NULL);
+
+ GLsizei width;
+ GLsizei height;
+ GLenum type;
+ GLenum internal_format;
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+
+ // Bind image to texture.
+ // ScopedGLErrorSuppressor calls GetError in its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_2D, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ // Image should now be set.
+ EXPECT_FALSE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+
+ // Release image from texture.
+ // ScopedGLErrorSuppressor calls GetError in its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ ReleaseTexImage2DCHROMIUM release_tex_image_2d_cmd;
+ release_tex_image_2d_cmd.Init(GL_TEXTURE_2D, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(release_tex_image_2d_cmd));
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ // Image should no longer be set.
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+}
+
+class MockGLImage : public gfx::GLImage {
+ public:
+ MockGLImage() {}
+
+ // Overridden from gfx::GLImage:
+ MOCK_METHOD0(GetSize, gfx::Size());
+ MOCK_METHOD1(Destroy, void(bool));
+ MOCK_METHOD1(BindTexImage, bool(unsigned));
+ MOCK_METHOD1(ReleaseTexImage, void(unsigned));
+ MOCK_METHOD1(CopyTexImage, bool(unsigned));
+ MOCK_METHOD0(WillUseTexImage, void());
+ MOCK_METHOD0(DidUseTexImage, void());
+ MOCK_METHOD0(WillModifyTexImage, void());
+ MOCK_METHOD0(DidModifyTexImage, void());
+ MOCK_METHOD5(ScheduleOverlayPlane, bool(gfx::AcceleratedWidget,
+ int,
+ gfx::OverlayTransform,
+ const gfx::Rect&,
+ const gfx::RectF&));
+
+ protected:
+ virtual ~MockGLImage() {}
+};
+
+TEST_P(GLES2DecoderWithShaderTest, UseTexImage) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ const int32 kImageId = 1;
+ scoped_refptr<MockGLImage> image(new MockGLImage);
+ GetImageManager()->AddImage(image.get(), kImageId);
+
+ // Bind image to texture.
+ EXPECT_CALL(*image.get(), BindTexImage(GL_TEXTURE_2D))
+ .Times(1)
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*image.get(), GetSize())
+ .Times(1)
+ .WillOnce(Return(gfx::Size(1, 1)))
+ .RetiresOnSaturation();
+ // ScopedGLErrorSuppressor calls GetError in its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_2D, kImageId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
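+ // Drawing with a GLImage-backed texture is expected to bracket the draw
+ // with WillUseTexImage()/DidUseTexImage(), rebinding texture unit 0 around
+ // it.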
+
+ // ScopedGLErrorSuppressor calls GetError in its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(3).RetiresOnSaturation();
+ EXPECT_CALL(*image.get(), WillUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*image.get(), DidUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ // ScopedGLErrorSuppressor calls GetError in its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kServiceTextureId))
+ .Times(2)
+ .RetiresOnSaturation();
+ // The image stays 'in use' as long as it is bound to a framebuffer.
+ EXPECT_CALL(*image.get(), WillUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kServiceTextureId,
+ 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ FramebufferTexture2D fbtex_cmd;
+ fbtex_cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbtex_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // ScopedGLErrorSuppressor calls GetError in its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kServiceTextureId))
+ .Times(2)
+ .RetiresOnSaturation();
+ // The image should no longer be 'in use' after being unbound from the framebuffer.
+ EXPECT_CALL(*image.get(), DidUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ FramebufferRenderbuffer fbrb_cmd;
+ fbrb_cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
+}
+
+TEST_P(GLES2DecoderManualInitTest, DrawWithGLImageExternal) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ scoped_refptr<MockGLImage> image(new MockGLImage);
+ group().texture_manager()->SetTarget(texture_ref, GL_TEXTURE_EXTERNAL_OES);
+ group().texture_manager()->SetLevelInfo(texture_ref,
+ GL_TEXTURE_EXTERNAL_OES,
+ 0,
+ GL_RGBA,
+ 0,
+ 0,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ group().texture_manager()->SetLevelImage(
+ texture_ref, GL_TEXTURE_EXTERNAL_OES, 0, image.get());
+
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupSamplerExternalProgram();
+ SetupIndexBuffer();
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ EXPECT_TRUE(group().texture_manager()->CanRender(texture_ref));
+
+ InSequence s;
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*image.get(), WillUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(1);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*image.get(), DidUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexImage2DFloatOnGLES2) {
+ InitState init;
+ init.extensions = "GL_OES_texture_float";
+ init.gl_version = "opengl es 2.0";
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 16, 17, 0, GL_RGBA, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 16, 17, 0, GL_RGB, GL_FLOAT, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_LUMINANCE, 16, 17, 0, GL_LUMINANCE, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 16, 17, 0, GL_ALPHA, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ 16,
+ 17,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ GL_FLOAT,
+ 0,
+ 0);
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexImage2DFloatOnGLES3) {
+ InitState init;
+ init.extensions = "GL_OES_texture_float GL_EXT_color_buffer_float";
+ init.gl_version = "opengl es 3.0";
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 16, 17, 0, GL_RGBA, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 16, 17, 0, GL_RGB, GL_FLOAT, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA32F, 16, 17, 0, GL_RGBA, GL_FLOAT, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_LUMINANCE, 16, 17, 0, GL_LUMINANCE, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 16, 17, 0, GL_ALPHA, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ 16,
+ 17,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ GL_FLOAT,
+ 0,
+ 0);
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexSubImage2DFloatOnGLES3) {
+ InitState init;
+ init.extensions = "GL_OES_texture_float GL_EXT_color_buffer_float";
+ init.gl_version = "opengl es 3.0";
+ InitDecoder(init);
+ const int kWidth = 8;
+ const int kHeight = 4;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA32F,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_FLOAT,
+ 0,
+ 0);
+ EXPECT_CALL(*gl_,
+ TexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA32F,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_FLOAT,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_FLOAT,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexSubImage2DFloatDoesClearOnGLES3) {
+ InitState init;
+ init.extensions = "GL_OES_texture_float GL_EXT_color_buffer_float";
+ init.gl_version = "opengl es 3.0";
+ InitDecoder(init);
+ const int kWidth = 8;
+ const int kHeight = 4;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA32F,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_FLOAT,
+ 0,
+ 0);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA32F,
+ GL_RGBA,
+ GL_FLOAT,
+ kWidth,
+ kHeight);
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 0,
+ kWidth - 1,
+ kHeight,
+ GL_RGBA,
+ GL_FLOAT,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 0,
+ kWidth - 1,
+ kHeight,
+ GL_RGBA,
+ GL_FLOAT,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexImage2DFloatConvertsFormatDesktop) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_float";
+ init.gl_version = "2.1";
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA32F, 16, 17, 0, GL_RGBA, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, 16, 17, 0, GL_RGB, GL_FLOAT, 0, 0);
+ DoTexImage2DConvertInternalFormat(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 16,
+ 17,
+ 0,
+ GL_RGBA,
+ GL_FLOAT,
+ 0,
+ 0,
+ GL_RGBA32F_ARB);
+ DoTexImage2DConvertInternalFormat(GL_TEXTURE_2D,
+ 0,
+ GL_RGB,
+ 16,
+ 17,
+ 0,
+ GL_RGB,
+ GL_FLOAT,
+ 0,
+ 0,
+ GL_RGB32F_ARB);
+ DoTexImage2DConvertInternalFormat(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE,
+ 16,
+ 17,
+ 0,
+ GL_LUMINANCE,
+ GL_FLOAT,
+ 0,
+ 0,
+ GL_LUMINANCE32F_ARB);
+ DoTexImage2DConvertInternalFormat(GL_TEXTURE_2D,
+ 0,
+ GL_ALPHA,
+ 16,
+ 17,
+ 0,
+ GL_ALPHA,
+ GL_FLOAT,
+ 0,
+ 0,
+ GL_ALPHA32F_ARB);
+ DoTexImage2DConvertInternalFormat(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ 16,
+ 17,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ GL_FLOAT,
+ 0,
+ 0,
+ GL_LUMINANCE_ALPHA32F_ARB);
+}
+
+class GLES2DecoderCompressedFormatsTest : public GLES2DecoderManualInitTest {
+ public:
+ GLES2DecoderCompressedFormatsTest() {}
+
+ static bool ValueInArray(GLint value, GLint* array, GLint count) {
+ for (GLint ii = 0; ii < count; ++ii) {
+ if (array[ii] == value) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void CheckFormats(const char* extension, const GLenum* formats, int count) {
+ InitState init;
+ init.extensions = extension;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ GetIntegerv cmd;
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0).RetiresOnSaturation();
+ cmd.Init(GL_NUM_COMPRESSED_TEXTURE_FORMATS,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ GLint num_formats = result->GetData()[0];
+ EXPECT_EQ(count, num_formats);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ result->size = 0;
+ cmd.Init(GL_COMPRESSED_TEXTURE_FORMATS,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(num_formats, result->GetNumResults());
+
+ for (int i = 0; i < count; ++i) {
+ EXPECT_TRUE(
+ ValueInArray(formats[i], result->GetData(), result->GetNumResults()));
+ }
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderCompressedFormatsTest,
+ ::testing::Bool());
+
+TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsS3TC) {
+ const GLenum formats[] = {
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT};
+ CheckFormats("GL_EXT_texture_compression_s3tc", formats, 4);
+}
+
+TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsATC) {
+ const GLenum formats[] = {GL_ATC_RGB_AMD, GL_ATC_RGBA_EXPLICIT_ALPHA_AMD,
+ GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD};
+ CheckFormats("GL_AMD_compressed_ATC_texture", formats, 3);
+}
+
+TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsPVRTC) {
+ const GLenum formats[] = {
+ GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG, GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG,
+ GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG, GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG};
+ CheckFormats("GL_IMG_texture_compression_pvrtc", formats, 4);
+}
+
+TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsETC1) {
+ const GLenum formats[] = {GL_ETC1_RGB8_OES};
+ CheckFormats("GL_OES_compressed_ETC1_RGB8_texture", formats, 1);
+}
+
+TEST_P(GLES2DecoderManualInitTest, GetNoCompressedTextureFormats) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ GetIntegerv cmd;
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0).RetiresOnSaturation();
+ cmd.Init(GL_NUM_COMPRESSED_TEXTURE_FORMATS,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ GLint num_formats = result->GetData()[0];
+ EXPECT_EQ(0, num_formats);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ result->size = 0;
+ cmd.Init(
+ GL_COMPRESSED_TEXTURE_FORMATS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(num_formats, result->GetNumResults());
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// TODO(gman): Complete this test.
+// TEST_P(GLES2DecoderTest, CompressedTexImage2DGLError) {
+// }
+
+// TODO(gman): CompressedTexImage2D
+
+// TODO(gman): CompressedTexImage2DImmediate
+
+// TODO(gman): CompressedTexSubImage2DImmediate
+
+// TODO(gman): TexImage2D
+
+// TODO(gman): TexImage2DImmediate
+
+// TODO(gman): TexSubImage2DImmediate
+
+} // namespace gles2
+} // namespace gpu
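
The CheckFormats() helper in the unittest above mirrors the two-step query a GL client performs to enumerate compressed texture formats. A minimal client-side sketch of that pattern (plain GLES2 calls, not part of this patch) might look like:

    #include <vector>

    // Returns the compressed texture formats exposed by the current context.
    std::vector<GLint> QueryCompressedFormats() {
      GLint num_formats = 0;
      glGetIntegerv(GL_NUM_COMPRESSED_TEXTURE_FORMATS, &num_formats);
      std::vector<GLint> formats(num_formats > 0 ? num_formats : 0);
      if (num_formats > 0)
        glGetIntegerv(GL_COMPRESSED_TEXTURE_FORMATS, &formats[0]);
      return formats;  // E.g. contains GL_ETC1_RGB8_OES when ETC1 is exposed.
    }

The tests above assert that the decoder answers both queries from its own validated table rather than forwarding GetIntegerv to the driver, hence the .Times(0) expectation on the mocked GetIntegerv.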
diff --git a/gpu/command_buffer/service/gles2_cmd_validation.cc b/gpu/command_buffer/service/gles2_cmd_validation.cc
new file mode 100644
index 0000000..8d4fd71
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_validation.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Contains various validation functions for the GLES2 service.
+
+#include "base/basictypes.h"
+#define GLES2_GPU_SERVICE 1
+#include "gpu/command_buffer/service/gles2_cmd_validation.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+
+namespace gpu {
+namespace gles2 {
+
+#include "gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
+
+
diff --git a/gpu/command_buffer/service/gles2_cmd_validation.h b/gpu/command_buffer/service/gles2_cmd_validation.h
new file mode 100644
index 0000000..22ee2da
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_validation.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Contains various validation functions for the GLES2 service.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_H_
+
+#include <algorithm>
+#include <vector>
+#define GLES2_GPU_SERVICE 1
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+
+namespace gpu {
+namespace gles2 {
+
+// ValueValidator returns true if a value is valid.
+template <typename T>
+class ValueValidator {
+ public:
+ ValueValidator() {}
+
+ ValueValidator(const T* valid_values, int num_values) {
+ for (int ii = 0; ii < num_values; ++ii) {
+ AddValue(valid_values[ii]);
+ }
+ }
+
+ void AddValue(const T value) {
+ if (!IsValid(value)) {
+ valid_values_.push_back(value);
+ }
+ }
+
+ bool IsValid(const T value) const {
+ return std::find(valid_values_.begin(), valid_values_.end(), value) !=
+ valid_values_.end();
+ }
+
+ const std::vector<T>& GetValues() const {
+ return valid_values_;
+ }
+
+ private:
+ std::vector<T> valid_values_;
+};
+
+struct Validators {
+ Validators();
+#include "gpu/command_buffer/service/gles2_cmd_validation_autogen.h"
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_H_
+
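
For illustration, a small self-contained sketch of how the ValueValidator template above behaves; the table, the incoming |target| value, and the GL_INVALID_ENUM reaction are illustrative assumptions, not code from this patch:

    static const GLenum kBufferTargets[] = {
      GL_ARRAY_BUFFER,
      GL_ELEMENT_ARRAY_BUFFER,
    };

    ValueValidator<GLenum> buffer_target_validator(
        kBufferTargets, arraysize(kBufferTargets));

    // The valid set can be widened at runtime; AddValue() checks IsValid()
    // first, so duplicates are ignored.
    buffer_target_validator.AddValue(GL_ELEMENT_ARRAY_BUFFER);

    if (!buffer_target_validator.IsValid(target)) {
      // A decoder would typically generate GL_INVALID_ENUM here.
    }
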
diff --git a/gpu/command_buffer/service/gles2_cmd_validation_autogen.h b/gpu/command_buffer/service/gles2_cmd_validation_autogen.h
new file mode 100644
index 0000000..de84037
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_validation_autogen.h
@@ -0,0 +1,75 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_AUTOGEN_H_
+
+ValueValidator<GLenum> attachment;
+ValueValidator<GLenum> backbuffer_attachment;
+ValueValidator<GLenum> blit_filter;
+ValueValidator<GLenum> buffer_parameter;
+ValueValidator<GLenum> buffer_target;
+ValueValidator<GLenum> buffer_usage;
+ValueValidator<GLenum> capability;
+ValueValidator<GLenum> cmp_function;
+ValueValidator<GLenum> compressed_texture_format;
+ValueValidator<GLenum> draw_mode;
+ValueValidator<GLenum> dst_blend_factor;
+ValueValidator<GLenum> equation;
+ValueValidator<GLenum> face_mode;
+ValueValidator<GLenum> face_type;
+ValueValidator<GLenum> frame_buffer_parameter;
+ValueValidator<GLenum> frame_buffer_target;
+ValueValidator<GLenum> g_l_state;
+ValueValidator<GLenum> get_max_index_type;
+ValueValidator<GLenum> get_tex_param_target;
+ValueValidator<GLenum> hint_mode;
+ValueValidator<GLenum> hint_target;
+ValueValidator<GLenum> image_internal_format;
+ValueValidator<GLenum> image_usage;
+ValueValidator<GLenum> index_type;
+ValueValidator<GLenum> matrix_mode;
+ValueValidator<GLenum> pixel_store;
+ValueValidator<GLint> pixel_store_alignment;
+ValueValidator<GLenum> pixel_type;
+ValueValidator<GLenum> program_parameter;
+ValueValidator<GLenum> query_object_parameter;
+ValueValidator<GLenum> query_parameter;
+ValueValidator<GLenum> query_target;
+ValueValidator<GLenum> read_pixel_format;
+ValueValidator<GLenum> read_pixel_type;
+ValueValidator<GLenum> render_buffer_format;
+ValueValidator<GLenum> render_buffer_parameter;
+ValueValidator<GLenum> render_buffer_target;
+ValueValidator<GLenum> reset_status;
+ValueValidator<GLenum> shader_binary_format;
+ValueValidator<GLenum> shader_parameter;
+ValueValidator<GLenum> shader_precision;
+ValueValidator<GLenum> shader_type;
+ValueValidator<GLenum> src_blend_factor;
+ValueValidator<GLenum> stencil_op;
+ValueValidator<GLenum> string_type;
+ValueValidator<GLenum> texture_bind_target;
+ValueValidator<GLenum> texture_format;
+ValueValidator<GLenum> texture_internal_format;
+ValueValidator<GLenum> texture_internal_format_storage;
+ValueValidator<GLenum> texture_mag_filter_mode;
+ValueValidator<GLenum> texture_min_filter_mode;
+ValueValidator<GLenum> texture_parameter;
+ValueValidator<GLenum> texture_pool;
+ValueValidator<GLenum> texture_target;
+ValueValidator<GLenum> texture_usage;
+ValueValidator<GLenum> texture_wrap_mode;
+ValueValidator<GLint> vertex_attrib_size;
+ValueValidator<GLenum> vertex_attrib_type;
+ValueValidator<GLenum> vertex_attribute;
+ValueValidator<GLenum> vertex_pointer;
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h b/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
new file mode 100644
index 0000000..790b9b3
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
@@ -0,0 +1,629 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_IMPLEMENTATION_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_IMPLEMENTATION_AUTOGEN_H_
+
+static const GLenum valid_attachment_table[] = {
+ GL_COLOR_ATTACHMENT0,
+ GL_DEPTH_ATTACHMENT,
+ GL_STENCIL_ATTACHMENT,
+};
+
+static const GLenum valid_backbuffer_attachment_table[] = {
+ GL_COLOR_EXT,
+ GL_DEPTH_EXT,
+ GL_STENCIL_EXT,
+};
+
+static const GLenum valid_blit_filter_table[] = {
+ GL_NEAREST,
+ GL_LINEAR,
+};
+
+static const GLenum valid_buffer_parameter_table[] = {
+ GL_BUFFER_SIZE,
+ GL_BUFFER_USAGE,
+};
+
+static const GLenum valid_buffer_target_table[] = {
+ GL_ARRAY_BUFFER,
+ GL_ELEMENT_ARRAY_BUFFER,
+};
+
+static const GLenum valid_buffer_usage_table[] = {
+ GL_STREAM_DRAW,
+ GL_STATIC_DRAW,
+ GL_DYNAMIC_DRAW,
+};
+
+static const GLenum valid_capability_table[] = {
+ GL_BLEND,
+ GL_CULL_FACE,
+ GL_DEPTH_TEST,
+ GL_DITHER,
+ GL_POLYGON_OFFSET_FILL,
+ GL_SAMPLE_ALPHA_TO_COVERAGE,
+ GL_SAMPLE_COVERAGE,
+ GL_SCISSOR_TEST,
+ GL_STENCIL_TEST,
+};
+
+static const GLenum valid_cmp_function_table[] = {
+ GL_NEVER,
+ GL_LESS,
+ GL_EQUAL,
+ GL_LEQUAL,
+ GL_GREATER,
+ GL_NOTEQUAL,
+ GL_GEQUAL,
+ GL_ALWAYS,
+};
+
+static const GLenum valid_draw_mode_table[] = {
+ GL_POINTS,
+ GL_LINE_STRIP,
+ GL_LINE_LOOP,
+ GL_LINES,
+ GL_TRIANGLE_STRIP,
+ GL_TRIANGLE_FAN,
+ GL_TRIANGLES,
+};
+
+static const GLenum valid_dst_blend_factor_table[] = {
+ GL_ZERO,
+ GL_ONE,
+ GL_SRC_COLOR,
+ GL_ONE_MINUS_SRC_COLOR,
+ GL_DST_COLOR,
+ GL_ONE_MINUS_DST_COLOR,
+ GL_SRC_ALPHA,
+ GL_ONE_MINUS_SRC_ALPHA,
+ GL_DST_ALPHA,
+ GL_ONE_MINUS_DST_ALPHA,
+ GL_CONSTANT_COLOR,
+ GL_ONE_MINUS_CONSTANT_COLOR,
+ GL_CONSTANT_ALPHA,
+ GL_ONE_MINUS_CONSTANT_ALPHA,
+};
+
+static const GLenum valid_equation_table[] = {
+ GL_FUNC_ADD,
+ GL_FUNC_SUBTRACT,
+ GL_FUNC_REVERSE_SUBTRACT,
+};
+
+static const GLenum valid_face_mode_table[] = {
+ GL_CW,
+ GL_CCW,
+};
+
+static const GLenum valid_face_type_table[] = {
+ GL_FRONT,
+ GL_BACK,
+ GL_FRONT_AND_BACK,
+};
+
+static const GLenum valid_frame_buffer_parameter_table[] = {
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL,
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE,
+};
+
+static const GLenum valid_frame_buffer_target_table[] = {
+ GL_FRAMEBUFFER,
+};
+
+static const GLenum valid_g_l_state_table[] = {
+ GL_ACTIVE_TEXTURE,
+ GL_ALIASED_LINE_WIDTH_RANGE,
+ GL_ALIASED_POINT_SIZE_RANGE,
+ GL_ALPHA_BITS,
+ GL_ARRAY_BUFFER_BINDING,
+ GL_BLUE_BITS,
+ GL_COMPRESSED_TEXTURE_FORMATS,
+ GL_CURRENT_PROGRAM,
+ GL_DEPTH_BITS,
+ GL_DEPTH_RANGE,
+ GL_ELEMENT_ARRAY_BUFFER_BINDING,
+ GL_FRAMEBUFFER_BINDING,
+ GL_GENERATE_MIPMAP_HINT,
+ GL_GREEN_BITS,
+ GL_IMPLEMENTATION_COLOR_READ_FORMAT,
+ GL_IMPLEMENTATION_COLOR_READ_TYPE,
+ GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS,
+ GL_MAX_CUBE_MAP_TEXTURE_SIZE,
+ GL_MAX_FRAGMENT_UNIFORM_VECTORS,
+ GL_MAX_RENDERBUFFER_SIZE,
+ GL_MAX_TEXTURE_IMAGE_UNITS,
+ GL_MAX_TEXTURE_SIZE,
+ GL_MAX_VARYING_VECTORS,
+ GL_MAX_VERTEX_ATTRIBS,
+ GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS,
+ GL_MAX_VERTEX_UNIFORM_VECTORS,
+ GL_MAX_VIEWPORT_DIMS,
+ GL_NUM_COMPRESSED_TEXTURE_FORMATS,
+ GL_NUM_SHADER_BINARY_FORMATS,
+ GL_PACK_ALIGNMENT,
+ GL_RED_BITS,
+ GL_RENDERBUFFER_BINDING,
+ GL_SAMPLE_BUFFERS,
+ GL_SAMPLE_COVERAGE_INVERT,
+ GL_SAMPLE_COVERAGE_VALUE,
+ GL_SAMPLES,
+ GL_SCISSOR_BOX,
+ GL_SHADER_BINARY_FORMATS,
+ GL_SHADER_COMPILER,
+ GL_SUBPIXEL_BITS,
+ GL_STENCIL_BITS,
+ GL_TEXTURE_BINDING_2D,
+ GL_TEXTURE_BINDING_CUBE_MAP,
+ GL_UNPACK_ALIGNMENT,
+ GL_UNPACK_FLIP_Y_CHROMIUM,
+ GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM,
+ GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM,
+ GL_BIND_GENERATES_RESOURCE_CHROMIUM,
+ GL_VERTEX_ARRAY_BINDING_OES,
+ GL_VIEWPORT,
+ GL_BLEND_COLOR,
+ GL_BLEND_EQUATION_RGB,
+ GL_BLEND_EQUATION_ALPHA,
+ GL_BLEND_SRC_RGB,
+ GL_BLEND_DST_RGB,
+ GL_BLEND_SRC_ALPHA,
+ GL_BLEND_DST_ALPHA,
+ GL_COLOR_CLEAR_VALUE,
+ GL_DEPTH_CLEAR_VALUE,
+ GL_STENCIL_CLEAR_VALUE,
+ GL_COLOR_WRITEMASK,
+ GL_CULL_FACE_MODE,
+ GL_DEPTH_FUNC,
+ GL_DEPTH_WRITEMASK,
+ GL_FRONT_FACE,
+ GL_LINE_WIDTH,
+ GL_POLYGON_OFFSET_FACTOR,
+ GL_POLYGON_OFFSET_UNITS,
+ GL_STENCIL_FUNC,
+ GL_STENCIL_REF,
+ GL_STENCIL_VALUE_MASK,
+ GL_STENCIL_BACK_FUNC,
+ GL_STENCIL_BACK_REF,
+ GL_STENCIL_BACK_VALUE_MASK,
+ GL_STENCIL_WRITEMASK,
+ GL_STENCIL_BACK_WRITEMASK,
+ GL_STENCIL_FAIL,
+ GL_STENCIL_PASS_DEPTH_FAIL,
+ GL_STENCIL_PASS_DEPTH_PASS,
+ GL_STENCIL_BACK_FAIL,
+ GL_STENCIL_BACK_PASS_DEPTH_FAIL,
+ GL_STENCIL_BACK_PASS_DEPTH_PASS,
+ GL_BLEND,
+ GL_CULL_FACE,
+ GL_DEPTH_TEST,
+ GL_DITHER,
+ GL_POLYGON_OFFSET_FILL,
+ GL_SAMPLE_ALPHA_TO_COVERAGE,
+ GL_SAMPLE_COVERAGE,
+ GL_SCISSOR_TEST,
+ GL_STENCIL_TEST,
+};
+
+static const GLenum valid_get_max_index_type_table[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT,
+ GL_UNSIGNED_INT,
+};
+
+static const GLenum valid_get_tex_param_target_table[] = {
+ GL_TEXTURE_2D,
+ GL_TEXTURE_CUBE_MAP,
+};
+
+static const GLenum valid_hint_mode_table[] = {
+ GL_FASTEST,
+ GL_NICEST,
+ GL_DONT_CARE,
+};
+
+static const GLenum valid_hint_target_table[] = {
+ GL_GENERATE_MIPMAP_HINT,
+};
+
+static const GLenum valid_image_internal_format_table[] = {
+ GL_RGB,
+ GL_RGBA,
+};
+
+static const GLenum valid_image_usage_table[] = {
+ GL_MAP_CHROMIUM,
+ GL_SCANOUT_CHROMIUM,
+};
+
+static const GLenum valid_index_type_table[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT,
+};
+
+static const GLenum valid_matrix_mode_table[] = {
+ GL_PATH_PROJECTION_CHROMIUM,
+ GL_PATH_MODELVIEW_CHROMIUM,
+};
+
+static const GLenum valid_pixel_store_table[] = {
+ GL_PACK_ALIGNMENT,
+ GL_UNPACK_ALIGNMENT,
+ GL_UNPACK_FLIP_Y_CHROMIUM,
+ GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM,
+ GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM,
+};
+
+static const GLint valid_pixel_store_alignment_table[] = {
+ 1,
+ 2,
+ 4,
+ 8,
+};
+
+static const GLenum valid_pixel_type_table[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_5_6_5,
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_UNSIGNED_SHORT_5_5_5_1,
+};
+
+static const GLenum valid_program_parameter_table[] = {
+ GL_DELETE_STATUS,
+ GL_LINK_STATUS,
+ GL_VALIDATE_STATUS,
+ GL_INFO_LOG_LENGTH,
+ GL_ATTACHED_SHADERS,
+ GL_ACTIVE_ATTRIBUTES,
+ GL_ACTIVE_ATTRIBUTE_MAX_LENGTH,
+ GL_ACTIVE_UNIFORMS,
+ GL_ACTIVE_UNIFORM_MAX_LENGTH,
+};
+
+static const GLenum valid_query_object_parameter_table[] = {
+ GL_QUERY_RESULT_EXT,
+ GL_QUERY_RESULT_AVAILABLE_EXT,
+};
+
+static const GLenum valid_query_parameter_table[] = {
+ GL_CURRENT_QUERY_EXT,
+};
+
+static const GLenum valid_query_target_table[] = {
+ GL_ANY_SAMPLES_PASSED_EXT,
+ GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT,
+ GL_COMMANDS_ISSUED_CHROMIUM,
+ GL_LATENCY_QUERY_CHROMIUM,
+ GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM,
+ GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM,
+ GL_COMMANDS_COMPLETED_CHROMIUM,
+};
+
+static const GLenum valid_read_pixel_format_table[] = {
+ GL_ALPHA,
+ GL_RGB,
+ GL_RGBA,
+};
+
+static const GLenum valid_read_pixel_type_table[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_5_6_5,
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_UNSIGNED_SHORT_5_5_5_1,
+};
+
+static const GLenum valid_render_buffer_format_table[] = {
+ GL_RGBA4,
+ GL_RGB565,
+ GL_RGB5_A1,
+ GL_DEPTH_COMPONENT16,
+ GL_STENCIL_INDEX8,
+};
+
+static const GLenum valid_render_buffer_parameter_table[] = {
+ GL_RENDERBUFFER_RED_SIZE,
+ GL_RENDERBUFFER_GREEN_SIZE,
+ GL_RENDERBUFFER_BLUE_SIZE,
+ GL_RENDERBUFFER_ALPHA_SIZE,
+ GL_RENDERBUFFER_DEPTH_SIZE,
+ GL_RENDERBUFFER_STENCIL_SIZE,
+ GL_RENDERBUFFER_WIDTH,
+ GL_RENDERBUFFER_HEIGHT,
+ GL_RENDERBUFFER_INTERNAL_FORMAT,
+};
+
+static const GLenum valid_render_buffer_target_table[] = {
+ GL_RENDERBUFFER,
+};
+
+static const GLenum valid_reset_status_table[] = {
+ GL_GUILTY_CONTEXT_RESET_ARB,
+ GL_INNOCENT_CONTEXT_RESET_ARB,
+ GL_UNKNOWN_CONTEXT_RESET_ARB,
+};
+
+static const GLenum valid_shader_parameter_table[] = {
+ GL_SHADER_TYPE,
+ GL_DELETE_STATUS,
+ GL_COMPILE_STATUS,
+ GL_INFO_LOG_LENGTH,
+ GL_SHADER_SOURCE_LENGTH,
+ GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE,
+};
+
+static const GLenum valid_shader_precision_table[] = {
+ GL_LOW_FLOAT,
+ GL_MEDIUM_FLOAT,
+ GL_HIGH_FLOAT,
+ GL_LOW_INT,
+ GL_MEDIUM_INT,
+ GL_HIGH_INT,
+};
+
+static const GLenum valid_shader_type_table[] = {
+ GL_VERTEX_SHADER,
+ GL_FRAGMENT_SHADER,
+};
+
+static const GLenum valid_src_blend_factor_table[] = {
+ GL_ZERO,
+ GL_ONE,
+ GL_SRC_COLOR,
+ GL_ONE_MINUS_SRC_COLOR,
+ GL_DST_COLOR,
+ GL_ONE_MINUS_DST_COLOR,
+ GL_SRC_ALPHA,
+ GL_ONE_MINUS_SRC_ALPHA,
+ GL_DST_ALPHA,
+ GL_ONE_MINUS_DST_ALPHA,
+ GL_CONSTANT_COLOR,
+ GL_ONE_MINUS_CONSTANT_COLOR,
+ GL_CONSTANT_ALPHA,
+ GL_ONE_MINUS_CONSTANT_ALPHA,
+ GL_SRC_ALPHA_SATURATE,
+};
+
+static const GLenum valid_stencil_op_table[] = {
+ GL_KEEP,
+ GL_ZERO,
+ GL_REPLACE,
+ GL_INCR,
+ GL_INCR_WRAP,
+ GL_DECR,
+ GL_DECR_WRAP,
+ GL_INVERT,
+};
+
+static const GLenum valid_string_type_table[] = {
+ GL_VENDOR,
+ GL_RENDERER,
+ GL_VERSION,
+ GL_SHADING_LANGUAGE_VERSION,
+ GL_EXTENSIONS,
+};
+
+static const GLenum valid_texture_bind_target_table[] = {
+ GL_TEXTURE_2D,
+ GL_TEXTURE_CUBE_MAP,
+};
+
+static const GLenum valid_texture_format_table[] = {
+ GL_ALPHA,
+ GL_LUMINANCE,
+ GL_LUMINANCE_ALPHA,
+ GL_RGB,
+ GL_RGBA,
+};
+
+static const GLenum valid_texture_internal_format_table[] = {
+ GL_ALPHA,
+ GL_LUMINANCE,
+ GL_LUMINANCE_ALPHA,
+ GL_RGB,
+ GL_RGBA,
+};
+
+static const GLenum valid_texture_internal_format_storage_table[] = {
+ GL_RGB565,
+ GL_RGBA4,
+ GL_RGB5_A1,
+ GL_ALPHA8_EXT,
+ GL_LUMINANCE8_EXT,
+ GL_LUMINANCE8_ALPHA8_EXT,
+ GL_RGB8_OES,
+ GL_RGBA8_OES,
+};
+
+static const GLenum valid_texture_mag_filter_mode_table[] = {
+ GL_NEAREST,
+ GL_LINEAR,
+};
+
+static const GLenum valid_texture_min_filter_mode_table[] = {
+ GL_NEAREST,
+ GL_LINEAR,
+ GL_NEAREST_MIPMAP_NEAREST,
+ GL_LINEAR_MIPMAP_NEAREST,
+ GL_NEAREST_MIPMAP_LINEAR,
+ GL_LINEAR_MIPMAP_LINEAR,
+};
+
+static const GLenum valid_texture_parameter_table[] = {
+ GL_TEXTURE_MAG_FILTER,
+ GL_TEXTURE_MIN_FILTER,
+ GL_TEXTURE_POOL_CHROMIUM,
+ GL_TEXTURE_WRAP_S,
+ GL_TEXTURE_WRAP_T,
+};
+
+static const GLenum valid_texture_pool_table[] = {
+ GL_TEXTURE_POOL_MANAGED_CHROMIUM,
+ GL_TEXTURE_POOL_UNMANAGED_CHROMIUM,
+};
+
+static const GLenum valid_texture_target_table[] = {
+ GL_TEXTURE_2D,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+};
+
+static const GLenum valid_texture_usage_table[] = {
+ GL_NONE,
+ GL_FRAMEBUFFER_ATTACHMENT_ANGLE,
+};
+
+static const GLenum valid_texture_wrap_mode_table[] = {
+ GL_CLAMP_TO_EDGE,
+ GL_MIRRORED_REPEAT,
+ GL_REPEAT,
+};
+
+static const GLint valid_vertex_attrib_size_table[] = {
+ 1,
+ 2,
+ 3,
+ 4,
+};
+
+static const GLenum valid_vertex_attrib_type_table[] = {
+ GL_BYTE,
+ GL_UNSIGNED_BYTE,
+ GL_SHORT,
+ GL_UNSIGNED_SHORT,
+ GL_FLOAT,
+};
+
+static const GLenum valid_vertex_attribute_table[] = {
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED,
+ GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING,
+ GL_VERTEX_ATTRIB_ARRAY_ENABLED,
+ GL_VERTEX_ATTRIB_ARRAY_SIZE,
+ GL_VERTEX_ATTRIB_ARRAY_STRIDE,
+ GL_VERTEX_ATTRIB_ARRAY_TYPE,
+ GL_CURRENT_VERTEX_ATTRIB,
+};
+
+static const GLenum valid_vertex_pointer_table[] = {
+ GL_VERTEX_ATTRIB_ARRAY_POINTER,
+};
+
+Validators::Validators()
+ : attachment(valid_attachment_table, arraysize(valid_attachment_table)),
+ backbuffer_attachment(valid_backbuffer_attachment_table,
+ arraysize(valid_backbuffer_attachment_table)),
+ blit_filter(valid_blit_filter_table, arraysize(valid_blit_filter_table)),
+ buffer_parameter(valid_buffer_parameter_table,
+ arraysize(valid_buffer_parameter_table)),
+ buffer_target(valid_buffer_target_table,
+ arraysize(valid_buffer_target_table)),
+ buffer_usage(valid_buffer_usage_table,
+ arraysize(valid_buffer_usage_table)),
+ capability(valid_capability_table, arraysize(valid_capability_table)),
+ cmp_function(valid_cmp_function_table,
+ arraysize(valid_cmp_function_table)),
+ compressed_texture_format(),
+ draw_mode(valid_draw_mode_table, arraysize(valid_draw_mode_table)),
+ dst_blend_factor(valid_dst_blend_factor_table,
+ arraysize(valid_dst_blend_factor_table)),
+ equation(valid_equation_table, arraysize(valid_equation_table)),
+ face_mode(valid_face_mode_table, arraysize(valid_face_mode_table)),
+ face_type(valid_face_type_table, arraysize(valid_face_type_table)),
+ frame_buffer_parameter(valid_frame_buffer_parameter_table,
+ arraysize(valid_frame_buffer_parameter_table)),
+ frame_buffer_target(valid_frame_buffer_target_table,
+ arraysize(valid_frame_buffer_target_table)),
+ g_l_state(valid_g_l_state_table, arraysize(valid_g_l_state_table)),
+ get_max_index_type(valid_get_max_index_type_table,
+ arraysize(valid_get_max_index_type_table)),
+ get_tex_param_target(valid_get_tex_param_target_table,
+ arraysize(valid_get_tex_param_target_table)),
+ hint_mode(valid_hint_mode_table, arraysize(valid_hint_mode_table)),
+ hint_target(valid_hint_target_table, arraysize(valid_hint_target_table)),
+ image_internal_format(valid_image_internal_format_table,
+ arraysize(valid_image_internal_format_table)),
+ image_usage(valid_image_usage_table, arraysize(valid_image_usage_table)),
+ index_type(valid_index_type_table, arraysize(valid_index_type_table)),
+ matrix_mode(valid_matrix_mode_table, arraysize(valid_matrix_mode_table)),
+ pixel_store(valid_pixel_store_table, arraysize(valid_pixel_store_table)),
+ pixel_store_alignment(valid_pixel_store_alignment_table,
+ arraysize(valid_pixel_store_alignment_table)),
+ pixel_type(valid_pixel_type_table, arraysize(valid_pixel_type_table)),
+ program_parameter(valid_program_parameter_table,
+ arraysize(valid_program_parameter_table)),
+ query_object_parameter(valid_query_object_parameter_table,
+ arraysize(valid_query_object_parameter_table)),
+ query_parameter(valid_query_parameter_table,
+ arraysize(valid_query_parameter_table)),
+ query_target(valid_query_target_table,
+ arraysize(valid_query_target_table)),
+ read_pixel_format(valid_read_pixel_format_table,
+ arraysize(valid_read_pixel_format_table)),
+ read_pixel_type(valid_read_pixel_type_table,
+ arraysize(valid_read_pixel_type_table)),
+ render_buffer_format(valid_render_buffer_format_table,
+ arraysize(valid_render_buffer_format_table)),
+ render_buffer_parameter(valid_render_buffer_parameter_table,
+ arraysize(valid_render_buffer_parameter_table)),
+ render_buffer_target(valid_render_buffer_target_table,
+ arraysize(valid_render_buffer_target_table)),
+ reset_status(valid_reset_status_table,
+ arraysize(valid_reset_status_table)),
+ shader_binary_format(),
+ shader_parameter(valid_shader_parameter_table,
+ arraysize(valid_shader_parameter_table)),
+ shader_precision(valid_shader_precision_table,
+ arraysize(valid_shader_precision_table)),
+ shader_type(valid_shader_type_table, arraysize(valid_shader_type_table)),
+ src_blend_factor(valid_src_blend_factor_table,
+ arraysize(valid_src_blend_factor_table)),
+ stencil_op(valid_stencil_op_table, arraysize(valid_stencil_op_table)),
+ string_type(valid_string_type_table, arraysize(valid_string_type_table)),
+ texture_bind_target(valid_texture_bind_target_table,
+ arraysize(valid_texture_bind_target_table)),
+ texture_format(valid_texture_format_table,
+ arraysize(valid_texture_format_table)),
+ texture_internal_format(valid_texture_internal_format_table,
+ arraysize(valid_texture_internal_format_table)),
+ texture_internal_format_storage(
+ valid_texture_internal_format_storage_table,
+ arraysize(valid_texture_internal_format_storage_table)),
+ texture_mag_filter_mode(valid_texture_mag_filter_mode_table,
+ arraysize(valid_texture_mag_filter_mode_table)),
+ texture_min_filter_mode(valid_texture_min_filter_mode_table,
+ arraysize(valid_texture_min_filter_mode_table)),
+ texture_parameter(valid_texture_parameter_table,
+ arraysize(valid_texture_parameter_table)),
+ texture_pool(valid_texture_pool_table,
+ arraysize(valid_texture_pool_table)),
+ texture_target(valid_texture_target_table,
+ arraysize(valid_texture_target_table)),
+ texture_usage(valid_texture_usage_table,
+ arraysize(valid_texture_usage_table)),
+ texture_wrap_mode(valid_texture_wrap_mode_table,
+ arraysize(valid_texture_wrap_mode_table)),
+ vertex_attrib_size(valid_vertex_attrib_size_table,
+ arraysize(valid_vertex_attrib_size_table)),
+ vertex_attrib_type(valid_vertex_attrib_type_table,
+ arraysize(valid_vertex_attrib_type_table)),
+ vertex_attribute(valid_vertex_attribute_table,
+ arraysize(valid_vertex_attribute_table)),
+ vertex_pointer(valid_vertex_pointer_table,
+ arraysize(valid_vertex_pointer_table)) {
+}
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_IMPLEMENTATION_AUTOGEN_H_
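
For reference, a hypothetical sketch of how the generated Validators struct above is consumed; the call site and the |mode| variable are illustrative only:

    Validators validators;

    // Tables such as compressed_texture_format start out empty and are
    // extended once the corresponding extension is enabled.
    validators.compressed_texture_format.AddValue(GL_ETC1_RGB8_OES);

    if (!validators.draw_mode.IsValid(mode)) {
      // The decoder would reject the draw call with GL_INVALID_ENUM.
    }
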
diff --git a/gpu/command_buffer/service/gpu_scheduler.cc b/gpu/command_buffer/service/gpu_scheduler.cc
new file mode 100644
index 0000000..015d808
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_scheduler.cc
@@ -0,0 +1,310 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/debug/trace_event.h"
+#include "base/message_loop/message_loop.h"
+#include "base/time/time.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_switches.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_version.h"
+#endif
+
+using ::base::SharedMemory;
+
+namespace gpu {
+
+const int64 kUnscheduleFenceTimeOutDelay = 10000;
+
+#if defined(OS_WIN)
+const int64 kRescheduleTimeOutDelay = 1000;
+#endif
+
+GpuScheduler::GpuScheduler(CommandBufferServiceBase* command_buffer,
+ AsyncAPIInterface* handler,
+ gles2::GLES2Decoder* decoder)
+ : command_buffer_(command_buffer),
+ handler_(handler),
+ decoder_(decoder),
+ unscheduled_count_(0),
+ rescheduled_count_(0),
+ was_preempted_(false),
+ reschedule_task_factory_(this) {}
+
+GpuScheduler::~GpuScheduler() {
+}
+
+void GpuScheduler::PutChanged() {
+ TRACE_EVENT1(
+ "gpu", "GpuScheduler:PutChanged",
+ "decoder", decoder_ ? decoder_->GetLogger()->GetLogPrefix() : "None");
+
+ CommandBuffer::State state = command_buffer_->GetLastState();
+
+ // If there is no parser, exit.
+ if (!parser_.get()) {
+ DCHECK_EQ(state.get_offset, state.put_offset);
+ return;
+ }
+
+ parser_->set_put(state.put_offset);
+ if (state.error != error::kNoError)
+ return;
+
+ // Check that the GPU has passed all fences.
+ if (!PollUnscheduleFences())
+ return;
+
+ // One of the unschedule fence tasks might have unscheduled us.
+ if (!IsScheduled())
+ return;
+
+ base::TimeTicks begin_time(base::TimeTicks::HighResNow());
+ error::Error error = error::kNoError;
+ if (decoder_)
+ decoder_->BeginDecoding();
+ while (!parser_->IsEmpty()) {
+ if (IsPreempted())
+ break;
+
+ DCHECK(IsScheduled());
+ DCHECK(unschedule_fences_.empty());
+
+ error = parser_->ProcessCommands(CommandParser::kParseCommandsSlice);
+
+ if (error == error::kDeferCommandUntilLater) {
+ DCHECK_GT(unscheduled_count_, 0);
+ break;
+ }
+
+ // TODO(piman): various classes duplicate various pieces of state, leading
+ // to needlessly complex update logic. It should be possible to simply
+ // share the state across all of them.
+ command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
+
+ if (error::IsError(error)) {
+ command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+ command_buffer_->SetParseError(error);
+ break;
+ }
+
+ if (!command_processed_callback_.is_null())
+ command_processed_callback_.Run();
+
+ if (unscheduled_count_ > 0)
+ break;
+ }
+
+ if (decoder_) {
+ if (!error::IsError(error) && decoder_->WasContextLost()) {
+ command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+ command_buffer_->SetParseError(error::kLostContext);
+ }
+ decoder_->EndDecoding();
+ decoder_->AddProcessingCommandsTime(
+ base::TimeTicks::HighResNow() - begin_time);
+ }
+}
+
+void GpuScheduler::SetScheduled(bool scheduled) {
+ TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this,
+ "new unscheduled_count_",
+ unscheduled_count_ + (scheduled ? -1 : 1));
+ if (scheduled) {
+ // If the scheduler was rescheduled after a timeout, ignore the subsequent
+ // calls to SetScheduled when they eventually arrive until they are all
+ // accounted for.
+ if (rescheduled_count_ > 0) {
+ --rescheduled_count_;
+ return;
+ } else {
+ --unscheduled_count_;
+ }
+
+ DCHECK_GE(unscheduled_count_, 0);
+
+ if (unscheduled_count_ == 0) {
+ TRACE_EVENT_ASYNC_END1("gpu", "ProcessingSwap", this,
+ "GpuScheduler", this);
+ // When the scheduler transitions from the unscheduled to the scheduled
+ // state, cancel the task that would reschedule it after a timeout.
+ reschedule_task_factory_.InvalidateWeakPtrs();
+
+ if (!scheduling_changed_callback_.is_null())
+ scheduling_changed_callback_.Run(true);
+ }
+ } else {
+ ++unscheduled_count_;
+ if (unscheduled_count_ == 1) {
+ TRACE_EVENT_ASYNC_BEGIN1("gpu", "ProcessingSwap", this,
+ "GpuScheduler", this);
+#if defined(OS_WIN)
+ if (base::win::GetVersion() < base::win::VERSION_VISTA) {
+ // When the scheduler transitions from scheduled to unscheduled, post a
+ // delayed task that will force it back into a scheduled state after
+ // a timeout. This should only be necessary on pre-Vista.
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&GpuScheduler::RescheduleTimeOut,
+ reschedule_task_factory_.GetWeakPtr()),
+ base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay));
+ }
+#endif
+ if (!scheduling_changed_callback_.is_null())
+ scheduling_changed_callback_.Run(false);
+ }
+ }
+}
+
+bool GpuScheduler::IsScheduled() {
+ return unscheduled_count_ == 0;
+}
+
+bool GpuScheduler::HasMoreWork() {
+ return !unschedule_fences_.empty() ||
+ (decoder_ && decoder_->ProcessPendingQueries()) ||
+ HasMoreIdleWork();
+}
+
+void GpuScheduler::SetSchedulingChangedCallback(
+ const SchedulingChangedCallback& callback) {
+ scheduling_changed_callback_ = callback;
+}
+
+scoped_refptr<Buffer> GpuScheduler::GetSharedMemoryBuffer(int32 shm_id) {
+ return command_buffer_->GetTransferBuffer(shm_id);
+}
+
+void GpuScheduler::set_token(int32 token) {
+ command_buffer_->SetToken(token);
+}
+
+bool GpuScheduler::SetGetBuffer(int32 transfer_buffer_id) {
+ scoped_refptr<Buffer> ring_buffer =
+ command_buffer_->GetTransferBuffer(transfer_buffer_id);
+ if (!ring_buffer.get()) {
+ return false;
+ }
+
+ if (!parser_.get()) {
+ parser_.reset(new CommandParser(handler_));
+ }
+
+ parser_->SetBuffer(
+ ring_buffer->memory(), ring_buffer->size(), 0, ring_buffer->size());
+
+ SetGetOffset(0);
+ return true;
+}
+
+bool GpuScheduler::SetGetOffset(int32 offset) {
+ if (parser_->set_get(offset)) {
+ command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
+ return true;
+ }
+ return false;
+}
+
+int32 GpuScheduler::GetGetOffset() {
+ return parser_->get();
+}
+
+void GpuScheduler::SetCommandProcessedCallback(
+ const base::Closure& callback) {
+ command_processed_callback_ = callback;
+}
+
+void GpuScheduler::DeferToFence(base::Closure task) {
+ unschedule_fences_.push(make_linked_ptr(
+ new UnscheduleFence(gfx::GLFence::Create(), task)));
+ SetScheduled(false);
+}
+
+bool GpuScheduler::PollUnscheduleFences() {
+ if (unschedule_fences_.empty())
+ return true;
+
+ if (unschedule_fences_.front()->fence.get()) {
+ base::Time now = base::Time::Now();
+ base::TimeDelta timeout =
+ base::TimeDelta::FromMilliseconds(kUnscheduleFenceTimeOutDelay);
+
+ while (!unschedule_fences_.empty()) {
+ const UnscheduleFence& fence = *unschedule_fences_.front();
+ if (fence.fence->HasCompleted() ||
+ now - fence.issue_time > timeout) {
+ unschedule_fences_.front()->task.Run();
+ unschedule_fences_.pop();
+ SetScheduled(true);
+ } else {
+ return false;
+ }
+ }
+ } else {
+ glFinish();
+
+ while (!unschedule_fences_.empty()) {
+ unschedule_fences_.front()->task.Run();
+ unschedule_fences_.pop();
+ SetScheduled(true);
+ }
+ }
+
+ return true;
+}
+
+bool GpuScheduler::IsPreempted() {
+ if (!preemption_flag_.get())
+ return false;
+
+ if (!was_preempted_ && preemption_flag_->IsSet()) {
+ TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 1);
+ was_preempted_ = true;
+ } else if (was_preempted_ && !preemption_flag_->IsSet()) {
+ TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 0);
+ was_preempted_ = false;
+ }
+
+ return preemption_flag_->IsSet();
+}
+
+bool GpuScheduler::HasMoreIdleWork() {
+ return (decoder_ && decoder_->HasMoreIdleWork());
+}
+
+void GpuScheduler::PerformIdleWork() {
+ if (!decoder_)
+ return;
+ decoder_->PerformIdleWork();
+}
+
+void GpuScheduler::RescheduleTimeOut() {
+ int new_count = unscheduled_count_ + rescheduled_count_;
+
+ rescheduled_count_ = 0;
+
+ while (unscheduled_count_)
+ SetScheduled(true);
+
+ rescheduled_count_ = new_count;
+}
+
+GpuScheduler::UnscheduleFence::UnscheduleFence(gfx::GLFence* fence_,
+ base::Closure task_)
+ : fence(fence_),
+ issue_time(base::Time::Now()),
+ task(task_) {
+}
+
+GpuScheduler::UnscheduleFence::~UnscheduleFence() {
+}
+
+} // namespace gpu
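
A sketch of the fence-deferral contract implemented above; the callback and call site are hypothetical, but the behaviour noted in the comments is the one coded in DeferToFence() and PollUnscheduleFences():

    void OnSwapComplete();  // Hypothetical completion callback.

    void DeferWork(gpu::GpuScheduler* scheduler) {
      // DeferToFence() issues a gfx::GLFence, queues the closure, and calls
      // SetScheduled(false). A later PutChanged() polls the fence; the
      // closure runs and the scheduler is re-scheduled once the fence has
      // completed or kUnscheduleFenceTimeOutDelay (10000 ms) has elapsed.
      scheduler->DeferToFence(base::Bind(&OnSwapComplete));
    }
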
diff --git a/gpu/command_buffer/service/gpu_scheduler.h b/gpu/command_buffer/service/gpu_scheduler.h
new file mode 100644
index 0000000..0390632
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_scheduler.h
@@ -0,0 +1,168 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_H_
+
+#include <queue>
+
+#include "base/atomic_ref_count.h"
+#include "base/atomicops.h"
+#include "base/callback.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/shared_memory.h"
+#include "base/memory/weak_ptr.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/cmd_parser.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/gpu_export.h"
+
+namespace gfx {
+class GLFence;
+}
+
+namespace gpu {
+
+class PreemptionFlag
+ : public base::RefCountedThreadSafe<PreemptionFlag> {
+ public:
+ PreemptionFlag() : flag_(0) {}
+
+ bool IsSet() { return !base::AtomicRefCountIsZero(&flag_); }
+ void Set() { base::AtomicRefCountInc(&flag_); }
+ void Reset() { base::subtle::NoBarrier_Store(&flag_, 0); }
+
+ private:
+ base::AtomicRefCount flag_;
+
+ ~PreemptionFlag() {}
+
+ friend class base::RefCountedThreadSafe<PreemptionFlag>;
+};
+
+// This class schedules commands that have been flushed. They are received via
+// a command buffer and forwarded to a command parser. TODO(apatrick): This
+// class should not know about the decoder. Do not add additional dependencies
+// on it.
+class GPU_EXPORT GpuScheduler
+ : NON_EXPORTED_BASE(public CommandBufferEngine),
+ public base::SupportsWeakPtr<GpuScheduler> {
+ public:
+ GpuScheduler(CommandBufferServiceBase* command_buffer,
+ AsyncAPIInterface* handler,
+ gles2::GLES2Decoder* decoder);
+
+ virtual ~GpuScheduler();
+
+ void PutChanged();
+
+ void SetPreemptByFlag(scoped_refptr<PreemptionFlag> flag) {
+ preemption_flag_ = flag;
+ }
+
+ // Sets whether commands should be processed by this scheduler. Setting to
+ // false unschedules. Setting to true reschedules. Whether or not the
+ // scheduler is currently scheduled is "reference counted". Every call with
+ // false must eventually be paired by a call with true.
+ void SetScheduled(bool is_scheduled);
+
+ // Returns whether the scheduler is currently able to process more commands.
+ bool IsScheduled();
+
+ // Returns whether the scheduler needs to be polled again in the future.
+ bool HasMoreWork();
+
+ typedef base::Callback<void(bool /* scheduled */)> SchedulingChangedCallback;
+
+ // Sets a callback that is invoked just before the scheduler is rescheduled
+ // or descheduled. Takes ownership of the callback object.
+ void SetSchedulingChangedCallback(const SchedulingChangedCallback& callback);
+
+ // Implementation of CommandBufferEngine.
+ virtual scoped_refptr<Buffer> GetSharedMemoryBuffer(int32 shm_id) OVERRIDE;
+ virtual void set_token(int32 token) OVERRIDE;
+ virtual bool SetGetBuffer(int32 transfer_buffer_id) OVERRIDE;
+ virtual bool SetGetOffset(int32 offset) OVERRIDE;
+ virtual int32 GetGetOffset() OVERRIDE;
+
+ void SetCommandProcessedCallback(const base::Closure& callback);
+
+ void DeferToFence(base::Closure task);
+
+ // Polls the fences, invoking callbacks that were waiting to be triggered
+ // by them and returns whether all fences were complete.
+ bool PollUnscheduleFences();
+
+ bool HasMoreIdleWork();
+ void PerformIdleWork();
+
+ CommandParser* parser() const {
+ return parser_.get();
+ }
+
+ bool IsPreempted();
+
+ private:
+ // Artificially reschedule if the scheduler is still unscheduled after a
+ // timeout.
+ void RescheduleTimeOut();
+
+ // The GpuScheduler holds a weak reference to the CommandBuffer. The
+ // CommandBuffer owns the GpuScheduler and holds a strong reference to it
+ // through the ProcessCommands callback.
+ CommandBufferServiceBase* command_buffer_;
+
+ // The parser uses this to execute commands.
+ AsyncAPIInterface* handler_;
+
+ // Does not own decoder. TODO(apatrick): The GpuScheduler shouldn't need a
+ // pointer to the decoder, it is only used to initialize the CommandParser,
+ // which could be an argument to the constructor, and to determine the
+ // reason for context lost.
+ gles2::GLES2Decoder* decoder_;
+
+ // TODO(apatrick): The GpuScheduler currently creates and owns the parser.
+ // This should be an argument to the constructor.
+ scoped_ptr<CommandParser> parser_;
+
+ // Greater than zero if this is waiting to be rescheduled before continuing.
+ int unscheduled_count_;
+
+ // The number of times this scheduler has been artificially rescheduled on
+ // account of a timeout.
+ int rescheduled_count_;
+
+ // The GpuScheduler will unschedule itself in the event that further GL calls
+ // are issued to it before all these fences have been crossed by the GPU.
+ struct UnscheduleFence {
+ UnscheduleFence(gfx::GLFence* fence, base::Closure task);
+ ~UnscheduleFence();
+
+ scoped_ptr<gfx::GLFence> fence;
+ base::Time issue_time;
+ base::Closure task;
+ };
+ std::queue<linked_ptr<UnscheduleFence> > unschedule_fences_;
+
+ SchedulingChangedCallback scheduling_changed_callback_;
+ base::Closure descheduled_callback_;
+ base::Closure command_processed_callback_;
+
+ // If non-NULL and |preemption_flag_->IsSet()|, exit PutChanged early.
+ scoped_refptr<PreemptionFlag> preemption_flag_;
+ bool was_preempted_;
+
+ // A factory for outstanding rescheduling tasks that is invalidated whenever
+ // the scheduler is rescheduled.
+ base::WeakPtrFactory<GpuScheduler> reschedule_task_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuScheduler);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_H_
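
A minimal sketch of the SetScheduled() pairing rule documented above; the calling code is hypothetical, while the counting behaviour matches this header and gpu_scheduler.cc:

    void StallForTwoReasons(gpu::GpuScheduler* scheduler) {
      scheduler->SetScheduled(false);  // unscheduled_count_ becomes 1.
      scheduler->SetScheduled(false);  // unscheduled_count_ becomes 2.
      // IsScheduled() is now false; PutChanged() stops processing commands.

      scheduler->SetScheduled(true);   // Back to 1, still unscheduled.
      scheduler->SetScheduled(true);   // Back to 0; processing may resume and
                                       // the scheduling-changed callback fires.
    }
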
diff --git a/gpu/command_buffer/service/gpu_scheduler_mock.h b/gpu/command_buffer/service/gpu_scheduler_mock.h
new file mode 100644
index 0000000..ed308e0
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_scheduler_mock.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_MOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_MOCK_H_
+
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+
+class MockGpuScheduler : public GpuScheduler {
+ public:
+ MockGpuScheduler(CommandBufferServiceBase* command_buffer,
+ AsyncAPIInterface* handler,
+ gles2::GLES2Decoder* decoder)
+ : GpuScheduler(command_buffer, handler, decoder) {
+ }
+
+ MOCK_METHOD1(GetSharedMemoryBuffer, scoped_refptr<Buffer>(int32 shm_id));
+ MOCK_METHOD1(set_token, void(int32 token));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockGpuScheduler);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_MOCK_H_
diff --git a/gpu/command_buffer/service/gpu_scheduler_unittest.cc b/gpu/command_buffer/service/gpu_scheduler_unittest.cc
new file mode 100644
index 0000000..c658d2b
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_scheduler_unittest.cc
@@ -0,0 +1,221 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/common/command_buffer_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+using testing::_;
+using testing::DoAll;
+using testing::Invoke;
+using testing::NiceMock;
+using testing::Return;
+using testing::SetArgumentPointee;
+using testing::StrictMock;
+
+namespace gpu {
+
+const size_t kRingBufferSize = 1024;
+const size_t kRingBufferEntries = kRingBufferSize / sizeof(CommandBufferEntry);
+
+class GpuSchedulerTest : public testing::Test {
+ protected:
+ static const int32 kTransferBufferId = 123;
+
+ virtual void SetUp() {
+ scoped_ptr<base::SharedMemory> shared_memory(new ::base::SharedMemory);
+ shared_memory->CreateAndMapAnonymous(kRingBufferSize);
+ buffer_ = static_cast<int32*>(shared_memory->memory());
+ shared_memory_buffer_ =
+ MakeBufferFromSharedMemory(shared_memory.Pass(), kRingBufferSize);
+ memset(buffer_, 0, kRingBufferSize);
+
+ command_buffer_.reset(new MockCommandBuffer);
+
+ CommandBuffer::State default_state;
+ default_state.num_entries = kRingBufferEntries;
+ ON_CALL(*command_buffer_.get(), GetLastState())
+ .WillByDefault(Return(default_state));
+
+ decoder_.reset(new gles2::MockGLES2Decoder());
+ // Install FakeDoCommands handler so we can use individual DoCommand()
+ // expectations.
+ EXPECT_CALL(*decoder_, DoCommands(_, _, _, _)).WillRepeatedly(
+ Invoke(decoder_.get(), &gles2::MockGLES2Decoder::FakeDoCommands));
+
+ scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
+ decoder_.get(),
+ decoder_.get()));
+ EXPECT_CALL(*command_buffer_, GetTransferBuffer(kTransferBufferId))
+ .WillOnce(Return(shared_memory_buffer_));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(0));
+ EXPECT_TRUE(scheduler_->SetGetBuffer(kTransferBufferId));
+ }
+
+ virtual void TearDown() {
+ // Ensure that any unexpected tasks posted by the GPU scheduler are executed
+ // in order to fail the test.
+ base::MessageLoop::current()->RunUntilIdle();
+ }
+
+ error::Error GetError() {
+ return command_buffer_->GetLastState().error;
+ }
+
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool autorelease_pool_;
+#endif
+ base::MessageLoop message_loop;
+ scoped_ptr<MockCommandBuffer> command_buffer_;
+ scoped_refptr<Buffer> shared_memory_buffer_;
+ int32* buffer_;
+ scoped_ptr<gles2::MockGLES2Decoder> decoder_;
+ scoped_ptr<GpuScheduler> scheduler_;
+};
+
+TEST_F(GpuSchedulerTest, SchedulerDoesNothingIfRingBufferIsEmpty) {
+ CommandBuffer::State state;
+
+ state.put_offset = 0;
+ EXPECT_CALL(*command_buffer_, GetLastState())
+ .WillRepeatedly(Return(state));
+
+ EXPECT_CALL(*command_buffer_, SetParseError(_))
+ .Times(0);
+
+ scheduler_->PutChanged();
+}
+
+TEST_F(GpuSchedulerTest, GetSetBuffer) {
+ CommandBuffer::State state;
+
+ // Set the get offset to something not 0.
+ EXPECT_CALL(*command_buffer_, SetGetOffset(2));
+ scheduler_->SetGetOffset(2);
+ EXPECT_EQ(2, scheduler_->GetGetOffset());
+
+ // Set the buffer.
+ EXPECT_CALL(*command_buffer_, GetTransferBuffer(kTransferBufferId))
+ .WillOnce(Return(shared_memory_buffer_));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(0));
+ EXPECT_TRUE(scheduler_->SetGetBuffer(kTransferBufferId));
+
+ // Check the get offset was reset.
+ EXPECT_EQ(0, scheduler_->GetGetOffset());
+}
+
+TEST_F(GpuSchedulerTest, ProcessesOneCommand) {
+ CommandHeader* header = reinterpret_cast<CommandHeader*>(&buffer_[0]);
+ header[0].command = 7;
+ header[0].size = 2;
+ buffer_[1] = 123;
+
+ CommandBuffer::State state;
+
+ state.put_offset = 2;
+ EXPECT_CALL(*command_buffer_, GetLastState())
+ .WillRepeatedly(Return(state));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(2));
+
+ EXPECT_CALL(*decoder_, DoCommand(7, 1, &buffer_[0]))
+ .WillOnce(Return(error::kNoError));
+
+ EXPECT_CALL(*command_buffer_, SetParseError(_))
+ .Times(0);
+
+ scheduler_->PutChanged();
+}
+
+TEST_F(GpuSchedulerTest, ProcessesTwoCommands) {
+ CommandHeader* header = reinterpret_cast<CommandHeader*>(&buffer_[0]);
+ header[0].command = 7;
+ header[0].size = 2;
+ buffer_[1] = 123;
+ header[2].command = 8;
+ header[2].size = 1;
+
+ CommandBuffer::State state;
+
+ state.put_offset = 3;
+ EXPECT_CALL(*command_buffer_, GetLastState())
+ .WillRepeatedly(Return(state));
+
+ EXPECT_CALL(*decoder_, DoCommand(7, 1, &buffer_[0]))
+ .WillOnce(Return(error::kNoError));
+
+ EXPECT_CALL(*decoder_, DoCommand(8, 0, &buffer_[2]))
+ .WillOnce(Return(error::kNoError));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(3));
+
+ scheduler_->PutChanged();
+}
+
+TEST_F(GpuSchedulerTest, SetsErrorCodeOnCommandBuffer) {
+ CommandHeader* header = reinterpret_cast<CommandHeader*>(&buffer_[0]);
+ header[0].command = 7;
+ header[0].size = 1;
+
+ CommandBuffer::State state;
+
+ state.put_offset = 1;
+ EXPECT_CALL(*command_buffer_, GetLastState())
+ .WillRepeatedly(Return(state));
+
+ EXPECT_CALL(*decoder_, DoCommand(7, 0, &buffer_[0]))
+ .WillOnce(Return(
+ error::kUnknownCommand));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(1));
+
+ EXPECT_CALL(*command_buffer_, SetContextLostReason(_));
+ EXPECT_CALL(*decoder_, GetContextLostReason())
+ .WillOnce(Return(error::kUnknown));
+ EXPECT_CALL(*command_buffer_,
+ SetParseError(error::kUnknownCommand));
+
+ scheduler_->PutChanged();
+}
+
+TEST_F(GpuSchedulerTest, ProcessCommandsDoesNothingAfterError) {
+ CommandBuffer::State state;
+ state.error = error::kGenericError;
+
+ EXPECT_CALL(*command_buffer_, GetLastState())
+ .WillRepeatedly(Return(state));
+
+ scheduler_->PutChanged();
+}
+
+TEST_F(GpuSchedulerTest, CanGetAddressOfSharedMemory) {
+ EXPECT_CALL(*command_buffer_.get(), GetTransferBuffer(7))
+ .WillOnce(Return(shared_memory_buffer_));
+
+ EXPECT_EQ(&buffer_[0], scheduler_->GetSharedMemoryBuffer(7)->memory());
+}
+
+ACTION_P2(SetPointee, address, value) {
+ *address = value;
+}
+
+TEST_F(GpuSchedulerTest, CanGetSizeOfSharedMemory) {
+ EXPECT_CALL(*command_buffer_.get(), GetTransferBuffer(7))
+ .WillOnce(Return(shared_memory_buffer_));
+
+ EXPECT_EQ(kRingBufferSize, scheduler_->GetSharedMemoryBuffer(7)->size());
+}
+
+TEST_F(GpuSchedulerTest, SetTokenForwardsToCommandBuffer) {
+ EXPECT_CALL(*command_buffer_, SetToken(7));
+ scheduler_->set_token(7);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gpu_service_test.cc b/gpu/command_buffer/service/gpu_service_test.cc
new file mode 100644
index 0000000..a7c9db1
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_service_test.cc
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gpu_service_test.h"
+
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_context_stub_with_extensions.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface.h"
+
+namespace gpu {
+namespace gles2 {
+
+GpuServiceTest::GpuServiceTest() : ran_setup_(false), ran_teardown_(false) {
+}
+
+GpuServiceTest::~GpuServiceTest() {
+ DCHECK(ran_teardown_);
+}
+
+void GpuServiceTest::SetUpWithGLVersion(const char* gl_version,
+ const char* gl_extensions) {
+ testing::Test::SetUp();
+
+ gfx::SetGLGetProcAddressProc(gfx::MockGLInterface::GetGLProcAddress);
+ gfx::GLSurface::InitializeOneOffWithMockBindingsForTests();
+ gl_.reset(new ::testing::StrictMock< ::gfx::MockGLInterface>());
+ ::gfx::MockGLInterface::SetGLInterface(gl_.get());
+
+ context_ = new gfx::GLContextStubWithExtensions;
+ context_->AddExtensionsString(gl_extensions);
+ context_->SetGLVersionString(gl_version);
+ gfx::GLSurface::InitializeDynamicMockBindingsForTests(context_.get());
+ ran_setup_ = true;
+}
+
+void GpuServiceTest::SetUp() {
+ SetUpWithGLVersion("2.0", NULL);
+}
+
+void GpuServiceTest::TearDown() {
+ DCHECK(ran_setup_);
+ ::gfx::MockGLInterface::SetGLInterface(NULL);
+ gl_.reset();
+ gfx::ClearGLBindings();
+ ran_teardown_ = true;
+
+ testing::Test::TearDown();
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gpu_service_test.h b/gpu/command_buffer/service/gpu_service_test.h
new file mode 100644
index 0000000..c467c14
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_service_test.h
@@ -0,0 +1,43 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_SERVICE_TEST_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_SERVICE_TEST_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+namespace gfx {
+class GLContextStubWithExtensions;
+}
+
+namespace gpu {
+namespace gles2 {
+
+// Base class for tests that need mock GL bindings.
+class GpuServiceTest : public testing::Test {
+ public:
+ GpuServiceTest();
+ virtual ~GpuServiceTest();
+
+ protected:
+ void SetUpWithGLVersion(const char* gl_version, const char* gl_extensions);
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ scoped_ptr< ::testing::StrictMock< ::gfx::MockGLInterface> > gl_;
+
+ private:
+ bool ran_setup_;
+ bool ran_teardown_;
+ scoped_refptr<gfx::GLContextStubWithExtensions> context_;
+};
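+
+// Illustrative usage (editor's sketch, not part of this change): a test
+// fixture that needs specific GL extensions can call SetUpWithGLVersion()
+// from its own SetUp() and then set expectations on the gl_ mock. The
+// fixture name, version and extension strings below are hypothetical.
+//
+//   class MyTracerTest : public GpuServiceTest {
+//    protected:
+//     virtual void SetUp() OVERRIDE {
+//       SetUpWithGLVersion("2.0", "GL_ARB_timer_query");
+//     }
+//   };
+//
+//   TEST_F(MyTracerTest, UsesMockGL) {
+//     EXPECT_CALL(*gl_, Flush());
+//     // ... exercise code that calls into the mocked GL bindings ...
+//   }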
+
+} // namespace gles2
+} // namespace gpu
+
+#endif  // GPU_COMMAND_BUFFER_SERVICE_GPU_SERVICE_TEST_H_
diff --git a/gpu/command_buffer/service/gpu_state_tracer.cc b/gpu/command_buffer/service/gpu_state_tracer.cc
new file mode 100644
index 0000000..6eb5007
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_state_tracer.cc
@@ -0,0 +1,132 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gpu_state_tracer.h"
+
+#include "base/base64.h"
+#include "base/debug/trace_event.h"
+#include "context_state.h"
+#include "ui/gfx/codec/png_codec.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+namespace {
+
+const int kBytesPerPixel = 4;
+
+class Snapshot : public base::debug::ConvertableToTraceFormat {
+ public:
+ static scoped_refptr<Snapshot> Create(const ContextState* state);
+
+ // Save a screenshot of the currently bound framebuffer.
+ bool SaveScreenshot(const gfx::Size& size);
+
+ // base::debug::ConvertableToTraceFormat implementation.
+ virtual void AppendAsTraceFormat(std::string* out) const OVERRIDE;
+
+ private:
+ explicit Snapshot(const ContextState* state);
+ virtual ~Snapshot() {}
+
+ const ContextState* state_;
+
+ std::vector<unsigned char> screenshot_pixels_;
+ gfx::Size screenshot_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(Snapshot);
+};
+
+} // namespace
+
+Snapshot::Snapshot(const ContextState* state) : state_(state) {}
+
+scoped_refptr<Snapshot> Snapshot::Create(const ContextState* state) {
+ return scoped_refptr<Snapshot>(new Snapshot(state));
+}
+
+bool Snapshot::SaveScreenshot(const gfx::Size& size) {
+ screenshot_size_ = size;
+ screenshot_pixels_.resize(screenshot_size_.width() *
+ screenshot_size_.height() * kBytesPerPixel);
+
+ glPixelStorei(GL_PACK_ALIGNMENT, kBytesPerPixel);
+ glReadPixels(0,
+ 0,
+ screenshot_size_.width(),
+ screenshot_size_.height(),
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ &screenshot_pixels_[0]);
+ glPixelStorei(GL_PACK_ALIGNMENT, state_->pack_alignment);
+
+ // Flip the screenshot vertically.
+ int bytes_per_row = screenshot_size_.width() * kBytesPerPixel;
+ for (int y = 0; y < screenshot_size_.height() / 2; y++) {
+ for (int x = 0; x < bytes_per_row; x++) {
+ std::swap(screenshot_pixels_[y * bytes_per_row + x],
+ screenshot_pixels_
+ [(screenshot_size_.height() - y - 1) * bytes_per_row + x]);
+ }
+ }
+ return true;
+}
+
+void Snapshot::AppendAsTraceFormat(std::string* out) const {
+ *out += "{";
+ if (screenshot_pixels_.size()) {
+ std::vector<unsigned char> png_data;
+ int bytes_per_row = screenshot_size_.width() * kBytesPerPixel;
+ bool png_ok = gfx::PNGCodec::Encode(&screenshot_pixels_[0],
+ gfx::PNGCodec::FORMAT_RGBA,
+ screenshot_size_,
+ bytes_per_row,
+ false,
+ std::vector<gfx::PNGCodec::Comment>(),
+ &png_data);
+ DCHECK(png_ok);
+
+ base::StringPiece base64_input(reinterpret_cast<const char*>(&png_data[0]),
+ png_data.size());
+ std::string base64_output;
+ Base64Encode(base64_input, &base64_output);
+
+ *out += "\"screenshot\":\"" + base64_output + "\"";
+ }
+ *out += "}";
+}
+
+scoped_ptr<GPUStateTracer> GPUStateTracer::Create(const ContextState* state) {
+ return scoped_ptr<GPUStateTracer>(new GPUStateTracer(state));
+}
+
+GPUStateTracer::GPUStateTracer(const ContextState* state) : state_(state) {
+ TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("gpu.debug"), "gpu::State", state_);
+}
+
+GPUStateTracer::~GPUStateTracer() {
+ TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("gpu.debug"), "gpu::State", state_);
+}
+
+void GPUStateTracer::TakeSnapshotWithCurrentFramebuffer(const gfx::Size& size) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.debug"),
+ "GPUStateTracer::TakeSnapshotWithCurrentFramebuffer");
+
+ scoped_refptr<Snapshot> snapshot(Snapshot::Create(state_));
+
+ // Only save a screenshot for now.
+ if (!snapshot->SaveScreenshot(size))
+ return;
+
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("gpu.debug"),
+ "gpu::State",
+ state_,
+ scoped_refptr<base::debug::ConvertableToTraceFormat>(snapshot));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gpu_state_tracer.h b/gpu/command_buffer/service/gpu_state_tracer.h
new file mode 100644
index 0000000..38998f3
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_state_tracer.h
@@ -0,0 +1,39 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_STATE_TRACER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_STATE_TRACER_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+
+namespace gfx {
+class Size;
+}
+
+namespace gpu {
+namespace gles2 {
+
+struct ContextState;
+
+// Saves GPU state such as framebuffer contents while tracing.
+class GPUStateTracer {
+ public:
+ static scoped_ptr<GPUStateTracer> Create(const ContextState* state);
+ ~GPUStateTracer();
+
+ // Take a state snapshot with a screenshot of the currently bound framebuffer.
+ void TakeSnapshotWithCurrentFramebuffer(const gfx::Size& size);
+
+ private:
+ explicit GPUStateTracer(const ContextState* state);
+
+ const ContextState* state_;
+ DISALLOW_COPY_AND_ASSIGN(GPUStateTracer);
+};
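+
+// Illustrative usage (editor's sketch, not part of this change): a decoder
+// that owns a ContextState can emit a framebuffer snapshot into the
+// disabled-by-default "gpu.debug" trace category after drawing. Here
+// context_state, width and height are placeholders.
+//
+//   scoped_ptr<GPUStateTracer> tracer = GPUStateTracer::Create(&context_state);
+//   tracer->TakeSnapshotWithCurrentFramebuffer(gfx::Size(width, height));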
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GPU_STATE_TRACER_H_
diff --git a/gpu/command_buffer/service/gpu_switches.cc b/gpu/command_buffer/service/gpu_switches.cc
new file mode 100644
index 0000000..0491c41
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_switches.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "base/basictypes.h"
+
+namespace switches {
+
+// Always return success when compiling a shader. Linking will still fail.
+const char kCompileShaderAlwaysSucceeds[] = "compile-shader-always-succeeds";
+
+// Disable the GL error log limit.
+const char kDisableGLErrorLimit[] = "disable-gl-error-limit";
+
+// Disable the GLSL translator.
+const char kDisableGLSLTranslator[] = "disable-glsl-translator";
+
+// Disable workarounds for various GPU driver bugs.
+const char kDisableGpuDriverBugWorkarounds[] =
+ "disable-gpu-driver-bug-workarounds";
+
+// Turn off user-defined name hashing in shaders.
+const char kDisableShaderNameHashing[] = "disable-shader-name-hashing";
+
+// Turn on logging of GPU commands.
+const char kEnableGPUCommandLogging[] = "enable-gpu-command-logging";
+
+// Turn on calling glGetError after every command.
+const char kEnableGPUDebugging[] = "enable-gpu-debugging";
+
+// Enable GPU service logging. Note: This is the same switch as the one in
+// gl_switches.cc. It's defined here again to avoid dependencies between
+// dlls.
+const char kEnableGPUServiceLoggingGPU[] = "enable-gpu-service-logging";
+
+// Turn off gpu program caching
+const char kDisableGpuProgramCache[] = "disable-gpu-program-cache";
+
+// Enforce GL minimums.
+const char kEnforceGLMinimums[] = "enforce-gl-minimums";
+
+// Sets the total amount of memory that may be allocated for GPU resources
+const char kForceGpuMemAvailableMb[] = "force-gpu-mem-available-mb";
+
+// Pass a set of GpuDriverBugWorkaroundType ids, separated by ','.
+const char kGpuDriverBugWorkarounds[] = "gpu-driver-bug-workarounds";
+
+// Sets the maximum size of the in-memory gpu program cache, in kb
+const char kGpuProgramCacheSizeKb[] = "gpu-program-cache-size-kb";
+
+// Disables the GPU shader on disk cache.
+const char kDisableGpuShaderDiskCache[] = "disable-gpu-shader-disk-cache";
+
+// Allows async texture uploads (off main thread) via GL context sharing.
+const char kEnableShareGroupAsyncTextureUpload[] =
+ "enable-share-group-async-texture-upload";
+
+const char* kGpuSwitches[] = {
+ kCompileShaderAlwaysSucceeds,
+ kDisableGLErrorLimit,
+ kDisableGLSLTranslator,
+ kDisableGpuDriverBugWorkarounds,
+ kDisableShaderNameHashing,
+ kEnableGPUCommandLogging,
+ kEnableGPUDebugging,
+ kEnableGPUServiceLoggingGPU,
+ kDisableGpuProgramCache,
+ kEnforceGLMinimums,
+ kForceGpuMemAvailableMb,
+ kGpuDriverBugWorkarounds,
+ kGpuProgramCacheSizeKb,
+ kDisableGpuShaderDiskCache,
+ kEnableShareGroupAsyncTextureUpload,
+};
+
+const int kNumGpuSwitches = arraysize(kGpuSwitches);
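+
+// Illustrative usage (editor's sketch, not part of this change): a host
+// process can forward exactly this switch list to the GPU process command
+// line. Here gpu_cmd_line is a placeholder CommandLine*.
+//
+//   gpu_cmd_line->CopySwitchesFrom(*CommandLine::ForCurrentProcess(),
+//                                  kGpuSwitches, kNumGpuSwitches);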
+
+} // namespace switches
diff --git a/gpu/command_buffer/service/gpu_switches.h b/gpu/command_buffer/service/gpu_switches.h
new file mode 100644
index 0000000..d582b7a
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_switches.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the command-line switches used by gpu/command_buffer/service/.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_SWITCHES_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_SWITCHES_H_
+
+#include "gpu/gpu_export.h"
+
+namespace switches {
+
+GPU_EXPORT extern const char kCompileShaderAlwaysSucceeds[];
+GPU_EXPORT extern const char kDisableGLErrorLimit[];
+GPU_EXPORT extern const char kDisableGLSLTranslator[];
+GPU_EXPORT extern const char kDisableGpuDriverBugWorkarounds[];
+GPU_EXPORT extern const char kDisableShaderNameHashing[];
+GPU_EXPORT extern const char kEnableGPUCommandLogging[];
+GPU_EXPORT extern const char kEnableGPUDebugging[];
+GPU_EXPORT extern const char kEnableGPUServiceLoggingGPU[];
+GPU_EXPORT extern const char kDisableGpuProgramCache[];
+GPU_EXPORT extern const char kEnforceGLMinimums[];
+GPU_EXPORT extern const char kForceGpuMemAvailableMb[];
+GPU_EXPORT extern const char kGpuDriverBugWorkarounds[];
+GPU_EXPORT extern const char kGpuProgramCacheSizeKb[];
+GPU_EXPORT extern const char kDisableGpuShaderDiskCache[];
+GPU_EXPORT extern const char kEnableShareGroupAsyncTextureUpload[];
+
+GPU_EXPORT extern const char* kGpuSwitches[];
+GPU_EXPORT extern const int kNumGpuSwitches;
+
+} // namespace switches
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GPU_SWITCHES_H_
+
diff --git a/gpu/command_buffer/service/gpu_tracer.cc b/gpu/command_buffer/service/gpu_tracer.cc
new file mode 100644
index 0000000..024e4b6
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_tracer.cc
@@ -0,0 +1,407 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gpu_tracer.h"
+
+#include <deque>
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/strings/string_util.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+
+namespace gpu {
+namespace gles2 {
+
+static const unsigned int kProcessInterval = 16;
+static TraceOutputter* g_outputter_thread = NULL;
+
+TraceMarker::TraceMarker(const std::string& name)
+ : name_(name),
+ trace_(NULL) {
+}
+
+TraceMarker::~TraceMarker() {
+}
+
+scoped_refptr<TraceOutputter> TraceOutputter::Create(const std::string& name) {
+ if (!g_outputter_thread) {
+ g_outputter_thread = new TraceOutputter(name);
+ }
+ return g_outputter_thread;
+}
+
+TraceOutputter::TraceOutputter(const std::string& name)
+ : named_thread_(name.c_str()), local_trace_id_(0) {
+ named_thread_.Start();
+ named_thread_.Stop();
+}
+
+TraceOutputter::~TraceOutputter() { g_outputter_thread = NULL; }
+
+void TraceOutputter::Trace(const std::string& name,
+ int64 start_time,
+ int64 end_time) {
+ TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.device"),
+ name.c_str(),
+ local_trace_id_,
+ named_thread_.thread_id(),
+ start_time);
+ TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.device"),
+ name.c_str(),
+ local_trace_id_,
+ named_thread_.thread_id(),
+ end_time);
+ ++local_trace_id_;
+}
+
+GPUTrace::GPUTrace(scoped_refptr<Outputter> outputter,
+ const std::string& name,
+ int64 offset,
+ GpuTracerType tracer_type)
+ : name_(name),
+ outputter_(outputter),
+ offset_(offset),
+ start_time_(0),
+ end_time_(0),
+ tracer_type_(tracer_type),
+ end_requested_(false) {
+ memset(queries_, 0, sizeof(queries_));
+ switch (tracer_type_) {
+ case kTracerTypeARBTimer:
+ case kTracerTypeDisjointTimer:
+ glGenQueriesARB(2, queries_);
+ break;
+
+ default:
+ tracer_type_ = kTracerTypeInvalid;
+ }
+}
+
+GPUTrace::~GPUTrace() {
+ switch (tracer_type_) {
+ case kTracerTypeInvalid:
+ break;
+
+ case kTracerTypeARBTimer:
+ case kTracerTypeDisjointTimer:
+ glDeleteQueriesARB(2, queries_);
+ break;
+ }
+}
+
+void GPUTrace::Start() {
+ TRACE_EVENT_COPY_ASYNC_BEGIN0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.service"), name().c_str(), this);
+
+ switch (tracer_type_) {
+ case kTracerTypeInvalid:
+ break;
+
+ case kTracerTypeDisjointTimer:
+ // For the disjoint timer, GPU idle time does not seem to increment the
+ // internal counter. We must calculate the offset before any query. The
+ // good news is any device that supports disjoint timer will also support
+ // glGetInteger64v, so we can query it directly unlike the ARBTimer case.
+ // The "offset_" variable will always be 0 during normal use cases, only
+ // under the unit tests will it be set to specific test values.
+ if (offset_ == 0) {
+ GLint64 gl_now = 0;
+ glGetInteger64v(GL_TIMESTAMP, &gl_now);
+ offset_ = base::TimeTicks::NowFromSystemTraceTime().ToInternalValue() -
+ gl_now / base::Time::kNanosecondsPerMicrosecond;
+ }
+      // Intentionally fall through to the kTracerTypeARBTimer case.
+ case kTracerTypeARBTimer:
+ // GL_TIMESTAMP and GL_TIMESTAMP_EXT both have the same value.
+ glQueryCounter(queries_[0], GL_TIMESTAMP);
+ break;
+ }
+}
+
+void GPUTrace::End() {
+ end_requested_ = true;
+ switch (tracer_type_) {
+ case kTracerTypeInvalid:
+ break;
+
+ case kTracerTypeARBTimer:
+ case kTracerTypeDisjointTimer:
+ // GL_TIMESTAMP and GL_TIMESTAMP_EXT both have the same value.
+ glQueryCounter(queries_[1], GL_TIMESTAMP);
+ break;
+ }
+
+ TRACE_EVENT_COPY_ASYNC_END0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.service"), name().c_str(), this);
+}
+
+bool GPUTrace::IsAvailable() {
+ if (tracer_type_ != kTracerTypeInvalid) {
+ if (!end_requested_)
+ return false;
+
+ GLint done = 0;
+ glGetQueryObjectiv(queries_[1], GL_QUERY_RESULT_AVAILABLE, &done);
+ return !!done;
+ }
+
+ return true;
+}
+
+void GPUTrace::Process() {
+ if (tracer_type_ == kTracerTypeInvalid)
+ return;
+
+ DCHECK(IsAvailable());
+
+ GLuint64 begin_stamp = 0;
+ GLuint64 end_stamp = 0;
+
+ // TODO(dsinclair): It's possible for the timer to wrap during the start/end.
+  // We need to detect if the end is less than the start and correct for the
+ // wrapping.
+ glGetQueryObjectui64v(queries_[0], GL_QUERY_RESULT, &begin_stamp);
+ glGetQueryObjectui64v(queries_[1], GL_QUERY_RESULT, &end_stamp);
+
+ start_time_ = (begin_stamp / base::Time::kNanosecondsPerMicrosecond) +
+ offset_;
+ end_time_ = (end_stamp / base::Time::kNanosecondsPerMicrosecond) + offset_;
+ outputter_->Trace(name(), start_time_, end_time_);
+}
+
+GPUTracer::GPUTracer(gles2::GLES2Decoder* decoder)
+ : gpu_trace_srv_category(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("gpu.service"))),
+ gpu_trace_dev_category(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("gpu.device"))),
+ decoder_(decoder),
+ timer_offset_(0),
+ last_tracer_source_(kTraceGroupInvalid),
+ tracer_type_(kTracerTypeInvalid),
+ gpu_timing_synced_(false),
+ gpu_executing_(false),
+ process_posted_(false) {
+ if (gfx::g_driver_gl.ext.b_GL_EXT_disjoint_timer_query) {
+ tracer_type_ = kTracerTypeDisjointTimer;
+ outputter_ = TraceOutputter::Create("GL_EXT_disjoint_timer_query");
+ } else if (gfx::g_driver_gl.ext.b_GL_ARB_timer_query) {
+ tracer_type_ = kTracerTypeARBTimer;
+ outputter_ = TraceOutputter::Create("GL_ARB_timer_query");
+ }
+}
+
+GPUTracer::~GPUTracer() {
+}
+
+bool GPUTracer::BeginDecoding() {
+ if (gpu_executing_)
+ return false;
+
+ CalculateTimerOffset();
+ gpu_executing_ = true;
+
+ if (IsTracing()) {
+ // Reset disjoint bit for the disjoint timer.
+ if (tracer_type_ == kTracerTypeDisjointTimer) {
+ GLint disjoint_value = 0;
+ glGetIntegerv(GL_GPU_DISJOINT_EXT, &disjoint_value);
+ }
+
+ // Begin a Trace for all active markers
+ for (int n = 0; n < NUM_TRACER_SOURCES; n++) {
+ for (size_t i = 0; i < markers_[n].size(); i++) {
+ markers_[n][i].trace_ = CreateTrace(markers_[n][i].name_);
+ markers_[n][i].trace_->Start();
+ }
+ }
+ }
+ return true;
+}
+
+bool GPUTracer::EndDecoding() {
+ if (!gpu_executing_)
+ return false;
+
+ // End Trace for all active markers
+ if (IsTracing()) {
+ for (int n = 0; n < NUM_TRACER_SOURCES; n++) {
+ for (size_t i = 0; i < markers_[n].size(); i++) {
+ if (markers_[n][i].trace_.get()) {
+ markers_[n][i].trace_->End();
+ if (markers_[n][i].trace_->IsEnabled())
+ traces_.push_back(markers_[n][i].trace_);
+ markers_[n][i].trace_ = 0;
+ }
+ }
+ }
+ IssueProcessTask();
+ }
+
+ gpu_executing_ = false;
+
+ // NOTE(vmiura): glFlush() here can help give better trace results,
+ // but it distorts the normal device behavior.
+ return true;
+}
+
+bool GPUTracer::Begin(const std::string& name, GpuTracerSource source) {
+ if (!gpu_executing_)
+ return false;
+
+ DCHECK(source >= 0 && source < NUM_TRACER_SOURCES);
+
+ // Push new marker from given 'source'
+ last_tracer_source_ = source;
+ markers_[source].push_back(TraceMarker(name));
+
+ // Create trace
+ if (IsTracing()) {
+ scoped_refptr<GPUTrace> trace = CreateTrace(name);
+ trace->Start();
+ markers_[source].back().trace_ = trace;
+ }
+
+ return true;
+}
+
+bool GPUTracer::End(GpuTracerSource source) {
+ if (!gpu_executing_)
+ return false;
+
+ DCHECK(source >= 0 && source < NUM_TRACER_SOURCES);
+
+ // Pop last marker with matching 'source'
+ if (!markers_[source].empty()) {
+ if (IsTracing()) {
+ scoped_refptr<GPUTrace> trace = markers_[source].back().trace_;
+ if (trace.get()) {
+ trace->End();
+ if (trace->IsEnabled())
+ traces_.push_back(trace);
+ IssueProcessTask();
+ }
+ }
+
+ markers_[source].pop_back();
+ return true;
+ }
+ return false;
+}
+
+bool GPUTracer::IsTracing() {
+ return (*gpu_trace_srv_category != 0) || (*gpu_trace_dev_category != 0);
+}
+
+const std::string& GPUTracer::CurrentName() const {
+ if (last_tracer_source_ >= 0 &&
+ last_tracer_source_ < NUM_TRACER_SOURCES &&
+ !markers_[last_tracer_source_].empty()) {
+ return markers_[last_tracer_source_].back().name_;
+ }
+ return base::EmptyString();
+}
+
+scoped_refptr<GPUTrace> GPUTracer::CreateTrace(const std::string& name) {
+ GpuTracerType tracer_type = *gpu_trace_dev_category ? tracer_type_ :
+ kTracerTypeInvalid;
+
+ return new GPUTrace(outputter_, name, timer_offset_, tracer_type);
+}
+
+void GPUTracer::Process() {
+ process_posted_ = false;
+ ProcessTraces();
+ IssueProcessTask();
+}
+
+void GPUTracer::ProcessTraces() {
+ if (tracer_type_ == kTracerTypeInvalid) {
+ traces_.clear();
+ return;
+ }
+
+ TRACE_EVENT0("gpu", "GPUTracer::ProcessTraces");
+
+ // Make owning decoder's GL context current
+ if (!decoder_->MakeCurrent()) {
+ // Skip subsequent GL calls if MakeCurrent fails
+ traces_.clear();
+ return;
+ }
+
+ // Check if disjoint operation has occurred, discard ongoing traces if so.
+ if (tracer_type_ == kTracerTypeDisjointTimer) {
+ GLint disjoint_value = 0;
+ glGetIntegerv(GL_GPU_DISJOINT_EXT, &disjoint_value);
+ if (disjoint_value)
+ traces_.clear();
+ }
+
+ while (!traces_.empty() && traces_.front()->IsAvailable()) {
+ traces_.front()->Process();
+ traces_.pop_front();
+ }
+
+  // Clear pending traces if there were any errors.
+ GLenum err = glGetError();
+ if (err != GL_NO_ERROR)
+ traces_.clear();
+}
+
+void GPUTracer::CalculateTimerOffset() {
+ if (tracer_type_ != kTracerTypeInvalid) {
+ if (*gpu_trace_dev_category == '\0') {
+ // If GPU device category is off, invalidate timing sync.
+ gpu_timing_synced_ = false;
+ return;
+ } else if (tracer_type_ == kTracerTypeDisjointTimer) {
+      // Disjoint timer offsets should be calculated before every query.
+ gpu_timing_synced_ = true;
+ timer_offset_ = 0;
+ }
+
+ if (gpu_timing_synced_)
+ return;
+
+ TRACE_EVENT0("gpu", "GPUTracer::CalculateTimerOffset");
+
+ // NOTE(vmiura): It would be better to use glGetInteger64v, however
+ // it's not available everywhere.
+ GLuint64 gl_now = 0;
+ GLuint query;
+
+ glGenQueriesARB(1, &query);
+
+ glFinish();
+ glQueryCounter(query, GL_TIMESTAMP);
+ glFinish();
+
+ glGetQueryObjectui64v(query, GL_QUERY_RESULT, &gl_now);
+ glDeleteQueriesARB(1, &query);
+
+ base::TimeTicks system_now = base::TimeTicks::NowFromSystemTraceTime();
+
+ gl_now /= base::Time::kNanosecondsPerMicrosecond;
+ timer_offset_ = system_now.ToInternalValue() - gl_now;
+ gpu_timing_synced_ = true;
+ }
+}
+
+void GPUTracer::IssueProcessTask() {
+ if (traces_.empty() || process_posted_)
+ return;
+
+ process_posted_ = true;
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&GPUTracer::Process, base::AsWeakPtr(this)),
+ base::TimeDelta::FromMilliseconds(kProcessInterval));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gpu_tracer.h b/gpu/command_buffer/service/gpu_tracer.h
new file mode 100644
index 0000000..63e5646
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_tracer.h
@@ -0,0 +1,173 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the GPUTrace class.
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_TRACER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_TRACER_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/gpu_export.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+
+class Outputter;
+class GPUTrace;
+
+// Id used to keep trace namespaces separate
+enum GpuTracerSource {
+ kTraceGroupInvalid = -1,
+
+ kTraceGroupMarker = 0,
+ kTraceCHROMIUM = 1,
+ kTraceDecoder = 2,
+
+ NUM_TRACER_SOURCES
+};
+
+enum GpuTracerType {
+ kTracerTypeInvalid = -1,
+
+ kTracerTypeARBTimer,
+ kTracerTypeDisjointTimer
+};
+
+// Marker structure for a Trace.
+struct TraceMarker {
+ TraceMarker(const std::string& name);
+ ~TraceMarker();
+
+ std::string name_;
+ scoped_refptr<GPUTrace> trace_;
+};
+
+// Traces GPU Commands.
+class GPUTracer : public base::SupportsWeakPtr<GPUTracer> {
+ public:
+ explicit GPUTracer(gles2::GLES2Decoder* decoder);
+ ~GPUTracer();
+
+ // Scheduled processing in decoder begins.
+ bool BeginDecoding();
+
+ // Scheduled processing in decoder ends.
+ bool EndDecoding();
+
+ // Begin a trace marker.
+ bool Begin(const std::string& name, GpuTracerSource source);
+
+ // End the last started trace marker.
+ bool End(GpuTracerSource source);
+
+ bool IsTracing();
+
+ // Retrieve the name of the current open trace.
+ // Returns empty string if no current open trace.
+ const std::string& CurrentName() const;
+
+ private:
+ // Trace Processing.
+ scoped_refptr<GPUTrace> CreateTrace(const std::string& name);
+ void Process();
+ void ProcessTraces();
+
+ void CalculateTimerOffset();
+ void IssueProcessTask();
+
+ scoped_refptr<Outputter> outputter_;
+ std::vector<TraceMarker> markers_[NUM_TRACER_SOURCES];
+ std::deque<scoped_refptr<GPUTrace> > traces_;
+
+ const unsigned char* gpu_trace_srv_category;
+ const unsigned char* gpu_trace_dev_category;
+ gles2::GLES2Decoder* decoder_;
+
+ int64 timer_offset_;
+ GpuTracerSource last_tracer_source_;
+
+ GpuTracerType tracer_type_;
+ bool gpu_timing_synced_;
+ bool gpu_executing_;
+ bool process_posted_;
+
+ DISALLOW_COPY_AND_ASSIGN(GPUTracer);
+};
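+
+// Illustrative call pattern (editor's sketch, not part of this change): the
+// decoder brackets each batch of commands with BeginDecoding()/EndDecoding()
+// and wraps individual GL work in named markers. "MyDrawCall" is a
+// placeholder marker name.
+//
+//   tracer->BeginDecoding();
+//   tracer->Begin("MyDrawCall", kTraceDecoder);
+//   // ... issue GL commands ...
+//   tracer->End(kTraceDecoder);
+//   tracer->EndDecoding();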
+
+class Outputter : public base::RefCounted<Outputter> {
+ public:
+ virtual void Trace(const std::string& name,
+ int64 start_time,
+ int64 end_time) = 0;
+
+ protected:
+ virtual ~Outputter() {}
+ friend class base::RefCounted<Outputter>;
+};
+
+class TraceOutputter : public Outputter {
+ public:
+ static scoped_refptr<TraceOutputter> Create(const std::string& name);
+ virtual void Trace(const std::string& name,
+ int64 start_time,
+ int64 end_time) OVERRIDE;
+
+ protected:
+ friend class base::RefCounted<Outputter>;
+ explicit TraceOutputter(const std::string& name);
+ virtual ~TraceOutputter();
+
+ base::Thread named_thread_;
+ uint64 local_trace_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceOutputter);
+};
+
+class GPU_EXPORT GPUTrace
+ : public base::RefCounted<GPUTrace> {
+ public:
+ GPUTrace(scoped_refptr<Outputter> outputter,
+ const std::string& name,
+ int64 offset,
+ GpuTracerType tracer_type);
+
+ bool IsEnabled() { return tracer_type_ != kTracerTypeInvalid; }
+ const std::string& name() { return name_; }
+
+ void Start();
+ void End();
+ bool IsAvailable();
+ void Process();
+
+ private:
+ ~GPUTrace();
+
+ void Output();
+
+ friend class base::RefCounted<GPUTrace>;
+
+ std::string name_;
+ scoped_refptr<Outputter> outputter_;
+
+ int64 offset_;
+ int64 start_time_;
+ int64 end_time_;
+ GpuTracerType tracer_type_;
+ bool end_requested_;
+
+ GLuint queries_[2];
+
+ DISALLOW_COPY_AND_ASSIGN(GPUTrace);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GPU_TRACER_H_
diff --git a/gpu/command_buffer/service/gpu_tracer_unittest.cc b/gpu/command_buffer/service/gpu_tracer_unittest.cc
new file mode 100644
index 0000000..fe91f70
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_tracer_unittest.cc
@@ -0,0 +1,237 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <map>
+#include <set>
+
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+namespace gpu {
+namespace gles2 {
+
+using ::testing::InvokeWithoutArgs;
+using ::testing::Return;
+using ::testing::ReturnRef;
+using ::testing::ReturnPointee;
+using ::testing::NotNull;
+using ::testing::ElementsAreArray;
+using ::testing::ElementsAre;
+using ::testing::SetArrayArgument;
+using ::testing::AtLeast;
+using ::testing::SetArgPointee;
+using ::testing::Pointee;
+using ::testing::Unused;
+using ::testing::Invoke;
+using ::testing::_;
+
+class MockOutputter : public Outputter {
+ public:
+ MockOutputter() {}
+ MOCK_METHOD3(Trace,
+ void(const std::string& name, int64 start_time, int64 end_time));
+
+ protected:
+ ~MockOutputter() {}
+};
+
+class GlFakeQueries {
+ public:
+ GlFakeQueries() {}
+
+ void Reset() {
+ current_time_ = 0;
+ next_query_id_ = 23;
+ alloced_queries_.clear();
+ query_timestamp_.clear();
+ }
+
+ void SetCurrentGLTime(GLint64 current_time) { current_time_ = current_time; }
+
+ void GenQueriesARB(GLsizei n, GLuint* ids) {
+ for (GLsizei i = 0; i < n; i++) {
+ ids[i] = next_query_id_++;
+ alloced_queries_.insert(ids[i]);
+ }
+ }
+
+ void DeleteQueriesARB(GLsizei n, const GLuint* ids) {
+ for (GLsizei i = 0; i < n; i++) {
+ alloced_queries_.erase(ids[i]);
+ query_timestamp_.erase(ids[i]);
+ }
+ }
+
+ void GetQueryObjectiv(GLuint id, GLenum pname, GLint* params) {
+ switch (pname) {
+ case GL_QUERY_RESULT_AVAILABLE: {
+ std::map<GLuint, GLint64>::iterator it = query_timestamp_.find(id);
+ if (it != query_timestamp_.end() && it->second <= current_time_)
+ *params = 1;
+ else
+ *params = 0;
+ break;
+ }
+ default:
+ ASSERT_TRUE(false);
+ }
+ }
+
+ void QueryCounter(GLuint id, GLenum target) {
+ switch (target) {
+ case GL_TIMESTAMP:
+ ASSERT_TRUE(alloced_queries_.find(id) != alloced_queries_.end());
+ query_timestamp_[id] = current_time_;
+ break;
+ default:
+ ASSERT_TRUE(false);
+ }
+ }
+
+ void GetQueryObjectui64v(GLuint id, GLenum pname, GLuint64* params) {
+ switch (pname) {
+ case GL_QUERY_RESULT:
+ ASSERT_TRUE(query_timestamp_.find(id) != query_timestamp_.end());
+ *params = query_timestamp_.find(id)->second;
+ break;
+ default:
+ ASSERT_TRUE(false);
+ }
+ }
+
+ protected:
+ GLint64 current_time_;
+ GLuint next_query_id_;
+ std::set<GLuint> alloced_queries_;
+ std::map<GLuint, GLint64> query_timestamp_;
+};
+
+class BaseGpuTracerTest : public GpuServiceTest {
+ public:
+ BaseGpuTracerTest() {}
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ void DoTraceTest() {
+ MockOutputter* outputter = new MockOutputter();
+ scoped_refptr<Outputter> outputter_ref = outputter;
+
+ SetupTimerQueryMocks();
+
+ // Expected results
+ const std::string trace_name("trace_test");
+ const int64 offset_time = 3231;
+ const GLint64 start_timestamp = 7 * base::Time::kNanosecondsPerMicrosecond;
+ const GLint64 end_timestamp = 32 * base::Time::kNanosecondsPerMicrosecond;
+ const int64 expect_start_time =
+ (start_timestamp / base::Time::kNanosecondsPerMicrosecond) +
+ offset_time;
+ const int64 expect_end_time =
+ (end_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time;
+
+ // Expected Outputter::Trace call
+ EXPECT_CALL(*outputter,
+ Trace(trace_name, expect_start_time, expect_end_time));
+
+ scoped_refptr<GPUTrace> trace =
+ new GPUTrace(outputter_ref, trace_name, offset_time,
+ GetTracerType());
+
+ gl_fake_queries_.SetCurrentGLTime(start_timestamp);
+ trace->Start();
+
+ // Shouldn't be available before End() call
+ gl_fake_queries_.SetCurrentGLTime(end_timestamp);
+ EXPECT_FALSE(trace->IsAvailable());
+
+ trace->End();
+
+ // Shouldn't be available until the queries complete
+ gl_fake_queries_.SetCurrentGLTime(end_timestamp -
+ base::Time::kNanosecondsPerMicrosecond);
+ EXPECT_FALSE(trace->IsAvailable());
+
+ // Now it should be available
+ gl_fake_queries_.SetCurrentGLTime(end_timestamp);
+ EXPECT_TRUE(trace->IsAvailable());
+
+    // Process() should output the expected Trace results to MockOutputter.
+ trace->Process();
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ gl_fake_queries_.Reset();
+ }
+
+ virtual void TearDown() {
+ gl_.reset();
+ gl_fake_queries_.Reset();
+ GpuServiceTest::TearDown();
+ }
+
+ virtual void SetupTimerQueryMocks() {
+ // Delegate query APIs used by GPUTrace to a GlFakeQueries
+ EXPECT_CALL(*gl_, GenQueriesARB(_, NotNull())).Times(AtLeast(1)).WillOnce(
+ Invoke(&gl_fake_queries_, &GlFakeQueries::GenQueriesARB));
+
+ EXPECT_CALL(*gl_, GetQueryObjectiv(_, GL_QUERY_RESULT_AVAILABLE, NotNull()))
+ .Times(AtLeast(2))
+ .WillRepeatedly(
+ Invoke(&gl_fake_queries_, &GlFakeQueries::GetQueryObjectiv));
+
+ EXPECT_CALL(*gl_, QueryCounter(_, GL_TIMESTAMP))
+ .Times(AtLeast(2))
+ .WillRepeatedly(
+ Invoke(&gl_fake_queries_, &GlFakeQueries::QueryCounter));
+
+ EXPECT_CALL(*gl_, GetQueryObjectui64v(_, GL_QUERY_RESULT, NotNull()))
+ .Times(AtLeast(2))
+ .WillRepeatedly(
+ Invoke(&gl_fake_queries_, &GlFakeQueries::GetQueryObjectui64v));
+
+ EXPECT_CALL(*gl_, DeleteQueriesARB(2, NotNull()))
+ .Times(AtLeast(1))
+ .WillRepeatedly(
+ Invoke(&gl_fake_queries_, &GlFakeQueries::DeleteQueriesARB));
+ }
+
+ virtual GpuTracerType GetTracerType() = 0;
+
+ GlFakeQueries gl_fake_queries_;
+};
+
+class GpuARBTimerTracerTest : public BaseGpuTracerTest {
+ protected:
+ virtual GpuTracerType GetTracerType() OVERRIDE {
+ return kTracerTypeARBTimer;
+ }
+};
+
+class GpuDisjointTimerTracerTest : public BaseGpuTracerTest {
+ protected:
+ virtual GpuTracerType GetTracerType() OVERRIDE {
+ return kTracerTypeDisjointTimer;
+ }
+};
+
+TEST_F(GpuARBTimerTracerTest, GPUTrace) {
+ // Test basic timer query functionality
+ {
+ DoTraceTest();
+ }
+}
+
+TEST_F(GpuDisjointTimerTracerTest, GPUTrace) {
+ // Test basic timer query functionality
+ {
+ DoTraceTest();
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/id_manager.cc b/gpu/command_buffer/service/id_manager.cc
new file mode 100644
index 0000000..be60d7b
--- /dev/null
+++ b/gpu/command_buffer/service/id_manager.cc
@@ -0,0 +1,57 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/id_manager.h"
+#include "base/logging.h"
+
+namespace gpu {
+namespace gles2 {
+
+IdManager::IdManager() {}
+
+IdManager::~IdManager() {}
+
+bool IdManager::AddMapping(GLuint client_id, GLuint service_id) {
+ std::pair<MapType::iterator, bool> result = id_map_.insert(
+ std::make_pair(client_id, service_id));
+ return result.second;
+}
+
+bool IdManager::RemoveMapping(GLuint client_id, GLuint service_id) {
+ MapType::iterator iter = id_map_.find(client_id);
+ if (iter != id_map_.end() && iter->second == service_id) {
+ id_map_.erase(iter);
+ return true;
+ }
+ return false;
+}
+
+bool IdManager::GetServiceId(GLuint client_id, GLuint* service_id) {
+ DCHECK(service_id);
+ MapType::iterator iter = id_map_.find(client_id);
+ if (iter != id_map_.end()) {
+ *service_id = iter->second;
+ return true;
+ }
+ return false;
+}
+
+bool IdManager::GetClientId(GLuint service_id, GLuint* client_id) {
+ DCHECK(client_id);
+ MapType::iterator end(id_map_.end());
+ for (MapType::iterator iter(id_map_.begin());
+ iter != end;
+ ++iter) {
+ if (iter->second == service_id) {
+ *client_id = iter->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/id_manager.h b/gpu/command_buffer/service/id_manager.h
new file mode 100644
index 0000000..0bc0674
--- /dev/null
+++ b/gpu/command_buffer/service/id_manager.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ID_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ID_MANAGER_H_
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This class maps one set of ids to another.
+//
+// NOTE: To support shared resources an instance of this class will
+// need to be shared by multiple GLES2Decoders.
+class GPU_EXPORT IdManager {
+ public:
+ IdManager();
+ ~IdManager();
+
+ // Maps a client_id to a service_id. Return false if the client_id or
+ // service_id are already mapped to something else.
+ bool AddMapping(GLuint client_id, GLuint service_id);
+
+ // Unmaps a pair of ids. Returns false if the pair were not previously mapped.
+ bool RemoveMapping(GLuint client_id, GLuint service_id);
+
+ // Gets the corresponding service_id for the given client_id.
+ // Returns false if there is no corresponding service_id.
+ bool GetServiceId(GLuint client_id, GLuint* service_id);
+
+ // Gets the corresponding client_id for the given service_id.
+ // Returns false if there is no corresponding client_id.
+ bool GetClientId(GLuint service_id, GLuint* client_id);
+
+ private:
+ typedef base::hash_map<GLuint, GLuint> MapType;
+ MapType id_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(IdManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ID_MANAGER_H_
+
diff --git a/gpu/command_buffer/service/id_manager_unittest.cc b/gpu/command_buffer/service/id_manager_unittest.cc
new file mode 100644
index 0000000..015a442
--- /dev/null
+++ b/gpu/command_buffer/service/id_manager_unittest.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/id_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace gles2 {
+
+class IdManagerTest : public testing::Test {
+ public:
+ IdManagerTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ }
+
+ virtual void TearDown() {
+ }
+
+ IdManager manager_;
+};
+
+TEST_F(IdManagerTest, Basic) {
+ const GLuint kClientId1 = 1;
+ const GLuint kClientId2 = 2;
+ const GLuint kServiceId1 = 201;
+ const GLuint kServiceId2 = 202;
+ // Check we can add an id
+ EXPECT_TRUE(manager_.AddMapping(kClientId1, kServiceId1));
+ // Check we can get that mapping
+ GLuint service_id = 0;
+ EXPECT_TRUE(manager_.GetServiceId(kClientId1, &service_id));
+ EXPECT_EQ(kServiceId1, service_id);
+ GLuint client_id = 0;
+ EXPECT_TRUE(manager_.GetClientId(kServiceId1, &client_id));
+ EXPECT_EQ(kClientId1, client_id);
+ // Check that it fails if we get a non-existent id.
+ service_id = 0;
+ client_id = 0;
+ EXPECT_FALSE(manager_.GetServiceId(kClientId2, &service_id));
+ EXPECT_FALSE(manager_.GetClientId(kServiceId2, &client_id));
+ EXPECT_EQ(0u, service_id);
+ EXPECT_EQ(0u, client_id);
+ // Check we can add a second id.
+ EXPECT_TRUE(manager_.AddMapping(kClientId2, kServiceId2));
+ // Check we can get that mapping
+ service_id = 0;
+ EXPECT_TRUE(manager_.GetServiceId(kClientId1, &service_id));
+ EXPECT_EQ(kServiceId1, service_id);
+ EXPECT_TRUE(manager_.GetServiceId(kClientId2, &service_id));
+ EXPECT_EQ(kServiceId2, service_id);
+ client_id = 0;
+ EXPECT_TRUE(manager_.GetClientId(kServiceId1, &client_id));
+ EXPECT_EQ(kClientId1, client_id);
+ EXPECT_TRUE(manager_.GetClientId(kServiceId2, &client_id));
+ EXPECT_EQ(kClientId2, client_id);
+ // Check if we remove an id we can no longer get it.
+ EXPECT_TRUE(manager_.RemoveMapping(kClientId1, kServiceId1));
+ EXPECT_FALSE(manager_.GetServiceId(kClientId1, &service_id));
+ EXPECT_FALSE(manager_.GetClientId(kServiceId1, &client_id));
+ // Check we get an error if we try to remove a non-existent id.
+ EXPECT_FALSE(manager_.RemoveMapping(kClientId1, kServiceId1));
+ EXPECT_FALSE(manager_.RemoveMapping(kClientId2, kServiceId1));
+ EXPECT_FALSE(manager_.RemoveMapping(kClientId1, kServiceId2));
+ // Check we get an error if we try to map an existing id.
+ EXPECT_FALSE(manager_.AddMapping(kClientId2, kServiceId2));
+ EXPECT_FALSE(manager_.AddMapping(kClientId2, kServiceId1));
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/image_manager.cc b/gpu/command_buffer/service/image_manager.cc
new file mode 100644
index 0000000..46438c7
--- /dev/null
+++ b/gpu/command_buffer/service/image_manager.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/image_manager.h"
+
+#include "base/logging.h"
+#include "ui/gl/gl_image.h"
+
+namespace gpu {
+namespace gles2 {
+
+ImageManager::ImageManager() {
+}
+
+ImageManager::~ImageManager() {
+}
+
+void ImageManager::Destroy(bool have_context) {
+ for (GLImageMap::const_iterator iter = images_.begin(); iter != images_.end();
+ ++iter)
+ iter->second.get()->Destroy(have_context);
+ images_.clear();
+}
+
+void ImageManager::AddImage(gfx::GLImage* image, int32 service_id) {
+ DCHECK(images_.find(service_id) == images_.end());
+ images_[service_id] = image;
+}
+
+void ImageManager::RemoveImage(int32 service_id) {
+ GLImageMap::iterator iter = images_.find(service_id);
+ DCHECK(iter != images_.end());
+ iter->second.get()->Destroy(true);
+ images_.erase(iter);
+}
+
+gfx::GLImage* ImageManager::LookupImage(int32 service_id) {
+ GLImageMap::const_iterator iter = images_.find(service_id);
+ if (iter != images_.end())
+ return iter->second.get();
+
+ return NULL;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/image_manager.h b/gpu/command_buffer/service/image_manager.h
new file mode 100644
index 0000000..0a440f9
--- /dev/null
+++ b/gpu/command_buffer/service/image_manager.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_IMAGE_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_IMAGE_MANAGER_H_
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/gpu_export.h"
+
+namespace gfx {
+class GLImage;
+}
+
+namespace gpu {
+namespace gles2 {
+
+// This class keeps track of the images and their state.
+class GPU_EXPORT ImageManager {
+ public:
+ ImageManager();
+ ~ImageManager();
+
+ void Destroy(bool have_context);
+ void AddImage(gfx::GLImage* image, int32 service_id);
+ void RemoveImage(int32 service_id);
+ gfx::GLImage* LookupImage(int32 service_id);
+
+ private:
+ typedef base::hash_map<int32, scoped_refptr<gfx::GLImage> > GLImageMap;
+ GLImageMap images_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImageManager);
+};
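+
+// Illustrative usage (editor's sketch, not part of this change): the decoder
+// registers a gfx::GLImage under a service id and resolves it later. Here
+// image and image_id are placeholders.
+//
+//   image_manager->AddImage(image.get(), image_id);  // keeps a reference
+//   gfx::GLImage* found = image_manager->LookupImage(image_id);
+//   image_manager->RemoveImage(image_id);  // destroys the GL resources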
+
+}  // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_IMAGE_MANAGER_H_
diff --git a/gpu/command_buffer/service/in_process_command_buffer.cc b/gpu/command_buffer/service/in_process_command_buffer.cc
new file mode 100644
index 0000000..45199a7
--- /dev/null
+++ b/gpu/command_buffer/service/in_process_command_buffer.cc
@@ -0,0 +1,778 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/in_process_command_buffer.h"
+
+#include <queue>
+#include <set>
+#include <utility>
+
+#include <GLES2/gl2.h>
+#ifndef GL_GLEXT_PROTOTYPES
+#define GL_GLEXT_PROTOTYPES 1
+#endif
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/sequence_checker.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/threading/thread.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/gl_context_virtual.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "ui/gfx/size.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image.h"
+#include "ui/gl/gl_share_group.h"
+
+#if defined(OS_ANDROID)
+#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
+#include "ui/gl/android/surface_texture.h"
+#endif
+
+namespace gpu {
+
+namespace {
+
+template <typename T>
+static void RunTaskWithResult(base::Callback<T(void)> task,
+ T* result,
+ base::WaitableEvent* completion) {
+ *result = task.Run();
+ completion->Signal();
+}
+
+class GpuInProcessThread
+ : public base::Thread,
+ public InProcessCommandBuffer::Service,
+ public base::RefCountedThreadSafe<GpuInProcessThread> {
+ public:
+ GpuInProcessThread();
+
+ virtual void AddRef() const OVERRIDE {
+ base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
+ }
+ virtual void Release() const OVERRIDE {
+ base::RefCountedThreadSafe<GpuInProcessThread>::Release();
+ }
+
+ virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
+ virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
+ virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }
+ virtual scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
+ OVERRIDE;
+
+ private:
+ virtual ~GpuInProcessThread();
+ friend class base::RefCountedThreadSafe<GpuInProcessThread>;
+
+ scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
+ DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
+};
+
+GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
+ Start();
+}
+
+GpuInProcessThread::~GpuInProcessThread() {
+ Stop();
+}
+
+void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
+ message_loop()->PostTask(FROM_HERE, task);
+}
+
+void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
+ message_loop()->PostDelayedTask(
+ FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
+}
+
+scoped_refptr<gles2::ShaderTranslatorCache>
+GpuInProcessThread::shader_translator_cache() {
+ if (!shader_translator_cache_.get())
+ shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
+ return shader_translator_cache_;
+}
+
+base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ =
+ LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<base::Lock> default_thread_clients_lock_ =
+ LAZY_INSTANCE_INITIALIZER;
+
+class ScopedEvent {
+ public:
+ ScopedEvent(base::WaitableEvent* event) : event_(event) {}
+ ~ScopedEvent() { event_->Signal(); }
+
+ private:
+ base::WaitableEvent* event_;
+};
+
+class SyncPointManager {
+ public:
+ SyncPointManager();
+ ~SyncPointManager();
+
+ uint32 GenerateSyncPoint();
+ void RetireSyncPoint(uint32 sync_point);
+
+ bool IsSyncPointPassed(uint32 sync_point);
+ void WaitSyncPoint(uint32 sync_point);
+
+ private:
+ // This lock protects access to pending_sync_points_ and next_sync_point_ and
+ // is used with the ConditionVariable to signal when a sync point is retired.
+ base::Lock lock_;
+ std::set<uint32> pending_sync_points_;
+ uint32 next_sync_point_;
+ base::ConditionVariable cond_var_;
+};
+
+SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_) {}
+
+SyncPointManager::~SyncPointManager() {
+ DCHECK_EQ(pending_sync_points_.size(), 0U);
+}
+
+uint32 SyncPointManager::GenerateSyncPoint() {
+ base::AutoLock lock(lock_);
+ uint32 sync_point = next_sync_point_++;
+ DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
+ pending_sync_points_.insert(sync_point);
+ return sync_point;
+}
+
+void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
+ base::AutoLock lock(lock_);
+ DCHECK(pending_sync_points_.count(sync_point));
+ pending_sync_points_.erase(sync_point);
+ cond_var_.Broadcast();
+}
+
+bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
+ base::AutoLock lock(lock_);
+ return pending_sync_points_.count(sync_point) == 0;
+}
+
+void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
+ base::AutoLock lock(lock_);
+ while (pending_sync_points_.count(sync_point)) {
+ cond_var_.Wait();
+ }
+}
+
+base::LazyInstance<SyncPointManager> g_sync_point_manager =
+ LAZY_INSTANCE_INITIALIZER;
+
+bool WaitSyncPoint(uint32 sync_point) {
+ g_sync_point_manager.Get().WaitSyncPoint(sync_point);
+ return true;
+}
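+
+// Illustrative flow (editor's sketch, not part of this change): the client
+// side generates a sync point, the GPU thread retires it after the
+// corresponding commands execute, and WaitSyncPoint() blocks until then:
+//
+//   uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
+//   // ... queue GPU work followed by RetireSyncPoint(sync_point) ...
+//   g_sync_point_manager.Get().WaitSyncPoint(sync_point);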
+
+}  // anonymous namespace
+
+InProcessCommandBuffer::Service::Service() {}
+
+InProcessCommandBuffer::Service::~Service() {}
+
+scoped_refptr<gles2::MailboxManager>
+InProcessCommandBuffer::Service::mailbox_manager() {
+ if (!mailbox_manager_.get())
+ mailbox_manager_ = new gles2::MailboxManager();
+ return mailbox_manager_;
+}
+
+scoped_refptr<InProcessCommandBuffer::Service>
+InProcessCommandBuffer::GetDefaultService() {
+ base::AutoLock lock(default_thread_clients_lock_.Get());
+ scoped_refptr<Service> service;
+ if (!default_thread_clients_.Get().empty()) {
+ InProcessCommandBuffer* other = *default_thread_clients_.Get().begin();
+ service = other->service_;
+ DCHECK(service.get());
+ } else {
+ service = new GpuInProcessThread;
+ }
+ return service;
+}
+
+InProcessCommandBuffer::InProcessCommandBuffer(
+ const scoped_refptr<Service>& service)
+ : context_lost_(false),
+ idle_work_pending_(false),
+ last_put_offset_(-1),
+ flush_event_(false, false),
+ service_(service.get() ? service : GetDefaultService()),
+ gpu_thread_weak_ptr_factory_(this) {
+ if (!service.get()) {
+ base::AutoLock lock(default_thread_clients_lock_.Get());
+ default_thread_clients_.Get().insert(this);
+ }
+}
+
+InProcessCommandBuffer::~InProcessCommandBuffer() {
+ Destroy();
+ base::AutoLock lock(default_thread_clients_lock_.Get());
+ default_thread_clients_.Get().erase(this);
+}
+
+void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
+ CheckSequencedThread();
+ DCHECK(!surface_->IsOffscreen());
+ surface_->Resize(size);
+}
+
+bool InProcessCommandBuffer::MakeCurrent() {
+ CheckSequencedThread();
+ command_buffer_lock_.AssertAcquired();
+
+ if (!context_lost_ && decoder_->MakeCurrent())
+ return true;
+ DLOG(ERROR) << "Context lost because MakeCurrent failed.";
+ command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+ command_buffer_->SetParseError(gpu::error::kLostContext);
+ return false;
+}
+
+void InProcessCommandBuffer::PumpCommands() {
+ CheckSequencedThread();
+ command_buffer_lock_.AssertAcquired();
+
+ if (!MakeCurrent())
+ return;
+
+ gpu_scheduler_->PutChanged();
+}
+
+bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
+ CheckSequencedThread();
+ command_buffer_lock_.AssertAcquired();
+ command_buffer_->SetGetBuffer(transfer_buffer_id);
+ return true;
+}
+
+bool InProcessCommandBuffer::Initialize(
+ scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ const std::vector<int32>& attribs,
+ gfx::GpuPreference gpu_preference,
+ const base::Closure& context_lost_callback,
+ InProcessCommandBuffer* share_group) {
+ DCHECK(!share_group || service_.get() == share_group->service_.get());
+ context_lost_callback_ = WrapCallback(context_lost_callback);
+
+ if (surface.get()) {
+ // GPU thread must be the same as client thread due to GLSurface not being
+ // thread safe.
+ sequence_checker_.reset(new base::SequenceChecker);
+ surface_ = surface;
+ }
+
+ gpu::Capabilities capabilities;
+ InitializeOnGpuThreadParams params(is_offscreen,
+ window,
+ size,
+ attribs,
+ gpu_preference,
+ &capabilities,
+ share_group);
+
+ base::Callback<bool(void)> init_task =
+ base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
+ base::Unretained(this),
+ params);
+
+ base::WaitableEvent completion(true, false);
+ bool result = false;
+ QueueTask(
+ base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
+ completion.Wait();
+
+ if (result)
+ capabilities_ = capabilities;
+
+ return result;
+}
+
+bool InProcessCommandBuffer::InitializeOnGpuThread(
+ const InitializeOnGpuThreadParams& params) {
+ CheckSequencedThread();
+ gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();
+
+ DCHECK(params.size.width() >= 0 && params.size.height() >= 0);
+
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ manager->Initialize();
+
+ scoped_ptr<CommandBufferService> command_buffer(
+ new CommandBufferService(transfer_buffer_manager_.get()));
+ command_buffer->SetPutOffsetChangeCallback(base::Bind(
+ &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
+ command_buffer->SetParseErrorCallback(base::Bind(
+ &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));
+
+ if (!command_buffer->Initialize()) {
+ LOG(ERROR) << "Could not initialize command buffer.";
+ DestroyOnGpuThread();
+ return false;
+ }
+
+ gl_share_group_ = params.context_group
+ ? params.context_group->gl_share_group_.get()
+ : new gfx::GLShareGroup;
+
+#if defined(OS_ANDROID)
+ stream_texture_manager_.reset(new StreamTextureManagerInProcess);
+#endif
+
+ bool bind_generates_resource = false;
+ decoder_.reset(gles2::GLES2Decoder::Create(
+ params.context_group
+ ? params.context_group->decoder_->GetContextGroup()
+ : new gles2::ContextGroup(service_->mailbox_manager(),
+ NULL,
+ service_->shader_translator_cache(),
+ NULL,
+ bind_generates_resource)));
+
+ gpu_scheduler_.reset(
+ new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
+ command_buffer->SetGetBufferChangeCallback(base::Bind(
+ &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
+ command_buffer_ = command_buffer.Pass();
+
+ decoder_->set_engine(gpu_scheduler_.get());
+
+ if (!surface_.get()) {
+ if (params.is_offscreen)
+ surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
+ else
+ surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
+ }
+
+ if (!surface_.get()) {
+ LOG(ERROR) << "Could not create GLSurface.";
+ DestroyOnGpuThread();
+ return false;
+ }
+
+ if (service_->UseVirtualizedGLContexts()) {
+ context_ = gl_share_group_->GetSharedContext();
+ if (!context_.get()) {
+ context_ = gfx::GLContext::CreateGLContext(
+ gl_share_group_.get(), surface_.get(), params.gpu_preference);
+ gl_share_group_->SetSharedContext(context_.get());
+ }
+
+ context_ = new GLContextVirtual(
+ gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
+ if (context_->Initialize(surface_.get(), params.gpu_preference)) {
+ VLOG(1) << "Created virtual GL context.";
+ } else {
+ context_ = NULL;
+ }
+ } else {
+ context_ = gfx::GLContext::CreateGLContext(
+ gl_share_group_.get(), surface_.get(), params.gpu_preference);
+ }
+
+ if (!context_.get()) {
+ LOG(ERROR) << "Could not create GLContext.";
+ DestroyOnGpuThread();
+ return false;
+ }
+
+ if (!context_->MakeCurrent(surface_.get())) {
+ LOG(ERROR) << "Could not make context current.";
+ DestroyOnGpuThread();
+ return false;
+ }
+
+ gles2::DisallowedFeatures disallowed_features;
+ disallowed_features.gpu_memory_manager = true;
+ if (!decoder_->Initialize(surface_,
+ context_,
+ params.is_offscreen,
+ params.size,
+ disallowed_features,
+ params.attribs)) {
+ LOG(ERROR) << "Could not initialize decoder.";
+ DestroyOnGpuThread();
+ return false;
+ }
+ *params.capabilities = decoder_->GetCapabilities();
+
+ if (!params.is_offscreen) {
+ decoder_->SetResizeCallback(base::Bind(
+ &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
+ }
+ decoder_->SetWaitSyncPointCallback(base::Bind(&WaitSyncPoint));
+
+ return true;
+}
+
+void InProcessCommandBuffer::Destroy() {
+ CheckSequencedThread();
+
+ base::WaitableEvent completion(true, false);
+ bool result = false;
+ base::Callback<bool(void)> destroy_task = base::Bind(
+ &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
+ QueueTask(
+ base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
+ completion.Wait();
+}
+
+bool InProcessCommandBuffer::DestroyOnGpuThread() {
+ CheckSequencedThread();
+ gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
+ command_buffer_.reset();
+ // Clean up GL resources if possible.
+ bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
+ if (decoder_) {
+ decoder_->Destroy(have_context);
+ decoder_.reset();
+ }
+ context_ = NULL;
+ surface_ = NULL;
+ gl_share_group_ = NULL;
+#if defined(OS_ANDROID)
+ stream_texture_manager_.reset();
+#endif
+
+ return true;
+}
+
+void InProcessCommandBuffer::CheckSequencedThread() {
+ DCHECK(!sequence_checker_ ||
+ sequence_checker_->CalledOnValidSequencedThread());
+}
+
+void InProcessCommandBuffer::OnContextLost() {
+ CheckSequencedThread();
+ if (!context_lost_callback_.is_null()) {
+ context_lost_callback_.Run();
+ context_lost_callback_.Reset();
+ }
+
+ context_lost_ = true;
+}
+
+CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
+ CheckSequencedThread();
+ base::AutoLock lock(state_after_last_flush_lock_);
+ if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
+ last_state_ = state_after_last_flush_;
+ return last_state_;
+}
+
+CommandBuffer::State InProcessCommandBuffer::GetLastState() {
+ CheckSequencedThread();
+ return last_state_;
+}
+
+int32 InProcessCommandBuffer::GetLastToken() {
+ CheckSequencedThread();
+ GetStateFast();
+ return last_state_.token;
+}
+
+void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
+ CheckSequencedThread();
+ ScopedEvent handle_flush(&flush_event_);
+ base::AutoLock lock(command_buffer_lock_);
+ command_buffer_->Flush(put_offset);
+ {
+ // Update state before signaling the flush event.
+ base::AutoLock lock(state_after_last_flush_lock_);
+ state_after_last_flush_ = command_buffer_->GetLastState();
+ }
+ DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
+ (error::IsError(state_after_last_flush_.error) && context_lost_));
+
+ // If we've processed all pending commands but still have pending queries,
+ // pump idle work until the query is passed.
+ if (put_offset == state_after_last_flush_.get_offset &&
+ gpu_scheduler_->HasMoreWork()) {
+ ScheduleIdleWorkOnGpuThread();
+ }
+}
+
+void InProcessCommandBuffer::PerformIdleWork() {
+ CheckSequencedThread();
+ idle_work_pending_ = false;
+ base::AutoLock lock(command_buffer_lock_);
+ if (MakeCurrent() && gpu_scheduler_->HasMoreWork()) {
+ gpu_scheduler_->PerformIdleWork();
+ ScheduleIdleWorkOnGpuThread();
+ }
+}
+
+void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
+ CheckSequencedThread();
+ if (idle_work_pending_)
+ return;
+ idle_work_pending_ = true;
+ service_->ScheduleIdleWork(
+ base::Bind(&InProcessCommandBuffer::PerformIdleWork,
+ gpu_thread_weak_ptr_));
+}
+
+void InProcessCommandBuffer::Flush(int32 put_offset) {
+ CheckSequencedThread();
+ if (last_state_.error != gpu::error::kNoError)
+ return;
+
+ if (last_put_offset_ == put_offset)
+ return;
+
+ last_put_offset_ = put_offset;
+ base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
+ gpu_thread_weak_ptr_,
+ put_offset);
+ QueueTask(task);
+}
+
+void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
+ CheckSequencedThread();
+ while (!InRange(start, end, GetLastToken()) &&
+ last_state_.error == gpu::error::kNoError)
+ flush_event_.Wait();
+}
+
+void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
+ CheckSequencedThread();
+
+ GetStateFast();
+ while (!InRange(start, end, last_state_.get_offset) &&
+ last_state_.error == gpu::error::kNoError) {
+ flush_event_.Wait();
+ GetStateFast();
+ }
+}
+
+void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
+ CheckSequencedThread();
+ if (last_state_.error != gpu::error::kNoError)
+ return;
+
+ {
+ base::AutoLock lock(command_buffer_lock_);
+ command_buffer_->SetGetBuffer(shm_id);
+ last_put_offset_ = 0;
+ }
+ {
+ base::AutoLock lock(state_after_last_flush_lock_);
+ state_after_last_flush_ = command_buffer_->GetLastState();
+ }
+}
+
+scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
+ int32* id) {
+ CheckSequencedThread();
+ base::AutoLock lock(command_buffer_lock_);
+ return command_buffer_->CreateTransferBuffer(size, id);
+}
+
+void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
+ CheckSequencedThread();
+ base::Closure task =
+ base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
+ base::Unretained(this),
+ id);
+
+ QueueTask(task);
+}
+
+void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id) {
+ base::AutoLock lock(command_buffer_lock_);
+ command_buffer_->DestroyTransferBuffer(id);
+}
+
+gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
+ return capabilities_;
+}
+
+gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
+ size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage,
+ int32* id) {
+ NOTREACHED();
+ return NULL;
+}
+
+void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
+ NOTREACHED();
+}
+
+uint32 InProcessCommandBuffer::InsertSyncPoint() {
+ uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
+ QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
+ base::Unretained(this),
+ sync_point));
+ return sync_point;
+}
+
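+// Unlike InsertSyncPoint(), the returned sync point is not queued for
+// retirement here; callers are expected to retire it later via
+// RetireSyncPoint().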
+uint32 InProcessCommandBuffer::InsertFutureSyncPoint() {
+ return g_sync_point_manager.Get().GenerateSyncPoint();
+}
+
+void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point) {
+ QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
+ base::Unretained(this),
+ sync_point));
+}
+
+void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
+ gles2::MailboxManager* mailbox_manager =
+ decoder_->GetContextGroup()->mailbox_manager();
+ if (mailbox_manager->UsesSync()) {
+ bool make_current_success = false;
+ {
+ base::AutoLock lock(command_buffer_lock_);
+ make_current_success = MakeCurrent();
+ }
+ if (make_current_success)
+ mailbox_manager->PushTextureUpdates();
+ }
+ g_sync_point_manager.Get().RetireSyncPoint(sync_point);
+}
+
+void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
+ const base::Closure& callback) {
+ CheckSequencedThread();
+ QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
+ base::Unretained(this),
+ sync_point,
+ WrapCallback(callback)));
+}
+
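+// Runs |callback| once |sync_point| has passed. If it has not passed yet,
+// reschedules itself as idle work and polls again later.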
+void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
+ unsigned sync_point,
+ const base::Closure& callback) {
+ if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
+ callback.Run();
+ } else {
+ service_->ScheduleIdleWork(
+ base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
+ gpu_thread_weak_ptr_,
+ sync_point,
+ callback));
+ }
+}
+
+void InProcessCommandBuffer::SignalQuery(unsigned query_id,
+ const base::Closure& callback) {
+ CheckSequencedThread();
+ QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
+ base::Unretained(this),
+ query_id,
+ WrapCallback(callback)));
+}
+
+void InProcessCommandBuffer::SignalQueryOnGpuThread(
+ unsigned query_id,
+ const base::Closure& callback) {
+  gles2::QueryManager* query_manager = decoder_->GetQueryManager();
+  DCHECK(query_manager);
+
+  gles2::QueryManager::Query* query = query_manager->GetQuery(query_id);
+ if (!query)
+ callback.Run();
+ else
+ query->AddCallback(callback);
+}
+
+void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}
+
+uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
+ base::WaitableEvent completion(true, false);
+ uint32 stream_id = 0;
+ base::Callback<uint32(void)> task =
+ base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
+ base::Unretained(this),
+ texture_id);
+ QueueTask(
+ base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
+ completion.Wait();
+ return stream_id;
+}
+
+uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
+ uint32 client_texture_id) {
+#if defined(OS_ANDROID)
+ return stream_texture_manager_->CreateStreamTexture(
+ client_texture_id, decoder_->GetContextGroup()->texture_manager());
+#else
+ return 0;
+#endif
+}
+
+gpu::error::Error InProcessCommandBuffer::GetLastError() {
+ CheckSequencedThread();
+ return last_state_.error;
+}
+
+bool InProcessCommandBuffer::Initialize() {
+ NOTREACHED();
+ return false;
+}
+
+namespace {
+
+void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Closure& callback) {
+ if (!loop->BelongsToCurrentThread()) {
+ loop->PostTask(FROM_HERE, callback);
+ } else {
+ callback.Run();
+ }
+}
+
+void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
+ DCHECK(callback.get());
+ callback->Run();
+}
+
+} // anonymous namespace
+
+base::Closure InProcessCommandBuffer::WrapCallback(
+ const base::Closure& callback) {
+ // Make sure the callback gets deleted on the target thread by passing
+ // ownership.
+ scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
+ base::Closure callback_on_client_thread =
+ base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
+ base::Closure wrapped_callback =
+ base::Bind(&PostCallback, base::MessageLoopProxy::current(),
+ callback_on_client_thread);
+ return wrapped_callback;
+}
+
+#if defined(OS_ANDROID)
+scoped_refptr<gfx::SurfaceTexture>
+InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
+ DCHECK(stream_texture_manager_);
+ return stream_texture_manager_->GetSurfaceTexture(stream_id);
+}
+#endif
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/in_process_command_buffer.h b/gpu/command_buffer/service/in_process_command_buffer.h
new file mode 100644
index 0000000..b650725
--- /dev/null
+++ b/gpu/command_buffer/service/in_process_command_buffer.h
@@ -0,0 +1,235 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_IN_PROCESS_COMMAND_BUFFER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_IN_PROCESS_COMMAND_BUFFER_H_
+
+#include <map>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/containers/scoped_ptr_hash_map.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/gpu_export.h"
+#include "ui/gfx/native_widget_types.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/gpu_preference.h"
+
+namespace base {
+class SequenceChecker;
+}
+
+namespace gfx {
+class GLContext;
+class GLShareGroup;
+class GLSurface;
+class Size;
+}
+
+#if defined(OS_ANDROID)
+namespace gfx {
+class SurfaceTexture;
+}
+namespace gpu {
+class StreamTextureManagerInProcess;
+}
+#endif
+
+namespace gpu {
+
+namespace gles2 {
+class GLES2Decoder;
+class MailboxManager;
+class ShaderTranslatorCache;
+}
+
+class CommandBufferServiceBase;
+class GpuScheduler;
+class TransferBufferManagerInterface;
+
+// This class provides a thread-safe interface to the global GPU service (for
+// example, the GPU thread) when run in single-process mode.
+// However, the behavior of accessing one context (i.e. one instance of this
+// class) from different client threads is undefined.
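+//
+// Illustrative usage (sketch only, not part of this change; MyService and
+// OnContextLost are hypothetical placeholders for a concrete Service
+// implementation and a context-lost handler):
+//
+//   scoped_refptr<InProcessCommandBuffer::Service> service(new MyService);
+//   InProcessCommandBuffer command_buffer(service);
+//   std::vector<int32> attribs;
+//   bool ok = command_buffer.Initialize(NULL /* surface */,
+//                                       true /* is_offscreen */,
+//                                       gfx::kNullAcceleratedWidget,
+//                                       gfx::Size(1, 1),
+//                                       attribs,
+//                                       gfx::PreferDiscreteGpu,
+//                                       base::Bind(&OnContextLost),
+//                                       NULL /* share_group */);
+//   // On success, commands written into the buffer are executed by calling
+//   // Flush() with the new put offset; Destroy() tears the context down.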
+class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
+ public GpuControl {
+ public:
+ class Service;
+ explicit InProcessCommandBuffer(const scoped_refptr<Service>& service);
+ virtual ~InProcessCommandBuffer();
+
+ // If |surface| is not NULL, use it directly; in this case, the command
+ // buffer gpu thread must be the same as the client thread. Otherwise create
+ // a new GLSurface.
+ bool Initialize(scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ const std::vector<int32>& attribs,
+ gfx::GpuPreference gpu_preference,
+ const base::Closure& context_lost_callback,
+ InProcessCommandBuffer* share_group);
+ void Destroy();
+
+ // CommandBuffer implementation:
+ virtual bool Initialize() OVERRIDE;
+ virtual State GetLastState() OVERRIDE;
+ virtual int32 GetLastToken() OVERRIDE;
+ virtual void Flush(int32 put_offset) OVERRIDE;
+ virtual void WaitForTokenInRange(int32 start, int32 end) OVERRIDE;
+ virtual void WaitForGetOffsetInRange(int32 start, int32 end) OVERRIDE;
+ virtual void SetGetBuffer(int32 shm_id) OVERRIDE;
+ virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ int32* id) OVERRIDE;
+ virtual void DestroyTransferBuffer(int32 id) OVERRIDE;
+ virtual gpu::error::Error GetLastError() OVERRIDE;
+
+ // GpuControl implementation:
+ virtual gpu::Capabilities GetCapabilities() OVERRIDE;
+ virtual gfx::GpuMemoryBuffer* CreateGpuMemoryBuffer(size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage,
+ int32* id) OVERRIDE;
+ virtual void DestroyGpuMemoryBuffer(int32 id) OVERRIDE;
+ virtual uint32 InsertSyncPoint() OVERRIDE;
+ virtual uint32 InsertFutureSyncPoint() OVERRIDE;
+ virtual void RetireSyncPoint(uint32 sync_point) OVERRIDE;
+ virtual void SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) OVERRIDE;
+ virtual void SignalQuery(uint32 query_id,
+ const base::Closure& callback) OVERRIDE;
+ virtual void SetSurfaceVisible(bool visible) OVERRIDE;
+ virtual uint32 CreateStreamTexture(uint32 texture_id) OVERRIDE;
+
+  // The serializer interface to the GPU service (i.e. the GPU thread).
+ class Service {
+ public:
+ Service();
+ virtual ~Service();
+
+ virtual void AddRef() const = 0;
+ virtual void Release() const = 0;
+
+ // Queues a task to run as soon as possible.
+ virtual void ScheduleTask(const base::Closure& task) = 0;
+
+    // Schedules |task| to run at an appropriate time for performing idle
+    // work.
+ virtual void ScheduleIdleWork(const base::Closure& task) = 0;
+
+ virtual bool UseVirtualizedGLContexts() = 0;
+ virtual scoped_refptr<gles2::ShaderTranslatorCache>
+ shader_translator_cache() = 0;
+ scoped_refptr<gles2::MailboxManager> mailbox_manager();
+
+ private:
+ scoped_refptr<gles2::MailboxManager> mailbox_manager_;
+ };
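+  //
+  // Minimal sketch of a Service implementation (illustrative only; assumes
+  // tasks can simply be posted to the current message loop):
+  //
+  //   class MyService : public InProcessCommandBuffer::Service,
+  //                     public base::RefCountedThreadSafe<MyService> {
+  //    public:
+  //     virtual void AddRef() const OVERRIDE {
+  //       base::RefCountedThreadSafe<MyService>::AddRef();
+  //     }
+  //     virtual void Release() const OVERRIDE {
+  //       base::RefCountedThreadSafe<MyService>::Release();
+  //     }
+  //     virtual void ScheduleTask(const base::Closure& task) OVERRIDE {
+  //       base::MessageLoop::current()->PostTask(FROM_HERE, task);
+  //     }
+  //     virtual void ScheduleIdleWork(const base::Closure& task) OVERRIDE {
+  //       base::MessageLoop::current()->PostTask(FROM_HERE, task);
+  //     }
+  //     virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }
+  //     virtual scoped_refptr<gles2::ShaderTranslatorCache>
+  //         shader_translator_cache() OVERRIDE {
+  //       return shader_translator_cache_;
+  //     }
+  //    private:
+  //     friend class base::RefCountedThreadSafe<MyService>;
+  //     scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache_;
+  //   };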
+
+#if defined(OS_ANDROID)
+ scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture(
+ uint32 stream_id);
+#endif
+
+ private:
+ struct InitializeOnGpuThreadParams {
+ bool is_offscreen;
+ gfx::AcceleratedWidget window;
+ const gfx::Size& size;
+ const std::vector<int32>& attribs;
+ gfx::GpuPreference gpu_preference;
+    gpu::Capabilities* capabilities;  // Output.
+ InProcessCommandBuffer* context_group;
+
+ InitializeOnGpuThreadParams(bool is_offscreen,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ const std::vector<int32>& attribs,
+ gfx::GpuPreference gpu_preference,
+ gpu::Capabilities* capabilities,
+ InProcessCommandBuffer* share_group)
+ : is_offscreen(is_offscreen),
+ window(window),
+ size(size),
+ attribs(attribs),
+ gpu_preference(gpu_preference),
+ capabilities(capabilities),
+ context_group(share_group) {}
+ };
+
+ bool InitializeOnGpuThread(const InitializeOnGpuThreadParams& params);
+ bool DestroyOnGpuThread();
+ void FlushOnGpuThread(int32 put_offset);
+ void ScheduleIdleWorkOnGpuThread();
+ uint32 CreateStreamTextureOnGpuThread(uint32 client_texture_id);
+ bool MakeCurrent();
+ base::Closure WrapCallback(const base::Closure& callback);
+ State GetStateFast();
+ void QueueTask(const base::Closure& task) { service_->ScheduleTask(task); }
+ void CheckSequencedThread();
+ void RetireSyncPointOnGpuThread(uint32 sync_point);
+ void SignalSyncPointOnGpuThread(uint32 sync_point,
+ const base::Closure& callback);
+ void SignalQueryOnGpuThread(unsigned query_id, const base::Closure& callback);
+ void DestroyTransferBufferOnGpuThread(int32 id);
+
+ // Callbacks:
+ void OnContextLost();
+ void OnResizeView(gfx::Size size, float scale_factor);
+ bool GetBufferChanged(int32 transfer_buffer_id);
+ void PumpCommands();
+ void PerformIdleWork();
+
+ static scoped_refptr<Service> GetDefaultService();
+
+ // Members accessed on the gpu thread (possibly with the exception of
+ // creation):
+ bool context_lost_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_ptr<gles2::GLES2Decoder> decoder_;
+ scoped_refptr<gfx::GLContext> context_;
+ scoped_refptr<gfx::GLSurface> surface_;
+ base::Closure context_lost_callback_;
+ bool idle_work_pending_; // Used to throttle PerformIdleWork.
+
+ // Members accessed on the client thread:
+ State last_state_;
+ int32 last_put_offset_;
+ gpu::Capabilities capabilities_;
+
+ // Accessed on both threads:
+ scoped_ptr<CommandBufferServiceBase> command_buffer_;
+ base::Lock command_buffer_lock_;
+ base::WaitableEvent flush_event_;
+ scoped_refptr<Service> service_;
+ State state_after_last_flush_;
+ base::Lock state_after_last_flush_lock_;
+ scoped_refptr<gfx::GLShareGroup> gl_share_group_;
+
+#if defined(OS_ANDROID)
+ scoped_ptr<StreamTextureManagerInProcess> stream_texture_manager_;
+#endif
+
+  // Only used with explicit scheduling, when the gpu thread is the same as
+  // the client thread.
+ scoped_ptr<base::SequenceChecker> sequence_checker_;
+
+ base::WeakPtr<InProcessCommandBuffer> gpu_thread_weak_ptr_;
+ base::WeakPtrFactory<InProcessCommandBuffer> gpu_thread_weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(InProcessCommandBuffer);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_IN_PROCESS_COMMAND_BUFFER_H_
diff --git a/gpu/command_buffer/service/logger.cc b/gpu/command_buffer/service/logger.cc
new file mode 100644
index 0000000..1fd2933
--- /dev/null
+++ b/gpu/command_buffer/service/logger.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/logger.h"
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/debug_marker_manager.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+
+namespace gpu {
+namespace gles2 {
+
+Logger::Logger(const DebugMarkerManager* debug_marker_manager)
+ : debug_marker_manager_(debug_marker_manager),
+ log_message_count_(0),
+ log_synthesized_gl_errors_(true) {
+ Logger* this_temp = this;
+ this_in_hex_ = std::string("GroupMarkerNotSet(crbug.com/242999)!:") +
+ base::HexEncode(&this_temp, sizeof(this_temp));
+}
+
+Logger::~Logger() {}
+
+void Logger::LogMessage(
+ const char* filename, int line, const std::string& msg) {
+ if (log_message_count_ < kMaxLogMessages ||
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableGLErrorLimit)) {
+ std::string prefixed_msg(std::string("[") + GetLogPrefix() + "]" + msg);
+ ++log_message_count_;
+ // LOG this unless logging is turned off as any chromium code that
+ // generates these errors probably has a bug.
+ if (log_synthesized_gl_errors_) {
+ ::logging::LogMessage(
+ filename, line, ::logging::LOG_ERROR).stream() << prefixed_msg;
+ }
+ if (!msg_callback_.is_null()) {
+ msg_callback_.Run(0, prefixed_msg);
+ }
+ } else {
+ if (log_message_count_ == kMaxLogMessages) {
+ ++log_message_count_;
+ LOG(ERROR)
+ << "Too many GL errors, not reporting any more for this context."
+ << " use --disable-gl-error-limit to see all errors.";
+ }
+ }
+}
+
+const std::string& Logger::GetLogPrefix() const {
+ const std::string& prefix(debug_marker_manager_->GetMarker());
+ return prefix.empty() ? this_in_hex_ : prefix;
+}
+
+void Logger::SetMsgCallback(const MsgCallback& callback) {
+ msg_callback_ = callback;
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/logger.h b/gpu/command_buffer/service/logger.h
new file mode 100644
index 0000000..4691443
--- /dev/null
+++ b/gpu/command_buffer/service/logger.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the Logger class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_LOGGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_LOGGER_H_
+
+#include <string>
+
+#include "base/callback.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+typedef base::Callback<void(int32 id, const std::string& msg)> MsgCallback;
+
+class DebugMarkerManager;
+
+class GPU_EXPORT Logger {
+ public:
+ static const int kMaxLogMessages = 256;
+
+ explicit Logger(const DebugMarkerManager* debug_marker_manager);
+ ~Logger();
+
+ void LogMessage(const char* filename, int line, const std::string& msg);
+ const std::string& GetLogPrefix() const;
+
+  // Defaults to true. Set to false for the gpu_unittests, which explicitly
+  // check that errors are generated and so don't need the numerous messages.
+  // Otherwise, chromium code that generates these errors likely has a bug.
+ void set_log_synthesized_gl_errors(bool enabled) {
+ log_synthesized_gl_errors_ = enabled;
+ }
+
+ void SetMsgCallback(const MsgCallback& callback);
+
+ private:
+ // Uses the current marker to add information to logs.
+ const DebugMarkerManager* debug_marker_manager_;
+ std::string this_in_hex_;
+
+ int log_message_count_;
+ bool log_synthesized_gl_errors_;
+
+ MsgCallback msg_callback_;
+ DISALLOW_COPY_AND_ASSIGN(Logger);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_LOGGER_H_
+
diff --git a/gpu/command_buffer/service/mailbox_manager.cc b/gpu/command_buffer/service/mailbox_manager.cc
new file mode 100644
index 0000000..e6962df
--- /dev/null
+++ b/gpu/command_buffer/service/mailbox_manager.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/mailbox_manager.h"
+
+#include <algorithm>
+
+#include "crypto/random.h"
+#include "gpu/command_buffer/service/mailbox_synchronizer.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+MailboxManager::MailboxManager()
+ : mailbox_to_textures_(std::ptr_fun(&MailboxManager::TargetNameLess)),
+ sync_(MailboxSynchronizer::GetInstance()) {
+}
+
+MailboxManager::~MailboxManager() {
+ DCHECK(mailbox_to_textures_.empty());
+ DCHECK(textures_to_mailboxes_.empty());
+}
+
+Texture* MailboxManager::ConsumeTexture(unsigned target,
+ const Mailbox& mailbox) {
+ TargetName target_name(target, mailbox);
+ MailboxToTextureMap::iterator it =
+ mailbox_to_textures_.find(target_name);
+ if (it != mailbox_to_textures_.end())
+ return it->second->first;
+
+ if (sync_) {
+ // See if it's visible in another mailbox manager, and if so make it visible
+ // here too.
+ Texture* texture = sync_->CreateTextureFromMailbox(target, mailbox);
+ if (texture) {
+ InsertTexture(target_name, texture);
+ DCHECK_EQ(0U, texture->refs_.size());
+ }
+ return texture;
+ }
+
+ return NULL;
+}
+
+void MailboxManager::ProduceTexture(unsigned target,
+ const Mailbox& mailbox,
+ Texture* texture) {
+ TargetName target_name(target, mailbox);
+ MailboxToTextureMap::iterator it = mailbox_to_textures_.find(target_name);
+ if (it != mailbox_to_textures_.end()) {
+ if (it->second->first == texture)
+ return;
+ TextureToMailboxMap::iterator texture_it = it->second;
+ mailbox_to_textures_.erase(it);
+ textures_to_mailboxes_.erase(texture_it);
+ }
+ InsertTexture(target_name, texture);
+}
+
+void MailboxManager::InsertTexture(TargetName target_name, Texture* texture) {
+ texture->SetMailboxManager(this);
+ TextureToMailboxMap::iterator texture_it =
+ textures_to_mailboxes_.insert(std::make_pair(texture, target_name));
+ mailbox_to_textures_.insert(std::make_pair(target_name, texture_it));
+ DCHECK_EQ(mailbox_to_textures_.size(), textures_to_mailboxes_.size());
+}
+
+void MailboxManager::TextureDeleted(Texture* texture) {
+ std::pair<TextureToMailboxMap::iterator,
+ TextureToMailboxMap::iterator> range =
+ textures_to_mailboxes_.equal_range(texture);
+ for (TextureToMailboxMap::iterator it = range.first;
+ it != range.second; ++it) {
+ size_t count = mailbox_to_textures_.erase(it->second);
+ DCHECK(count == 1);
+ }
+ textures_to_mailboxes_.erase(range.first, range.second);
+ DCHECK_EQ(mailbox_to_textures_.size(), textures_to_mailboxes_.size());
+
+ if (sync_)
+ sync_->TextureDeleted(texture);
+}
+
+void MailboxManager::PushTextureUpdates() {
+ if (sync_)
+ sync_->PushTextureUpdates(this);
+}
+
+void MailboxManager::PullTextureUpdates() {
+ if (sync_)
+ sync_->PullTextureUpdates(this);
+}
+
+MailboxManager::TargetName::TargetName(unsigned target, const Mailbox& mailbox)
+ : target(target),
+ mailbox(mailbox) {
+}
+
+bool MailboxManager::TargetNameLess(const MailboxManager::TargetName& lhs,
+ const MailboxManager::TargetName& rhs) {
+ if (lhs.target != rhs.target)
+ return lhs.target < rhs.target;
+ return lhs.mailbox < rhs.mailbox;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/mailbox_manager.h b/gpu/command_buffer/service/mailbox_manager.h
new file mode 100644
index 0000000..e1b36cb
--- /dev/null
+++ b/gpu/command_buffer/service/mailbox_manager.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MAILBOX_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MAILBOX_MANAGER_H_
+
+#include <functional>
+#include <map>
+
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/gpu_export.h"
+
+typedef signed char GLbyte;
+
+namespace gpu {
+namespace gles2 {
+
+class MailboxSynchronizer;
+class Texture;
+class TextureManager;
+
+// Manages resources scoped beyond the context or context group level.
+class GPU_EXPORT MailboxManager : public base::RefCounted<MailboxManager> {
+ public:
+ MailboxManager();
+
+ // Look up the texture definition from the named mailbox.
+ Texture* ConsumeTexture(unsigned target, const Mailbox& mailbox);
+
+ // Put the texture into the named mailbox.
+ void ProduceTexture(unsigned target,
+ const Mailbox& mailbox,
+ Texture* texture);
+
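+  // Typical flow (illustrative sketch): the producing context calls
+  //   manager->ProduceTexture(GL_TEXTURE_2D, mailbox, texture);
+  // and another context in the same process later calls
+  //   Texture* shared = manager->ConsumeTexture(GL_TEXTURE_2D, mailbox);
+  // See mailbox_manager_unittest.cc in this change for complete examples.
+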
+ // Returns whether this manager synchronizes with other instances.
+ bool UsesSync() { return sync_ != NULL; }
+
+ // Used with the MailboxSynchronizer to push/pull texture state to/from
+ // other manager instances.
+ void PushTextureUpdates();
+ void PullTextureUpdates();
+
+  // Destroy any mailboxes that reference the given texture.
+ void TextureDeleted(Texture* texture);
+
+ private:
+ friend class base::RefCounted<MailboxManager>;
+ friend class MailboxSynchronizer;
+
+ ~MailboxManager();
+
+ struct TargetName {
+ TargetName(unsigned target, const Mailbox& mailbox);
+ unsigned target;
+ Mailbox mailbox;
+ };
+ void InsertTexture(TargetName target_name, Texture* texture);
+
+ static bool TargetNameLess(const TargetName& lhs, const TargetName& rhs);
+
+  // This is a bidirectional map between mailboxes and textures. We can have
+  // multiple mailboxes per texture, but only one texture per mailbox. We keep
+  // an iterator in the MailboxToTextureMap so we can manage changes to the
+  // TextureToMailboxMap efficiently.
+ typedef std::multimap<Texture*, TargetName> TextureToMailboxMap;
+ typedef std::map<TargetName,
+ TextureToMailboxMap::iterator,
+ std::pointer_to_binary_function<const TargetName&,
+ const TargetName&,
+ bool> > MailboxToTextureMap;
+
+ MailboxToTextureMap mailbox_to_textures_;
+ TextureToMailboxMap textures_to_mailboxes_;
+
+ MailboxSynchronizer* sync_;
+
+ DISALLOW_COPY_AND_ASSIGN(MailboxManager);
+};
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MAILBOX_MANAGER_H_
+
diff --git a/gpu/command_buffer/service/mailbox_manager_unittest.cc b/gpu/command_buffer/service/mailbox_manager_unittest.cc
new file mode 100644
index 0000000..df1cd4e
--- /dev/null
+++ b/gpu/command_buffer/service/mailbox_manager_unittest.cc
@@ -0,0 +1,479 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/mailbox_manager.h"
+
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mailbox_synchronizer.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_context_stub.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+namespace gpu {
+namespace gles2 {
+
+using namespace ::testing;
+
+class MailboxManagerTest : public GpuServiceTest {
+ public:
+ MailboxManagerTest() : initialized_synchronizer_(false) {}
+ virtual ~MailboxManagerTest() {}
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ feature_info_ = new FeatureInfo;
+ manager_ = new MailboxManager;
+ }
+
+ virtual void SetUpWithSynchronizer() {
+ GpuServiceTest::SetUp();
+ MailboxSynchronizer::Initialize();
+ initialized_synchronizer_ = true;
+ feature_info_ = new FeatureInfo;
+ manager_ = new MailboxManager;
+ }
+
+ virtual void TearDown() {
+ if (initialized_synchronizer_)
+ MailboxSynchronizer::Terminate();
+ GpuServiceTest::TearDown();
+ }
+
+ Texture* CreateTexture() {
+ return new Texture(1);
+ }
+
+ void SetTarget(Texture* texture, GLenum target, GLuint max_level) {
+ texture->SetTarget(NULL, target, max_level);
+ }
+
+ void SetLevelInfo(
+ Texture* texture,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared) {
+ texture->SetLevelInfo(NULL,
+ target,
+ level,
+ internal_format,
+ width,
+ height,
+ depth,
+ border,
+ format,
+ type,
+ cleared);
+ }
+
+ GLenum SetParameter(Texture* texture, GLenum pname, GLint param) {
+ return texture->SetParameteri(feature_info_.get(), pname, param);
+ }
+
+ void DestroyTexture(Texture* texture) {
+ delete texture;
+ }
+
+ scoped_refptr<MailboxManager> manager_;
+
+ private:
+ bool initialized_synchronizer_;
+ scoped_refptr<FeatureInfo> feature_info_;
+
+ DISALLOW_COPY_AND_ASSIGN(MailboxManagerTest);
+};
+
+// Tests basic produce/consume behavior.
+TEST_F(MailboxManagerTest, Basic) {
+ Texture* texture = CreateTexture();
+
+ Mailbox name = Mailbox::Generate();
+ manager_->ProduceTexture(0, name, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name));
+
+ // We can consume multiple times.
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name));
+
+ // Wrong target should fail the consume.
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(1, name));
+
+ // Destroy should cleanup the mailbox.
+ DestroyTexture(texture);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name));
+}
+
+// Tests behavior with multiple produce on the same texture.
+TEST_F(MailboxManagerTest, ProduceMultipleMailbox) {
+ Texture* texture = CreateTexture();
+
+ Mailbox name1 = Mailbox::Generate();
+
+ manager_->ProduceTexture(0, name1, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name1));
+
+ // Can produce a second time with the same mailbox.
+ manager_->ProduceTexture(0, name1, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name1));
+
+ // Can produce again, with a different mailbox.
+ Mailbox name2 = Mailbox::Generate();
+ manager_->ProduceTexture(0, name2, texture);
+
+ // Still available under all mailboxes.
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name1));
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name2));
+
+ // Destroy should cleanup all mailboxes.
+ DestroyTexture(texture);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name1));
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name2));
+}
+
+// Tests behavior with multiple produce on the same mailbox with different
+// textures.
+TEST_F(MailboxManagerTest, ProduceMultipleTexture) {
+ Texture* texture1 = CreateTexture();
+ Texture* texture2 = CreateTexture();
+
+ Mailbox name = Mailbox::Generate();
+
+ manager_->ProduceTexture(0, name, texture1);
+ EXPECT_EQ(texture1, manager_->ConsumeTexture(0, name));
+
+ // Can produce a second time with the same mailbox, but different texture.
+ manager_->ProduceTexture(0, name, texture2);
+ EXPECT_EQ(texture2, manager_->ConsumeTexture(0, name));
+
+ // Destroying the texture that's under no mailbox shouldn't have an effect.
+ DestroyTexture(texture1);
+ EXPECT_EQ(texture2, manager_->ConsumeTexture(0, name));
+
+ // Destroying the texture that's bound should clean up.
+ DestroyTexture(texture2);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name));
+}
+
+TEST_F(MailboxManagerTest, ProduceMultipleTextureMailbox) {
+ Texture* texture1 = CreateTexture();
+ Texture* texture2 = CreateTexture();
+ Mailbox name1 = Mailbox::Generate();
+ Mailbox name2 = Mailbox::Generate();
+
+ // Put texture1 on name1 and name2.
+ manager_->ProduceTexture(0, name1, texture1);
+ manager_->ProduceTexture(0, name2, texture1);
+ EXPECT_EQ(texture1, manager_->ConsumeTexture(0, name1));
+ EXPECT_EQ(texture1, manager_->ConsumeTexture(0, name2));
+
+ // Put texture2 on name2.
+ manager_->ProduceTexture(0, name2, texture2);
+ EXPECT_EQ(texture1, manager_->ConsumeTexture(0, name1));
+ EXPECT_EQ(texture2, manager_->ConsumeTexture(0, name2));
+
+ // Destroy texture1, shouldn't affect name2.
+ DestroyTexture(texture1);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name1));
+ EXPECT_EQ(texture2, manager_->ConsumeTexture(0, name2));
+
+ DestroyTexture(texture2);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name2));
+}
+
+const GLsizei kMaxTextureWidth = 64;
+const GLsizei kMaxTextureHeight = 64;
+const GLsizei kMaxTextureDepth = 1;
+
+class MailboxManagerSyncTest : public MailboxManagerTest {
+ public:
+ MailboxManagerSyncTest() {}
+ virtual ~MailboxManagerSyncTest() {}
+
+ protected:
+ virtual void SetUp() {
+ MailboxManagerTest::SetUpWithSynchronizer();
+ manager2_ = new MailboxManager;
+ context_ = new gfx::GLContextStub();
+ surface_ = new gfx::GLSurfaceStub();
+ context_->MakeCurrent(surface_.get());
+ }
+
+ Texture* DefineTexture() {
+ Texture* texture = CreateTexture();
+ const GLsizei levels_needed = TextureManager::ComputeMipMapCount(
+ GL_TEXTURE_2D, kMaxTextureWidth, kMaxTextureHeight, kMaxTextureDepth);
+ SetTarget(texture, GL_TEXTURE_2D, levels_needed);
+ SetLevelInfo(texture,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ SetParameter(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ SetParameter(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ return texture;
+ }
+
+ void SetupUpdateTexParamExpectations(GLuint texture_id,
+ GLenum min,
+ GLenum mag,
+ GLenum wrap_s,
+ GLenum wrap_t) {
+ DCHECK(texture_id);
+ const GLuint kCurrentTexture = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(GL_TEXTURE_BINDING_2D, _))
+ .WillOnce(SetArgPointee<1>(kCurrentTexture))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, texture_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, min))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, mag))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrap_s))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrap_t))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, Flush())
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kCurrentTexture))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+
+ virtual void TearDown() {
+ context_->ReleaseCurrent(NULL);
+ MailboxManagerTest::TearDown();
+ }
+
+ scoped_refptr<MailboxManager> manager2_;
+ scoped_refptr<gfx::GLContext> context_;
+ scoped_refptr<gfx::GLSurface> surface_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MailboxManagerSyncTest);
+};
+
+TEST_F(MailboxManagerSyncTest, ProduceDestroy) {
+ Texture* texture = DefineTexture();
+ Mailbox name = Mailbox::Generate();
+
+ InSequence sequence;
+ manager_->ProduceTexture(GL_TEXTURE_2D, name, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+
+ DestroyTexture(texture);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+ EXPECT_EQ(NULL, manager2_->ConsumeTexture(GL_TEXTURE_2D, name));
+}
+
+TEST_F(MailboxManagerSyncTest, ProduceSyncDestroy) {
+ InSequence sequence;
+
+ Texture* texture = DefineTexture();
+ Mailbox name = Mailbox::Generate();
+
+ manager_->ProduceTexture(GL_TEXTURE_2D, name, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+
+ // Synchronize
+ manager_->PushTextureUpdates();
+ manager2_->PullTextureUpdates();
+
+ DestroyTexture(texture);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+ EXPECT_EQ(NULL, manager2_->ConsumeTexture(GL_TEXTURE_2D, name));
+}
+
+// Duplicates a texture into a second manager instance, and then
+// makes sure a redefinition becomes visible there too.
+TEST_F(MailboxManagerSyncTest, ProduceConsumeResize) {
+ const GLuint kNewTextureId = 1234;
+ InSequence sequence;
+
+ Texture* texture = DefineTexture();
+ Mailbox name = Mailbox::Generate();
+
+ manager_->ProduceTexture(GL_TEXTURE_2D, name, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+
+ // Synchronize
+ manager_->PushTextureUpdates();
+ manager2_->PullTextureUpdates();
+
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgPointee<1>(kNewTextureId));
+ SetupUpdateTexParamExpectations(
+ kNewTextureId, GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ Texture* new_texture = manager2_->ConsumeTexture(GL_TEXTURE_2D, name);
+ EXPECT_FALSE(new_texture == NULL);
+ EXPECT_NE(texture, new_texture);
+ EXPECT_EQ(kNewTextureId, new_texture->service_id());
+
+ // Resize original texture
+ SetLevelInfo(texture,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 16,
+ 32,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ // Should have been orphaned
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+
+ // Synchronize again
+ manager_->PushTextureUpdates();
+ SetupUpdateTexParamExpectations(
+ kNewTextureId, GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ manager2_->PullTextureUpdates();
+ GLsizei width, height;
+ new_texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height);
+ EXPECT_EQ(16, width);
+ EXPECT_EQ(32, height);
+
+ // Should have gotten a new attachment
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) != NULL);
+ // Resize original texture again....
+ SetLevelInfo(texture,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 64,
+ 64,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ // ...and immediately delete the texture which should save the changes.
+ SetupUpdateTexParamExpectations(
+ kNewTextureId, GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ DestroyTexture(texture);
+
+ // Should be still around since there is a ref from manager2
+ EXPECT_EQ(new_texture, manager2_->ConsumeTexture(GL_TEXTURE_2D, name));
+
+  // The last change to the texture should be visible without a sync point
+  // (i.e. without an explicit push).
+ manager2_->PullTextureUpdates();
+ new_texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height);
+ EXPECT_EQ(64, width);
+ EXPECT_EQ(64, height);
+
+ DestroyTexture(new_texture);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+ EXPECT_EQ(NULL, manager2_->ConsumeTexture(GL_TEXTURE_2D, name));
+}
+
+// Makes sure changes are correctly published even when updates are
+// pushed in both directions, i.e. makes sure we don't clobber a shared
+// texture definition with an older version.
+TEST_F(MailboxManagerSyncTest, ProduceConsumeBidirectional) {
+ const GLuint kNewTextureId1 = 1234;
+ const GLuint kNewTextureId2 = 4321;
+
+ Texture* texture1 = DefineTexture();
+ Mailbox name1 = Mailbox::Generate();
+ Texture* texture2 = DefineTexture();
+ Mailbox name2 = Mailbox::Generate();
+ Texture* new_texture1 = NULL;
+ Texture* new_texture2 = NULL;
+
+ manager_->ProduceTexture(GL_TEXTURE_2D, name1, texture1);
+ manager2_->ProduceTexture(GL_TEXTURE_2D, name2, texture2);
+
+ // Make visible.
+ manager_->PushTextureUpdates();
+ manager2_->PushTextureUpdates();
+
+ // Create textures in the other manager instances for texture1 and texture2,
+ // respectively to create a real sharing scenario. Otherwise, there would
+ // never be conflicting updates/pushes.
+ {
+ InSequence sequence;
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgPointee<1>(kNewTextureId1));
+ SetupUpdateTexParamExpectations(
+ kNewTextureId1, GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ new_texture1 = manager2_->ConsumeTexture(GL_TEXTURE_2D, name1);
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgPointee<1>(kNewTextureId2));
+ SetupUpdateTexParamExpectations(
+ kNewTextureId2, GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ new_texture2 = manager_->ConsumeTexture(GL_TEXTURE_2D, name2);
+ }
+ EXPECT_EQ(kNewTextureId1, new_texture1->service_id());
+ EXPECT_EQ(kNewTextureId2, new_texture2->service_id());
+
+ // Make a change to texture1
+ DCHECK_EQ(static_cast<GLuint>(GL_LINEAR), texture1->min_filter());
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR),
+ SetParameter(texture1, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
+
+ // Make sure this does not clobber it with the previous version we pushed.
+ manager_->PullTextureUpdates();
+
+ // Make a change to texture2
+ DCHECK_EQ(static_cast<GLuint>(GL_LINEAR), texture2->mag_filter());
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR),
+ SetParameter(texture2, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
+
+ Mock::VerifyAndClearExpectations(gl_.get());
+
+ // Synchronize in both directions
+ manager_->PushTextureUpdates();
+ manager2_->PushTextureUpdates();
+ // manager1 should see the change to texture2 mag_filter being applied.
+ SetupUpdateTexParamExpectations(
+ new_texture2->service_id(), GL_LINEAR, GL_NEAREST, GL_REPEAT, GL_REPEAT);
+ manager_->PullTextureUpdates();
+ // manager2 should see the change to texture1 min_filter being applied.
+ SetupUpdateTexParamExpectations(
+ new_texture1->service_id(), GL_NEAREST, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ manager2_->PullTextureUpdates();
+
+ DestroyTexture(texture1);
+ DestroyTexture(texture2);
+ DestroyTexture(new_texture1);
+ DestroyTexture(new_texture2);
+}
+
+// TODO: different texture into same mailbox
+
+// TODO: same texture, multiple mailboxes
+
+// TODO: Produce incomplete texture
+
+// TODO: Texture::level_infos_[][].size()
+
+// TODO: unsupported targets and formats
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/mailbox_synchronizer.cc b/gpu/command_buffer/service/mailbox_synchronizer.cc
new file mode 100644
index 0000000..eac31f9
--- /dev/null
+++ b/gpu/command_buffer/service/mailbox_synchronizer.cc
@@ -0,0 +1,231 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/mailbox_synchronizer.h"
+
+#include "base/bind.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+MailboxSynchronizer* g_instance = NULL;
+
+} // anonymous namespace
+
+// static
+bool MailboxSynchronizer::Initialize() {
+ DCHECK(!g_instance);
+ DCHECK(gfx::GetGLImplementation() != gfx::kGLImplementationNone)
+ << "GL bindings not initialized";
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationMockGL:
+ break;
+ case gfx::kGLImplementationEGLGLES2:
+#if !defined(OS_MACOSX)
+ {
+ if (!gfx::g_driver_egl.ext.b_EGL_KHR_image_base ||
+ !gfx::g_driver_egl.ext.b_EGL_KHR_gl_texture_2D_image ||
+ !gfx::g_driver_gl.ext.b_GL_OES_EGL_image ||
+ !gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
+ LOG(WARNING) << "MailboxSync not supported due to missing EGL "
+ "image/fence support";
+ return false;
+ }
+ }
+ break;
+#endif
+ default:
+ NOTREACHED();
+ return false;
+ }
+ g_instance = new MailboxSynchronizer;
+ return true;
+}
+
+// static
+void MailboxSynchronizer::Terminate() {
+ DCHECK(g_instance);
+ delete g_instance;
+ g_instance = NULL;
+}
+
+// static
+MailboxSynchronizer* MailboxSynchronizer::GetInstance() {
+ return g_instance;
+}
+
+MailboxSynchronizer::TargetName::TargetName(unsigned target,
+ const Mailbox& mailbox)
+ : target(target), mailbox(mailbox) {}
+
+MailboxSynchronizer::TextureGroup::TextureGroup(
+ const TextureDefinition& definition)
+ : definition(definition) {}
+
+MailboxSynchronizer::TextureGroup::~TextureGroup() {}
+
+MailboxSynchronizer::TextureVersion::TextureVersion(
+ linked_ptr<TextureGroup> group)
+ : version(group->definition.version()), group(group) {}
+
+MailboxSynchronizer::TextureVersion::~TextureVersion() {}
+
+MailboxSynchronizer::MailboxSynchronizer() {}
+
+MailboxSynchronizer::~MailboxSynchronizer() {
+ DCHECK_EQ(0U, textures_.size());
+}
+
+void MailboxSynchronizer::ReassociateMailboxLocked(
+ const TargetName& target_name,
+ TextureGroup* group) {
+ lock_.AssertAcquired();
+ for (TextureMap::iterator it = textures_.begin(); it != textures_.end();
+ it++) {
+ std::set<TargetName>::iterator mb_it =
+ it->second.group->mailboxes.find(target_name);
+ if (it->second.group != group &&
+ mb_it != it->second.group->mailboxes.end()) {
+ it->second.group->mailboxes.erase(mb_it);
+ }
+ }
+ group->mailboxes.insert(target_name);
+}
+
+linked_ptr<MailboxSynchronizer::TextureGroup>
+MailboxSynchronizer::GetGroupForMailboxLocked(const TargetName& target_name) {
+ lock_.AssertAcquired();
+ for (TextureMap::iterator it = textures_.begin(); it != textures_.end();
+ it++) {
+ std::set<TargetName>::const_iterator mb_it =
+ it->second.group->mailboxes.find(target_name);
+ if (mb_it != it->second.group->mailboxes.end())
+ return it->second.group;
+ }
+ return make_linked_ptr<MailboxSynchronizer::TextureGroup>(NULL);
+}
+
+Texture* MailboxSynchronizer::CreateTextureFromMailbox(unsigned target,
+ const Mailbox& mailbox) {
+ base::AutoLock lock(lock_);
+ TargetName target_name(target, mailbox);
+ linked_ptr<TextureGroup> group = GetGroupForMailboxLocked(target_name);
+ if (group.get()) {
+ Texture* new_texture = group->definition.CreateTexture();
+ if (new_texture)
+ textures_.insert(std::make_pair(new_texture, TextureVersion(group)));
+ return new_texture;
+ }
+
+ return NULL;
+}
+
+void MailboxSynchronizer::TextureDeleted(Texture* texture) {
+ base::AutoLock lock(lock_);
+ TextureMap::iterator it = textures_.find(texture);
+ if (it != textures_.end()) {
+ // TODO: We could avoid the update if this was the last ref.
+ UpdateTextureLocked(it->first, it->second);
+ textures_.erase(it);
+ }
+}
+
+void MailboxSynchronizer::PushTextureUpdates(MailboxManager* manager) {
+ base::AutoLock lock(lock_);
+ for (MailboxManager::MailboxToTextureMap::const_iterator texture_it =
+ manager->mailbox_to_textures_.begin();
+ texture_it != manager->mailbox_to_textures_.end();
+ texture_it++) {
+ TargetName target_name(texture_it->first.target, texture_it->first.mailbox);
+ Texture* texture = texture_it->second->first;
+ // TODO(sievers): crbug.com/352274
+ // Should probably only fail if it already *has* mipmaps, while allowing
+ // incomplete textures here. Also reconsider how to fail otherwise.
+ bool needs_mips = texture->min_filter() != GL_NEAREST &&
+ texture->min_filter() != GL_LINEAR;
+ if (target_name.target != GL_TEXTURE_2D || needs_mips)
+ continue;
+
+ TextureMap::iterator it = textures_.find(texture);
+ if (it != textures_.end()) {
+ TextureVersion& texture_version = it->second;
+ TextureGroup* group = texture_version.group.get();
+ std::set<TargetName>::const_iterator mb_it =
+ group->mailboxes.find(target_name);
+ if (mb_it == group->mailboxes.end()) {
+ // We previously did not associate this texture with the given mailbox.
+ // Unlink other texture groups from the mailbox.
+ ReassociateMailboxLocked(target_name, group);
+ }
+ UpdateTextureLocked(texture, texture_version);
+
+ } else {
+ // Skip compositor resources/tile textures.
+ // TODO: Remove this, see crbug.com/399226.
+ if (texture->pool() == GL_TEXTURE_POOL_MANAGED_CHROMIUM)
+ continue;
+
+ linked_ptr<TextureGroup> group = make_linked_ptr(new TextureGroup(
+ TextureDefinition(target_name.target, texture, 1, NULL)));
+
+ // Unlink other textures from this mailbox in case the name is not new.
+ ReassociateMailboxLocked(target_name, group.get());
+ textures_.insert(std::make_pair(texture, TextureVersion(group)));
+ }
+ }
+}
+
+void MailboxSynchronizer::UpdateTextureLocked(Texture* texture,
+ TextureVersion& texture_version) {
+ lock_.AssertAcquired();
+ gfx::GLImage* gl_image = texture->GetLevelImage(texture->target(), 0);
+ TextureGroup* group = texture_version.group.get();
+ scoped_refptr<NativeImageBuffer> image_buffer = group->definition.image();
+
+ // Make sure we don't clobber with an older version
+ if (!group->definition.IsOlderThan(texture_version.version))
+ return;
+
+  // Also don't push redundant updates, since bumping the version without an
+  // actual change would break the versioning.
+ if (group->definition.Matches(texture))
+ return;
+
+ if (gl_image && !image_buffer->IsClient(gl_image)) {
+ LOG(ERROR) << "MailboxSync: Incompatible attachment";
+ return;
+ }
+
+ group->definition = TextureDefinition(texture->target(),
+ texture,
+ ++texture_version.version,
+ gl_image ? image_buffer : NULL);
+}
+
+void MailboxSynchronizer::PullTextureUpdates(MailboxManager* manager) {
+ base::AutoLock lock(lock_);
+ for (MailboxManager::MailboxToTextureMap::const_iterator texture_it =
+ manager->mailbox_to_textures_.begin();
+ texture_it != manager->mailbox_to_textures_.end();
+ texture_it++) {
+ Texture* texture = texture_it->second->first;
+ TextureMap::iterator it = textures_.find(texture);
+ if (it != textures_.end()) {
+ TextureDefinition& definition = it->second.group->definition;
+ if (it->second.version == definition.version() ||
+ definition.IsOlderThan(it->second.version))
+ continue;
+ it->second.version = definition.version();
+ definition.UpdateTexture(texture);
+ }
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/mailbox_synchronizer.h b/gpu/command_buffer/service/mailbox_synchronizer.h
new file mode 100644
index 0000000..a845963
--- /dev/null
+++ b/gpu/command_buffer/service/mailbox_synchronizer.h
@@ -0,0 +1,96 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MAILBOX_SYNCHRONIZER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MAILBOX_SYNCHRONIZER_H_
+
+#include "gpu/command_buffer/common/mailbox.h"
+
+#include <map>
+#include <set>
+
+#include "base/memory/linked_ptr.h"
+#include "base/synchronization/lock.h"
+#include "gpu/command_buffer/service/texture_definition.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class MailboxManager;
+class Texture;
+
+// A thread-safe proxy that can be used to emulate texture sharing across
+// share-groups.
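+//
+// Illustrative lifecycle (sketch only, based on the callers in this change):
+//
+//   MailboxSynchronizer::Initialize();   // once, before creating managers
+//   scoped_refptr<MailboxManager> manager_a(new MailboxManager);
+//   scoped_refptr<MailboxManager> manager_b(new MailboxManager);
+//   ...
+//   manager_a->PushTextureUpdates();     // e.g. when a sync point is retired
+//   manager_b->PullTextureUpdates();     // e.g. when a sync point is waited on
+//   ...
+//   MailboxSynchronizer::Terminate();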
+class MailboxSynchronizer {
+ public:
+ ~MailboxSynchronizer();
+
+ GPU_EXPORT static bool Initialize();
+ GPU_EXPORT static void Terminate();
+ static MailboxSynchronizer* GetInstance();
+
+ // Create a texture from a globally visible mailbox.
+ Texture* CreateTextureFromMailbox(unsigned target, const Mailbox& mailbox);
+
+ void PushTextureUpdates(MailboxManager* manager);
+ void PullTextureUpdates(MailboxManager* manager);
+
+ void TextureDeleted(Texture* texture);
+
+ private:
+ MailboxSynchronizer();
+
+ struct TargetName {
+ TargetName(unsigned target, const Mailbox& mailbox);
+ bool operator<(const TargetName& rhs) const {
+ return memcmp(this, &rhs, sizeof(rhs)) < 0;
+ }
+ bool operator!=(const TargetName& rhs) const {
+ return memcmp(this, &rhs, sizeof(rhs)) != 0;
+ }
+ bool operator==(const TargetName& rhs) const {
+ return !operator!=(rhs);
+ }
+ unsigned target;
+ Mailbox mailbox;
+ };
+
+ base::Lock lock_;
+
+ struct TextureGroup {
+ explicit TextureGroup(const TextureDefinition& definition);
+ ~TextureGroup();
+
+ TextureDefinition definition;
+ std::set<TargetName> mailboxes;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TextureGroup);
+ };
+
+ struct TextureVersion {
+ explicit TextureVersion(linked_ptr<TextureGroup> group);
+ ~TextureVersion();
+
+ unsigned int version;
+ linked_ptr<TextureGroup> group;
+ };
+ typedef std::map<Texture*, TextureVersion> TextureMap;
+ TextureMap textures_;
+
+ linked_ptr<TextureGroup> GetGroupForMailboxLocked(
+ const TargetName& target_name);
+ void ReassociateMailboxLocked(
+ const TargetName& target_name,
+ TextureGroup* group);
+ void UpdateTextureLocked(Texture* texture, TextureVersion& texture_version);
+
+ DISALLOW_COPY_AND_ASSIGN(MailboxSynchronizer);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MAILBOX_SYNCHRONIZER_H_
+
diff --git a/gpu/command_buffer/service/memory_program_cache.cc b/gpu/command_buffer/service/memory_program_cache.cc
new file mode 100644
index 0000000..87378aa
--- /dev/null
+++ b/gpu/command_buffer/service/memory_program_cache.cc
@@ -0,0 +1,370 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/memory_program_cache.h"
+
+#include "base/base64.h"
+#include "base/command_line.h"
+#include "base/metrics/histogram.h"
+#include "base/sha1.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/service/disk_cache_proto.pb.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace {
+
+size_t GetCacheSizeBytes() {
+ const CommandLine* command_line = CommandLine::ForCurrentProcess();
+ if (command_line->HasSwitch(switches::kGpuProgramCacheSizeKb)) {
+ size_t size;
+ if (base::StringToSizeT(
+ command_line->GetSwitchValueNative(switches::kGpuProgramCacheSizeKb),
+ &size))
+ return size * 1024;
+ }
+ return gpu::kDefaultMaxProgramCacheMemoryBytes;
+}
+
+} // anonymous namespace
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+enum ShaderMapType {
+ ATTRIB_MAP = 0,
+ UNIFORM_MAP,
+ VARYING_MAP
+};
+
+void StoreShaderInfo(ShaderMapType type, ShaderProto* proto,
+ const ShaderTranslator::VariableMap& map) {
+ ShaderTranslator::VariableMap::const_iterator iter;
+ for (iter = map.begin(); iter != map.end(); ++iter) {
+ ShaderInfoProto* info = NULL;
+ switch (type) {
+ case UNIFORM_MAP:
+ info = proto->add_uniforms();
+ break;
+ case ATTRIB_MAP:
+ info = proto->add_attribs();
+ break;
+ case VARYING_MAP:
+ info = proto->add_varyings();
+ break;
+ default: NOTREACHED();
+ }
+
+ info->set_key(iter->first);
+ info->set_type(iter->second.type);
+ info->set_size(iter->second.size);
+ info->set_precision(iter->second.precision);
+ info->set_static_use(iter->second.static_use);
+ info->set_name(iter->second.name);
+ }
+}
+
+void RetrieveShaderInfo(const ShaderInfoProto& proto,
+ ShaderTranslator::VariableMap* map) {
+ ShaderTranslator::VariableInfo info(
+ proto.type(), proto.size(), proto.precision(),
+ proto.static_use(), proto.name());
+ (*map)[proto.key()] = info;
+}
+
+void FillShaderProto(ShaderProto* proto, const char* sha,
+ const Shader* shader) {
+ proto->set_sha(sha, gpu::gles2::ProgramCache::kHashLength);
+ StoreShaderInfo(ATTRIB_MAP, proto, shader->attrib_map());
+ StoreShaderInfo(UNIFORM_MAP, proto, shader->uniform_map());
+ StoreShaderInfo(VARYING_MAP, proto, shader->varying_map());
+}
+
+void RunShaderCallback(const ShaderCacheCallback& callback,
+ GpuProgramProto* proto,
+ std::string sha_string) {
+ std::string shader;
+ proto->SerializeToString(&shader);
+
+ std::string key;
+ base::Base64Encode(sha_string, &key);
+ callback.Run(key, shader);
+}
+
+} // namespace
+
+MemoryProgramCache::MemoryProgramCache()
+ : max_size_bytes_(GetCacheSizeBytes()),
+ curr_size_bytes_(0),
+ store_(ProgramMRUCache::NO_AUTO_EVICT) {
+}
+
+MemoryProgramCache::MemoryProgramCache(const size_t max_cache_size_bytes)
+ : max_size_bytes_(max_cache_size_bytes),
+ curr_size_bytes_(0),
+ store_(ProgramMRUCache::NO_AUTO_EVICT) {
+}
+
+MemoryProgramCache::~MemoryProgramCache() {}
+
+void MemoryProgramCache::ClearBackend() {
+ store_.Clear();
+ DCHECK_EQ(0U, curr_size_bytes_);
+}
+
+ProgramCache::ProgramLoadResult MemoryProgramCache::LoadLinkedProgram(
+ GLuint program,
+ Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) {
+ char a_sha[kHashLength];
+ char b_sha[kHashLength];
+ DCHECK(shader_a && !shader_a->signature_source().empty() &&
+ shader_b && !shader_b->signature_source().empty());
+ ComputeShaderHash(
+ shader_a->signature_source(), translator_a, a_sha);
+ ComputeShaderHash(
+ shader_b->signature_source(), translator_b, b_sha);
+
+ char sha[kHashLength];
+ ComputeProgramHash(a_sha,
+ b_sha,
+ bind_attrib_location_map,
+ sha);
+ const std::string sha_string(sha, kHashLength);
+
+ ProgramMRUCache::iterator found = store_.Get(sha_string);
+ if (found == store_.end()) {
+ return PROGRAM_LOAD_FAILURE;
+ }
+ const scoped_refptr<ProgramCacheValue> value = found->second;
+ glProgramBinary(program,
+ value->format(),
+ static_cast<const GLvoid*>(value->data()),
+ value->length());
+ GLint success = 0;
+ glGetProgramiv(program, GL_LINK_STATUS, &success);
+ if (success == GL_FALSE) {
+ return PROGRAM_LOAD_FAILURE;
+ }
+ shader_a->set_attrib_map(value->attrib_map_0());
+ shader_a->set_uniform_map(value->uniform_map_0());
+ shader_a->set_varying_map(value->varying_map_0());
+ shader_b->set_attrib_map(value->attrib_map_1());
+ shader_b->set_uniform_map(value->uniform_map_1());
+ shader_b->set_varying_map(value->varying_map_1());
+
+ if (!shader_callback.is_null() &&
+ !CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableGpuShaderDiskCache)) {
+ scoped_ptr<GpuProgramProto> proto(
+ GpuProgramProto::default_instance().New());
+ proto->set_sha(sha, kHashLength);
+ proto->set_format(value->format());
+ proto->set_program(value->data(), value->length());
+
+ FillShaderProto(proto->mutable_vertex_shader(), a_sha, shader_a);
+ FillShaderProto(proto->mutable_fragment_shader(), b_sha, shader_b);
+ RunShaderCallback(shader_callback, proto.get(), sha_string);
+ }
+
+ return PROGRAM_LOAD_SUCCESS;
+}
+
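+// Reads the program binary back from GL, hashes the shaders and bind map,
+// evicts entries until the binary fits under |max_size_bytes_|, stores it in
+// the MRU cache and forwards a serialized copy to |shader_callback| unless
+// the GPU shader disk cache is disabled.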
+void MemoryProgramCache::SaveLinkedProgram(
+ GLuint program,
+ const Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) {
+ GLenum format;
+ GLsizei length = 0;
+ glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH_OES, &length);
+ if (length == 0 || static_cast<unsigned int>(length) > max_size_bytes_) {
+ return;
+ }
+ scoped_ptr<char[]> binary(new char[length]);
+ glGetProgramBinary(program,
+ length,
+ NULL,
+ &format,
+ binary.get());
+ UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.ProgramBinarySizeBytes", length);
+
+ char a_sha[kHashLength];
+ char b_sha[kHashLength];
+ DCHECK(shader_a && !shader_a->signature_source().empty() &&
+ shader_b && !shader_b->signature_source().empty());
+ ComputeShaderHash(
+ shader_a->signature_source(), translator_a, a_sha);
+ ComputeShaderHash(
+ shader_b->signature_source(), translator_b, b_sha);
+
+ char sha[kHashLength];
+ ComputeProgramHash(a_sha,
+ b_sha,
+ bind_attrib_location_map,
+ sha);
+ const std::string sha_string(sha, sizeof(sha));
+
+ UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.MemorySizeBeforeKb",
+ curr_size_bytes_ / 1024);
+
+  // Erase any existing entry with the same key so the new binary replaces it,
+  // then evict least recently used entries until the new program fits.
+  ProgramMRUCache::iterator existing = store_.Peek(sha_string);
+  if (existing != store_.end())
+    store_.Erase(existing);
+
+ while (curr_size_bytes_ + length > max_size_bytes_) {
+ DCHECK(!store_.empty());
+ store_.Erase(store_.rbegin());
+ }
+
+ if (!shader_callback.is_null() &&
+ !CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableGpuShaderDiskCache)) {
+ scoped_ptr<GpuProgramProto> proto(
+ GpuProgramProto::default_instance().New());
+ proto->set_sha(sha, kHashLength);
+ proto->set_format(format);
+ proto->set_program(binary.get(), length);
+
+ FillShaderProto(proto->mutable_vertex_shader(), a_sha, shader_a);
+ FillShaderProto(proto->mutable_fragment_shader(), b_sha, shader_b);
+ RunShaderCallback(shader_callback, proto.get(), sha_string);
+ }
+
+ store_.Put(sha_string,
+ new ProgramCacheValue(length,
+ format,
+ binary.release(),
+ sha_string,
+ a_sha,
+ shader_a->attrib_map(),
+ shader_a->uniform_map(),
+ shader_a->varying_map(),
+ b_sha,
+ shader_b->attrib_map(),
+ shader_b->uniform_map(),
+ shader_b->varying_map(),
+ this));
+
+ UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.MemorySizeAfterKb",
+ curr_size_bytes_ / 1024);
+}
+
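+// Repopulates the in-memory store from a serialized GpuProgramProto, such as
+// one previously emitted through the shader cache callback.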
+void MemoryProgramCache::LoadProgram(const std::string& program) {
+ scoped_ptr<GpuProgramProto> proto(GpuProgramProto::default_instance().New());
+ if (proto->ParseFromString(program)) {
+ ShaderTranslator::VariableMap vertex_attribs;
+ ShaderTranslator::VariableMap vertex_uniforms;
+ ShaderTranslator::VariableMap vertex_varyings;
+
+ for (int i = 0; i < proto->vertex_shader().attribs_size(); i++) {
+ RetrieveShaderInfo(proto->vertex_shader().attribs(i), &vertex_attribs);
+ }
+
+ for (int i = 0; i < proto->vertex_shader().uniforms_size(); i++) {
+ RetrieveShaderInfo(proto->vertex_shader().uniforms(i), &vertex_uniforms);
+ }
+
+ for (int i = 0; i < proto->vertex_shader().varyings_size(); i++) {
+ RetrieveShaderInfo(proto->vertex_shader().varyings(i), &vertex_varyings);
+ }
+
+ ShaderTranslator::VariableMap fragment_attribs;
+ ShaderTranslator::VariableMap fragment_uniforms;
+ ShaderTranslator::VariableMap fragment_varyings;
+
+ for (int i = 0; i < proto->fragment_shader().attribs_size(); i++) {
+ RetrieveShaderInfo(proto->fragment_shader().attribs(i),
+ &fragment_attribs);
+ }
+
+ for (int i = 0; i < proto->fragment_shader().uniforms_size(); i++) {
+ RetrieveShaderInfo(proto->fragment_shader().uniforms(i),
+ &fragment_uniforms);
+ }
+
+ for (int i = 0; i < proto->fragment_shader().varyings_size(); i++) {
+ RetrieveShaderInfo(proto->fragment_shader().varyings(i),
+ &fragment_varyings);
+ }
+
+ scoped_ptr<char[]> binary(new char[proto->program().length()]);
+ memcpy(binary.get(), proto->program().c_str(), proto->program().length());
+
+ store_.Put(proto->sha(),
+ new ProgramCacheValue(proto->program().length(),
+ proto->format(),
+ binary.release(),
+ proto->sha(),
+ proto->vertex_shader().sha().c_str(),
+ vertex_attribs,
+ vertex_uniforms,
+ vertex_varyings,
+ proto->fragment_shader().sha().c_str(),
+ fragment_attribs,
+ fragment_uniforms,
+ fragment_varyings,
+ this));
+
+ UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.MemorySizeAfterKb",
+ curr_size_bytes_ / 1024);
+ } else {
+ LOG(ERROR) << "Failed to parse proto file.";
+ }
+}
+
+MemoryProgramCache::ProgramCacheValue::ProgramCacheValue(
+ GLsizei length,
+ GLenum format,
+ const char* data,
+ const std::string& program_hash,
+ const char* shader_0_hash,
+ const ShaderTranslator::VariableMap& attrib_map_0,
+ const ShaderTranslator::VariableMap& uniform_map_0,
+ const ShaderTranslator::VariableMap& varying_map_0,
+ const char* shader_1_hash,
+ const ShaderTranslator::VariableMap& attrib_map_1,
+ const ShaderTranslator::VariableMap& uniform_map_1,
+ const ShaderTranslator::VariableMap& varying_map_1,
+ MemoryProgramCache* program_cache)
+ : length_(length),
+ format_(format),
+ data_(data),
+ program_hash_(program_hash),
+ shader_0_hash_(shader_0_hash, kHashLength),
+ attrib_map_0_(attrib_map_0),
+ uniform_map_0_(uniform_map_0),
+ varying_map_0_(varying_map_0),
+ shader_1_hash_(shader_1_hash, kHashLength),
+ attrib_map_1_(attrib_map_1),
+ uniform_map_1_(uniform_map_1),
+ varying_map_1_(varying_map_1),
+ program_cache_(program_cache) {
+ program_cache_->curr_size_bytes_ += length_;
+ program_cache_->LinkedProgramCacheSuccess(program_hash);
+}
+
+MemoryProgramCache::ProgramCacheValue::~ProgramCacheValue() {
+ program_cache_->curr_size_bytes_ -= length_;
+ program_cache_->Evict(program_hash_);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/memory_program_cache.h b/gpu/command_buffer/service/memory_program_cache.h
new file mode 100644
index 0000000..e72f9f5
--- /dev/null
+++ b/gpu/command_buffer/service/memory_program_cache.h
@@ -0,0 +1,148 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MEMORY_PROGRAM_CACHE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MEMORY_PROGRAM_CACHE_H_
+
+#include <map>
+#include <string>
+
+#include "base/containers/hash_tables.h"
+#include "base/containers/mru_cache.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/program_cache.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+
+namespace gpu {
+namespace gles2 {
+
+// Program cache that stores program binaries entirely in memory.
+class GPU_EXPORT MemoryProgramCache : public ProgramCache {
+ public:
+ MemoryProgramCache();
+ explicit MemoryProgramCache(const size_t max_cache_size_bytes);
+ virtual ~MemoryProgramCache();
+
+ virtual ProgramLoadResult LoadLinkedProgram(
+ GLuint program,
+ Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) OVERRIDE;
+ virtual void SaveLinkedProgram(
+ GLuint program,
+ const Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) OVERRIDE;
+
+ virtual void LoadProgram(const std::string& program) OVERRIDE;
+
+ private:
+ virtual void ClearBackend() OVERRIDE;
+
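+  // Refcounted cache entry holding the program binary, its format, the
+  // program and shader hashes, and both shaders' variable maps. Construction
+  // and destruction keep the owning cache's byte count and link-status map in
+  // sync.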
+ class ProgramCacheValue : public base::RefCounted<ProgramCacheValue> {
+ public:
+ ProgramCacheValue(GLsizei length,
+ GLenum format,
+ const char* data,
+ const std::string& program_hash,
+ const char* shader_0_hash,
+ const ShaderTranslator::VariableMap& attrib_map_0,
+ const ShaderTranslator::VariableMap& uniform_map_0,
+ const ShaderTranslator::VariableMap& varying_map_0,
+ const char* shader_1_hash,
+ const ShaderTranslator::VariableMap& attrib_map_1,
+ const ShaderTranslator::VariableMap& uniform_map_1,
+ const ShaderTranslator::VariableMap& varying_map_1,
+ MemoryProgramCache* program_cache);
+
+ GLsizei length() const {
+ return length_;
+ }
+
+ GLenum format() const {
+ return format_;
+ }
+
+ const char* data() const {
+ return data_.get();
+ }
+
+ const std::string& shader_0_hash() const {
+ return shader_0_hash_;
+ }
+
+ const ShaderTranslator::VariableMap& attrib_map_0() const {
+ return attrib_map_0_;
+ }
+
+ const ShaderTranslator::VariableMap& uniform_map_0() const {
+ return uniform_map_0_;
+ }
+
+ const ShaderTranslator::VariableMap& varying_map_0() const {
+ return varying_map_0_;
+ }
+
+ const std::string& shader_1_hash() const {
+ return shader_1_hash_;
+ }
+
+ const ShaderTranslator::VariableMap& attrib_map_1() const {
+ return attrib_map_1_;
+ }
+
+ const ShaderTranslator::VariableMap& uniform_map_1() const {
+ return uniform_map_1_;
+ }
+
+ const ShaderTranslator::VariableMap& varying_map_1() const {
+ return varying_map_1_;
+ }
+
+ private:
+ friend class base::RefCounted<ProgramCacheValue>;
+
+ ~ProgramCacheValue();
+
+ const GLsizei length_;
+ const GLenum format_;
+ const scoped_ptr<const char[]> data_;
+ const std::string program_hash_;
+ const std::string shader_0_hash_;
+ const ShaderTranslator::VariableMap attrib_map_0_;
+ const ShaderTranslator::VariableMap uniform_map_0_;
+ const ShaderTranslator::VariableMap varying_map_0_;
+ const std::string shader_1_hash_;
+ const ShaderTranslator::VariableMap attrib_map_1_;
+ const ShaderTranslator::VariableMap uniform_map_1_;
+ const ShaderTranslator::VariableMap varying_map_1_;
+ MemoryProgramCache* const program_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProgramCacheValue);
+ };
+
+ friend class ProgramCacheValue;
+
+ typedef base::MRUCache<std::string,
+ scoped_refptr<ProgramCacheValue> > ProgramMRUCache;
+
+ const size_t max_size_bytes_;
+ size_t curr_size_bytes_;
+ ProgramMRUCache store_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryProgramCache);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MEMORY_PROGRAM_CACHE_H_
diff --git a/gpu/command_buffer/service/memory_program_cache_unittest.cc b/gpu/command_buffer/service/memory_program_cache_unittest.cc
new file mode 100644
index 0000000..ba18ff4
--- /dev/null
+++ b/gpu/command_buffer/service/memory_program_cache_unittest.cc
@@ -0,0 +1,636 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/memory_program_cache.h"
+
+#include "base/bind.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::ElementsAreArray;
+using ::testing::Invoke;
+using ::testing::SetArgPointee;
+using ::testing::SetArrayArgument;
+
+namespace {
+typedef gpu::gles2::ShaderTranslator::VariableMap VariableMap;
+} // anonymous namespace
+
+namespace gpu {
+namespace gles2 {
+
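+// Stands in for the driver side of the program binary API: returns a fixed
+// binary from GetProgramBinary and checks that ProgramBinary receives the
+// same bytes.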
+class ProgramBinaryEmulator {
+ public:
+ ProgramBinaryEmulator(GLsizei length,
+ GLenum format,
+ const char* binary)
+ : length_(length),
+ format_(format),
+ binary_(binary) { }
+
+ void GetProgramBinary(GLuint program,
+ GLsizei buffer_size,
+ GLsizei* length,
+ GLenum* format,
+ GLvoid* binary) {
+ if (length) {
+ *length = length_;
+ }
+ *format = format_;
+ memcpy(binary, binary_, length_);
+ }
+
+ void ProgramBinary(GLuint program,
+ GLenum format,
+ const GLvoid* binary,
+ GLsizei length) {
+ // format and length are verified by matcher
+ EXPECT_EQ(0, memcmp(binary_, binary, length));
+ }
+
+ GLsizei length() const { return length_; }
+ GLenum format() const { return format_; }
+ const char* binary() const { return binary_; }
+
+ private:
+ GLsizei length_;
+ GLenum format_;
+ const char* binary_;
+};
+
+class MemoryProgramCacheTest : public GpuServiceTest {
+ public:
+ static const size_t kCacheSizeBytes = 1024;
+ static const GLuint kVertexShaderClientId = 90;
+ static const GLuint kVertexShaderServiceId = 100;
+ static const GLuint kFragmentShaderClientId = 91;
+  static const GLuint kFragmentShaderServiceId = 101;
+
+ MemoryProgramCacheTest()
+ : cache_(new MemoryProgramCache(kCacheSizeBytes)),
+ vertex_shader_(NULL),
+ fragment_shader_(NULL),
+ shader_cache_count_(0) { }
+ virtual ~MemoryProgramCacheTest() {
+ shader_manager_.Destroy(false);
+ }
+
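+  // Shader cache callback handed to the cache; counts invocations and keeps
+  // the most recently serialized program proto.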
+ void ShaderCacheCb(const std::string& key, const std::string& shader) {
+ shader_cache_count_++;
+ shader_cache_shader_ = shader;
+ }
+
+ int32 shader_cache_count() { return shader_cache_count_; }
+ const std::string& shader_cache_shader() { return shader_cache_shader_; }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+
+ vertex_shader_ = shader_manager_.CreateShader(kVertexShaderClientId,
+ kVertexShaderServiceId,
+ GL_VERTEX_SHADER);
+ fragment_shader_ = shader_manager_.CreateShader(
+ kFragmentShaderClientId,
+ kFragmentShaderServiceId,
+ GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(vertex_shader_ != NULL);
+ ASSERT_TRUE(fragment_shader_ != NULL);
+ typedef ShaderTranslatorInterface::VariableInfo VariableInfo;
+ typedef ShaderTranslator::VariableMap VariableMap;
+ VariableMap vertex_attrib_map;
+ VariableMap vertex_uniform_map;
+ VariableMap vertex_varying_map;
+ VariableMap fragment_attrib_map;
+ VariableMap fragment_uniform_map;
+ VariableMap fragment_varying_map;
+
+ vertex_attrib_map["a"] = VariableInfo(1, 34, SH_PRECISION_LOWP, 0, "a");
+ vertex_uniform_map["a"] = VariableInfo(0, 10, SH_PRECISION_MEDIUMP, 1, "a");
+ vertex_uniform_map["b"] = VariableInfo(2, 3114, SH_PRECISION_HIGHP, 1, "b");
+ vertex_varying_map["c"] = VariableInfo(3, 2, SH_PRECISION_HIGHP, 1, "c");
+ fragment_attrib_map["jjjbb"] =
+ VariableInfo(463, 1114, SH_PRECISION_MEDIUMP, 0, "jjjbb");
+ fragment_uniform_map["k"] =
+ VariableInfo(10, 34413, SH_PRECISION_MEDIUMP, 1, "k");
+ fragment_varying_map["c"] = VariableInfo(3, 2, SH_PRECISION_HIGHP, 1, "c");
+
+ vertex_shader_->set_source("bbbalsldkdkdkd");
+ fragment_shader_->set_source("bbbal sldkdkdkas 134 ad");
+
+ TestHelper::SetShaderStates(
+ gl_.get(), vertex_shader_, true, NULL, NULL,
+ &vertex_attrib_map, &vertex_uniform_map, &vertex_varying_map,
+ NULL);
+ TestHelper::SetShaderStates(
+ gl_.get(), fragment_shader_, true, NULL, NULL,
+ &fragment_attrib_map, &fragment_uniform_map, &fragment_varying_map,
+ NULL);
+ }
+
+ void SetExpectationsForSaveLinkedProgram(
+ const GLint program_id,
+ ProgramBinaryEmulator* emulator) const {
+ EXPECT_CALL(*gl_.get(),
+ GetProgramiv(program_id, GL_PROGRAM_BINARY_LENGTH_OES, _))
+ .WillOnce(SetArgPointee<2>(emulator->length()));
+ EXPECT_CALL(*gl_.get(),
+ GetProgramBinary(program_id, emulator->length(), _, _, _))
+ .WillOnce(Invoke(emulator, &ProgramBinaryEmulator::GetProgramBinary));
+ }
+
+ void SetExpectationsForLoadLinkedProgram(
+ const GLint program_id,
+ ProgramBinaryEmulator* emulator) const {
+ EXPECT_CALL(*gl_.get(),
+ ProgramBinary(program_id,
+ emulator->format(),
+ _,
+ emulator->length()))
+ .WillOnce(Invoke(emulator, &ProgramBinaryEmulator::ProgramBinary));
+ EXPECT_CALL(*gl_.get(),
+ GetProgramiv(program_id, GL_LINK_STATUS, _))
+ .WillOnce(SetArgPointee<2>(GL_TRUE));
+ }
+
+ void SetExpectationsForLoadLinkedProgramFailure(
+ const GLint program_id,
+ ProgramBinaryEmulator* emulator) const {
+ EXPECT_CALL(*gl_.get(),
+ ProgramBinary(program_id,
+ emulator->format(),
+ _,
+ emulator->length()))
+ .WillOnce(Invoke(emulator, &ProgramBinaryEmulator::ProgramBinary));
+ EXPECT_CALL(*gl_.get(),
+ GetProgramiv(program_id, GL_LINK_STATUS, _))
+ .WillOnce(SetArgPointee<2>(GL_FALSE));
+ }
+
+ scoped_ptr<MemoryProgramCache> cache_;
+ ShaderManager shader_manager_;
+ Shader* vertex_shader_;
+ Shader* fragment_shader_;
+ int32 shader_cache_count_;
+ std::string shader_cache_shader_;
+};
+
+TEST_F(MemoryProgramCacheTest, CacheSave) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+ EXPECT_EQ(1, shader_cache_count());
+}
+
+TEST_F(MemoryProgramCacheTest, LoadProgram) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+ EXPECT_EQ(1, shader_cache_count());
+
+ cache_->Clear();
+
+ cache_->LoadProgram(shader_cache_shader());
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+}
+
+TEST_F(MemoryProgramCacheTest, CacheLoadMatchesSave) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+ EXPECT_EQ(1, shader_cache_count());
+
+ VariableMap vertex_attrib_map = vertex_shader_->attrib_map();
+ VariableMap vertex_uniform_map = vertex_shader_->uniform_map();
+ VariableMap vertex_varying_map = vertex_shader_->varying_map();
+ VariableMap fragment_attrib_map = fragment_shader_->attrib_map();
+ VariableMap fragment_uniform_map = fragment_shader_->uniform_map();
+ VariableMap fragment_varying_map = fragment_shader_->varying_map();
+
+ vertex_shader_->set_attrib_map(VariableMap());
+ vertex_shader_->set_uniform_map(VariableMap());
+ vertex_shader_->set_varying_map(VariableMap());
+ fragment_shader_->set_attrib_map(VariableMap());
+ fragment_shader_->set_uniform_map(VariableMap());
+ fragment_shader_->set_varying_map(VariableMap());
+
+ SetExpectationsForLoadLinkedProgram(kProgramId, &emulator);
+
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_SUCCESS, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+
+  // Apparently the hash_map implementation on Android doesn't provide an
+  // equality operator.
+#if !defined(OS_ANDROID)
+ EXPECT_EQ(vertex_attrib_map, vertex_shader_->attrib_map());
+ EXPECT_EQ(vertex_uniform_map, vertex_shader_->uniform_map());
+ EXPECT_EQ(vertex_varying_map, vertex_shader_->varying_map());
+ EXPECT_EQ(fragment_attrib_map, fragment_shader_->attrib_map());
+ EXPECT_EQ(fragment_uniform_map, fragment_shader_->uniform_map());
+ EXPECT_EQ(fragment_varying_map, fragment_shader_->varying_map());
+#endif
+}
+
+TEST_F(MemoryProgramCacheTest, LoadProgramMatchesSave) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+ EXPECT_EQ(1, shader_cache_count());
+
+ VariableMap vertex_attrib_map = vertex_shader_->attrib_map();
+ VariableMap vertex_uniform_map = vertex_shader_->uniform_map();
+ VariableMap vertex_varying_map = vertex_shader_->varying_map();
+ VariableMap fragment_attrib_map = fragment_shader_->attrib_map();
+ VariableMap fragment_uniform_map = fragment_shader_->uniform_map();
+ VariableMap fragment_varying_map = fragment_shader_->varying_map();
+
+ vertex_shader_->set_attrib_map(VariableMap());
+ vertex_shader_->set_uniform_map(VariableMap());
+ vertex_shader_->set_varying_map(VariableMap());
+ fragment_shader_->set_attrib_map(VariableMap());
+ fragment_shader_->set_uniform_map(VariableMap());
+ fragment_shader_->set_varying_map(VariableMap());
+
+ SetExpectationsForLoadLinkedProgram(kProgramId, &emulator);
+
+ cache_->Clear();
+ cache_->LoadProgram(shader_cache_shader());
+
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_SUCCESS, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+
+  // Apparently the hash_map implementation on Android doesn't provide an
+  // equality operator.
+#if !defined(OS_ANDROID)
+ EXPECT_EQ(vertex_attrib_map, vertex_shader_->attrib_map());
+ EXPECT_EQ(vertex_uniform_map, vertex_shader_->uniform_map());
+ EXPECT_EQ(vertex_varying_map, vertex_shader_->varying_map());
+ EXPECT_EQ(fragment_attrib_map, fragment_shader_->attrib_map());
+ EXPECT_EQ(fragment_uniform_map, fragment_shader_->uniform_map());
+ EXPECT_EQ(fragment_varying_map, fragment_shader_->varying_map());
+#endif
+}
+
+TEST_F(MemoryProgramCacheTest, LoadFailOnLinkFalse) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ SetExpectationsForLoadLinkedProgramFailure(kProgramId, &emulator);
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
+TEST_F(MemoryProgramCacheTest, LoadFailOnDifferentSource) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ const std::string vertex_orig_source = vertex_shader_->signature_source();
+ vertex_shader_->set_source("different!");
+ TestHelper::SetShaderStates(gl_.get(), vertex_shader_, true);
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+
+ vertex_shader_->set_source(vertex_orig_source);
+ TestHelper::SetShaderStates(gl_.get(), vertex_shader_, true);
+ fragment_shader_->set_source("different!");
+ TestHelper::SetShaderStates(gl_.get(), fragment_shader_, true);
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
+TEST_F(MemoryProgramCacheTest, LoadFailOnDifferentMap) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ ProgramCache::LocationMap binding_map;
+ binding_map["test"] = 512;
+ cache_->SaveLinkedProgram(kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ &binding_map,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ binding_map["different!"] = 59;
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ &binding_map,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
+TEST_F(MemoryProgramCacheTest, MemoryProgramCacheEviction) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator1(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator1);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ const int kEvictingProgramId = 11;
+ const GLuint kEvictingBinaryLength = kCacheSizeBytes - kBinaryLength + 1;
+
+  // Save the old source (by value, since the shader is about to be modified)
+  // and change the source for the new program.
+  const std::string old_source = fragment_shader_->signature_source();
+ fragment_shader_->set_source("al sdfkjdk");
+ TestHelper::SetShaderStates(gl_.get(), fragment_shader_, true);
+
+  scoped_ptr<char[]> big_test_binary(new char[kEvictingBinaryLength]);
+  for (size_t i = 0; i < kEvictingBinaryLength; ++i) {
+    big_test_binary[i] = i % 250;
+  }
+  ProgramBinaryEmulator emulator2(kEvictingBinaryLength,
+                                  kFormat,
+                                  big_test_binary.get());
+
+ SetExpectationsForSaveLinkedProgram(kEvictingProgramId, &emulator2);
+ cache_->SaveLinkedProgram(kEvictingProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN, cache_->GetLinkedProgramStatus(
+ old_source,
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+}
+
+TEST_F(MemoryProgramCacheTest, SaveCorrectProgram) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator1(kBinaryLength, kFormat, test_binary);
+
+ vertex_shader_->set_source("different!");
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator1);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+}
+
+TEST_F(MemoryProgramCacheTest, LoadCorrectProgram) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+
+ SetExpectationsForLoadLinkedProgram(kProgramId, &emulator);
+
+ fragment_shader_->set_source("different!");
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_SUCCESS, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
+TEST_F(MemoryProgramCacheTest, OverwriteOnNewSave) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ char test_binary2[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary2[i] = (i*2) % 250;
+ }
+ ProgramBinaryEmulator emulator2(kBinaryLength, kFormat, test_binary2);
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator2);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ SetExpectationsForLoadLinkedProgram(kProgramId, &emulator2);
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_SUCCESS, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/memory_tracking.h b/gpu/command_buffer/service/memory_tracking.h
new file mode 100644
index 0000000..1514325
--- /dev/null
+++ b/gpu/command_buffer/service/memory_tracking.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MEMORY_TRACKING_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MEMORY_TRACKING_H_
+
+#include <string>
+#include "base/basictypes.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+
+namespace gpu {
+namespace gles2 {
+
+// A MemoryTracker is used to propagate per-ContextGroup memory usage
+// statistics to the global GpuMemoryManager.
+class MemoryTracker : public base::RefCounted<MemoryTracker> {
+ public:
+ enum Pool {
+ kUnmanaged,
+ kManaged
+ };
+
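+  // Informs the tracker that the amount of memory allocated in |pool| has
+  // changed from |old_size| to |new_size|.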
+ virtual void TrackMemoryAllocatedChange(size_t old_size,
+ size_t new_size,
+ Pool pool) = 0;
+
+ // Ensure a certain amount of GPU memory is free. Returns true on success.
+ virtual bool EnsureGPUMemoryAvailable(size_t size_needed) = 0;
+
+ protected:
+ friend class base::RefCounted<MemoryTracker>;
+ MemoryTracker() {}
+  virtual ~MemoryTracker() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MemoryTracker);
+};
+
+// A MemoryTypeTracker tracks the use of a particular type of memory (buffer,
+// texture, or renderbuffer) and forward the result to a specified
+// MemoryTracker.
+class MemoryTypeTracker {
+ public:
+ MemoryTypeTracker(MemoryTracker* memory_tracker, MemoryTracker::Pool pool)
+ : memory_tracker_(memory_tracker),
+ pool_(pool),
+ has_done_update_(false),
+ mem_represented_(0),
+ mem_represented_at_last_update_(0) {
+ UpdateMemRepresented();
+ }
+
+ ~MemoryTypeTracker() {
+ UpdateMemRepresented();
+ }
+
+ void TrackMemAlloc(size_t bytes) {
+ mem_represented_ += bytes;
+ UpdateMemRepresented();
+ }
+
+ void TrackMemFree(size_t bytes) {
+ DCHECK(bytes <= mem_represented_);
+ mem_represented_ -= bytes;
+ UpdateMemRepresented();
+ }
+
+ size_t GetMemRepresented() const {
+ return mem_represented_at_last_update_;
+ }
+
+ // Ensure a certain amount of GPU memory is free. Returns true on success.
+ bool EnsureGPUMemoryAvailable(size_t size_needed) {
+ if (memory_tracker_) {
+ return memory_tracker_->EnsureGPUMemoryAvailable(size_needed);
+ }
+ return true;
+ }
+
+ private:
+ void UpdateMemRepresented() {
+    // Skip the update only before the first report, while the tracked size
+    // has not changed from its initial value.
+ if (!has_done_update_ &&
+ mem_represented_ == mem_represented_at_last_update_) {
+ return;
+ }
+ if (memory_tracker_) {
+ memory_tracker_->TrackMemoryAllocatedChange(
+ mem_represented_at_last_update_,
+ mem_represented_,
+ pool_);
+ }
+ has_done_update_ = true;
+ mem_represented_at_last_update_ = mem_represented_;
+ }
+
+ MemoryTracker* memory_tracker_;
+ MemoryTracker::Pool pool_;
+ bool has_done_update_;
+ size_t mem_represented_;
+ size_t mem_represented_at_last_update_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryTypeTracker);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MEMORY_TRACKING_H_
diff --git a/gpu/command_buffer/service/mocks.cc b/gpu/command_buffer/service/mocks.cc
new file mode 100644
index 0000000..143ec0b
--- /dev/null
+++ b/gpu/command_buffer/service/mocks.cc
@@ -0,0 +1,61 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+
+using testing::Invoke;
+using testing::_;
+
+namespace gpu {
+
+AsyncAPIMock::AsyncAPIMock(bool default_do_commands) {
+ testing::DefaultValue<error::Error>::Set(
+ error::kNoError);
+
+ if (default_do_commands) {
+ ON_CALL(*this, DoCommands(_, _, _, _))
+ .WillByDefault(Invoke(this, &AsyncAPIMock::FakeDoCommands));
+ }
+}
+
+AsyncAPIMock::~AsyncAPIMock() {}
+
+error::Error AsyncAPIMock::FakeDoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ return AsyncAPIInterface::DoCommands(
+ num_commands, buffer, num_entries, entries_processed);
+}
+
+void AsyncAPIMock::SetToken(unsigned int command,
+ unsigned int arg_count,
+ const void* _args) {
+ DCHECK(engine_);
+ DCHECK_EQ(1u, command);
+ DCHECK_EQ(1u, arg_count);
+ const cmd::SetToken* args =
+ static_cast<const cmd::SetToken*>(_args);
+ engine_->set_token(args->token);
+}
+
+namespace gles2 {
+
+MockShaderTranslator::MockShaderTranslator() {}
+
+MockShaderTranslator::~MockShaderTranslator() {}
+
+MockProgramCache::MockProgramCache() {}
+MockProgramCache::~MockProgramCache() {}
+
+MockMemoryTracker::MockMemoryTracker() {}
+MockMemoryTracker::~MockMemoryTracker() {}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/mocks.h b/gpu/command_buffer/service/mocks.h
new file mode 100644
index 0000000..17c8401
--- /dev/null
+++ b/gpu/command_buffer/service/mocks.h
@@ -0,0 +1,156 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains definitions for mock objects, used for testing.
+
+// The mock objects here are defined with gMock.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MOCKS_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MOCKS_H_
+
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "gpu/command_buffer/service/cmd_parser.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/program_cache.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+
+// Mocks an AsyncAPIInterface, using GMock.
+class AsyncAPIMock : public AsyncAPIInterface {
+ public:
+ explicit AsyncAPIMock(bool default_do_commands);
+ virtual ~AsyncAPIMock();
+
+ error::Error FakeDoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed);
+
+ // Predicate that matches args passed to DoCommand, by looking at the values.
+ class IsArgs {
+ public:
+ IsArgs(unsigned int arg_count, const void* args)
+ : arg_count_(arg_count),
+ args_(static_cast<CommandBufferEntry*>(const_cast<void*>(args))) {
+ }
+
+ bool operator() (const void* _args) const {
+ const CommandBufferEntry* args =
+ static_cast<const CommandBufferEntry*>(_args) + 1;
+ for (unsigned int i = 0; i < arg_count_; ++i) {
+ if (args[i].value_uint32 != args_[i].value_uint32) return false;
+ }
+ return true;
+ }
+
+ private:
+ unsigned int arg_count_;
+ CommandBufferEntry *args_;
+ };
+
+ MOCK_METHOD3(DoCommand, error::Error(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data));
+
+ MOCK_METHOD4(DoCommands,
+ error::Error(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed));
+
+  const char* GetCommandName(unsigned int command_id) const {
+    return "";
+  }
+
+ // Sets the engine, to forward SetToken commands to it.
+ void set_engine(CommandBufferEngine *engine) { engine_ = engine; }
+
+ // Forwards the SetToken commands to the engine.
+ void SetToken(unsigned int command,
+ unsigned int arg_count,
+ const void* _args);
+
+ private:
+ CommandBufferEngine *engine_;
+};
+
+namespace gles2 {
+
+class MockShaderTranslator : public ShaderTranslatorInterface {
+ public:
+ MockShaderTranslator();
+ virtual ~MockShaderTranslator();
+
+ MOCK_METHOD5(Init, bool(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ GlslImplementationType glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds));
+ MOCK_CONST_METHOD7(Translate, bool(
+ const std::string& shader_source,
+ std::string* info_log,
+ std::string* translated_source,
+ VariableMap* attrib_map,
+ VariableMap* uniform_map,
+ VariableMap* varying_map,
+ NameMap* name_map));
+ MOCK_CONST_METHOD0(
+ GetStringForOptionsThatWouldAffectCompilation, std::string());
+};
+
+class MockProgramCache : public ProgramCache {
+ public:
+ MockProgramCache();
+ virtual ~MockProgramCache();
+
+ MOCK_METHOD7(LoadLinkedProgram, ProgramLoadResult(
+ GLuint program,
+ Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& callback));
+
+ MOCK_METHOD7(SaveLinkedProgram, void(
+ GLuint program,
+ const Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& callback));
+ MOCK_METHOD1(LoadProgram, void(const std::string&));
+
+ private:
+ MOCK_METHOD0(ClearBackend, void());
+};
+
+class MockMemoryTracker : public MemoryTracker {
+ public:
+ MockMemoryTracker();
+
+ MOCK_METHOD3(TrackMemoryAllocatedChange, void(
+ size_t old_size, size_t new_size, Pool pool));
+ MOCK_METHOD1(EnsureGPUMemoryAvailable, bool(size_t size_needed));
+
+ private:
+ friend class ::testing::StrictMock<MockMemoryTracker>;
+ friend class base::RefCounted< ::testing::StrictMock<MockMemoryTracker> >;
+ virtual ~MockMemoryTracker();
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MOCKS_H_
diff --git a/gpu/command_buffer/service/program_cache.cc b/gpu/command_buffer/service/program_cache.cc
new file mode 100644
index 0000000..ad395c7
--- /dev/null
+++ b/gpu/command_buffer/service/program_cache.cc
@@ -0,0 +1,136 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/program_cache.h"
+
+#include <string>
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+ProgramCache::ProgramCache() {}
+ProgramCache::~ProgramCache() {}
+
+void ProgramCache::Clear() {
+ ClearBackend();
+ link_status_.clear();
+}
+
+ProgramCache::LinkedProgramStatus ProgramCache::GetLinkedProgramStatus(
+ const std::string& untranslated_a,
+ const ShaderTranslatorInterface* translator_a,
+ const std::string& untranslated_b,
+ const ShaderTranslatorInterface* translator_b,
+ const std::map<std::string, GLint>* bind_attrib_location_map) const {
+ char a_sha[kHashLength];
+ char b_sha[kHashLength];
+ ComputeShaderHash(untranslated_a, translator_a, a_sha);
+ ComputeShaderHash(untranslated_b, translator_b, b_sha);
+
+ char sha[kHashLength];
+ ComputeProgramHash(a_sha,
+ b_sha,
+ bind_attrib_location_map,
+ sha);
+ const std::string sha_string(sha, kHashLength);
+
+ LinkStatusMap::const_iterator found = link_status_.find(sha_string);
+ if (found == link_status_.end()) {
+ return ProgramCache::LINK_UNKNOWN;
+ } else {
+ return found->second;
+ }
+}
+
+void ProgramCache::LinkedProgramCacheSuccess(
+ const std::string& shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const std::string& shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map) {
+ char a_sha[kHashLength];
+ char b_sha[kHashLength];
+ ComputeShaderHash(shader_a, translator_a, a_sha);
+ ComputeShaderHash(shader_b, translator_b, b_sha);
+ char sha[kHashLength];
+ ComputeProgramHash(a_sha,
+ b_sha,
+ bind_attrib_location_map,
+ sha);
+ const std::string sha_string(sha, kHashLength);
+
+ LinkedProgramCacheSuccess(sha_string);
+}
+
+void ProgramCache::LinkedProgramCacheSuccess(const std::string& program_hash) {
+ link_status_[program_hash] = LINK_SUCCEEDED;
+}
+
+void ProgramCache::ComputeShaderHash(
+ const std::string& str,
+ const ShaderTranslatorInterface* translator,
+ char* result) const {
+ std::string s((
+ translator ? translator->GetStringForOptionsThatWouldAffectCompilation() :
+ std::string()) + str);
+ base::SHA1HashBytes(reinterpret_cast<const unsigned char*>(s.c_str()),
+ s.length(), reinterpret_cast<unsigned char*>(result));
+}
+
+void ProgramCache::Evict(const std::string& program_hash) {
+ link_status_.erase(program_hash);
+}
+
+namespace {
+size_t CalculateMapSize(const std::map<std::string, GLint>* map) {
+ if (!map) {
+ return 0;
+ }
+ std::map<std::string, GLint>::const_iterator it;
+ size_t total = 0;
+ for (it = map->begin(); it != map->end(); ++it) {
+ total += 4 + it->first.length();
+ }
+ return total;
+}
+} // anonymous namespace
+
+void ProgramCache::ComputeProgramHash(
+ const char* hashed_shader_0,
+ const char* hashed_shader_1,
+ const std::map<std::string, GLint>* bind_attrib_location_map,
+ char* result) const {
+ const size_t shader0_size = kHashLength;
+ const size_t shader1_size = kHashLength;
+ const size_t map_size = CalculateMapSize(bind_attrib_location_map);
+ const size_t total_size = shader0_size + shader1_size + map_size;
+
+ scoped_ptr<unsigned char[]> buffer(new unsigned char[total_size]);
+ memcpy(buffer.get(), hashed_shader_0, shader0_size);
+ memcpy(&buffer[shader0_size], hashed_shader_1, shader1_size);
+ if (map_size != 0) {
+    // Append the bind attrib location map: each attribute name followed by
+    // its location encoded big-endian in four bytes.
+ size_t current_pos = shader0_size + shader1_size;
+ std::map<std::string, GLint>::const_iterator it;
+ for (it = bind_attrib_location_map->begin();
+ it != bind_attrib_location_map->end();
+ ++it) {
+ const size_t name_size = it->first.length();
+ memcpy(&buffer.get()[current_pos], it->first.c_str(), name_size);
+ current_pos += name_size;
+ const GLint value = it->second;
+ buffer[current_pos++] = value >> 24;
+ buffer[current_pos++] = value >> 16;
+ buffer[current_pos++] = value >> 8;
+ buffer[current_pos++] = value;
+ }
+ }
+ base::SHA1HashBytes(buffer.get(),
+ total_size, reinterpret_cast<unsigned char*>(result));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/program_cache.h b/gpu/command_buffer/service/program_cache.h
new file mode 100644
index 0000000..3fb5687
--- /dev/null
+++ b/gpu/command_buffer/service/program_cache.h
@@ -0,0 +1,118 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_PROGRAM_CACHE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_PROGRAM_CACHE_H_
+
+#include <map>
+#include <string>
+
+#include "base/containers/hash_tables.h"
+#include "base/sha1.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+class Shader;
+class ShaderTranslator;
+
+// Program cache base class for caching linked GPU programs.
+class GPU_EXPORT ProgramCache {
+ public:
+ static const size_t kHashLength = base::kSHA1Length;
+
+ typedef std::map<std::string, GLint> LocationMap;
+
+ enum LinkedProgramStatus {
+ LINK_UNKNOWN,
+ LINK_SUCCEEDED
+ };
+
+ enum ProgramLoadResult {
+ PROGRAM_LOAD_FAILURE,
+ PROGRAM_LOAD_SUCCESS
+ };
+
+ ProgramCache();
+ virtual ~ProgramCache();
+
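+  // Returns the cached link status for the program built from the two
+  // untranslated shader sources and the bind attrib location map, or
+  // LINK_UNKNOWN if no successful link has been recorded for it.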
+ LinkedProgramStatus GetLinkedProgramStatus(
+ const std::string& untranslated_shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const std::string& untranslated_shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map) const;
+
+ // Loads the linked program from the cache. If the program is not found or
+ // there was an error, PROGRAM_LOAD_FAILURE should be returned.
+ virtual ProgramLoadResult LoadLinkedProgram(
+ GLuint program,
+ Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) = 0;
+
+ // Saves the program into the cache. If successful, the implementation should
+ // call LinkedProgramCacheSuccess.
+ virtual void SaveLinkedProgram(
+ GLuint program,
+ const Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) = 0;
+
+ virtual void LoadProgram(const std::string& program) = 0;
+
+  // Clears the cache, including the backend store.
+ void Clear();
+
+ // Only for testing
+ void LinkedProgramCacheSuccess(const std::string& shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const std::string& shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map);
+
+ protected:
+  // Called by the implementing class after a program has been cached.
+ void LinkedProgramCacheSuccess(const std::string& program_hash);
+
+  // |result| is not null terminated.
+ void ComputeShaderHash(const std::string& shader,
+ const ShaderTranslatorInterface* translator,
+ char* result) const;
+
+  // |result| is not null terminated. Hashed shaders are expected to be
+  // kHashLength in length.
+ void ComputeProgramHash(
+ const char* hashed_shader_0,
+ const char* hashed_shader_1,
+ const LocationMap* bind_attrib_location_map,
+ char* result) const;
+
+ void Evict(const std::string& program_hash);
+
+ private:
+ typedef base::hash_map<std::string,
+ LinkedProgramStatus> LinkStatusMap;
+
+ // called to clear the backend cache
+ virtual void ClearBackend() = 0;
+
+ LinkStatusMap link_status_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProgramCache);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_PROGRAM_CACHE_H_
diff --git a/gpu/command_buffer/service/program_cache_unittest.cc b/gpu/command_buffer/service/program_cache_unittest.cc
new file mode 100644
index 0000000..4e2abc3
--- /dev/null
+++ b/gpu/command_buffer/service/program_cache_unittest.cc
@@ -0,0 +1,201 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/program_cache.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::Return;
+
+namespace gpu {
+namespace gles2 {
+
+class NoBackendProgramCache : public ProgramCache {
+ public:
+ virtual ProgramLoadResult LoadLinkedProgram(
+ GLuint /* program */,
+ Shader* /* shader_a */,
+ const ShaderTranslatorInterface* /* translator_a */,
+ Shader* /* shader_b */,
+ const ShaderTranslatorInterface* /* translator_b */,
+ const LocationMap* /* bind_attrib_location_map */,
+ const ShaderCacheCallback& /* callback */) OVERRIDE {
+ return PROGRAM_LOAD_SUCCESS;
+ }
+ virtual void SaveLinkedProgram(
+ GLuint /* program */,
+ const Shader* /* shader_a */,
+ const ShaderTranslatorInterface* /* translator_b */,
+ const Shader* /* shader_b */,
+ const ShaderTranslatorInterface* /* translator_b */,
+ const LocationMap* /* bind_attrib_location_map */,
+ const ShaderCacheCallback& /* callback */) OVERRIDE { }
+
+ virtual void LoadProgram(const std::string& /* program */) OVERRIDE {}
+
+ virtual void ClearBackend() OVERRIDE {}
+
+ void SaySuccessfullyCached(const std::string& shader1,
+ const ShaderTranslatorInterface* translator_1,
+ const std::string& shader2,
+ const ShaderTranslatorInterface* translator_2,
+ std::map<std::string, GLint>* attrib_map) {
+ char a_sha[kHashLength];
+ char b_sha[kHashLength];
+ ComputeShaderHash(shader1, translator_1, a_sha);
+ ComputeShaderHash(shader2, translator_2, b_sha);
+
+ char sha[kHashLength];
+ ComputeProgramHash(a_sha,
+ b_sha,
+ attrib_map,
+ sha);
+ const std::string shaString(sha, kHashLength);
+
+ LinkedProgramCacheSuccess(shaString);
+ }
+
+ void ComputeShaderHash(const std::string& shader,
+ const ShaderTranslatorInterface* translator,
+ char* result) const {
+ ProgramCache::ComputeShaderHash(shader, translator, result);
+ }
+
+ void ComputeProgramHash(const char* hashed_shader_0,
+ const char* hashed_shader_1,
+ const LocationMap* bind_attrib_location_map,
+ char* result) const {
+ ProgramCache::ComputeProgramHash(hashed_shader_0,
+ hashed_shader_1,
+ bind_attrib_location_map,
+ result);
+ }
+
+ void Evict(const std::string& program_hash) {
+ ProgramCache::Evict(program_hash);
+ }
+};
+
+class ProgramCacheTest : public testing::Test {
+ public:
+ ProgramCacheTest() :
+ cache_(new NoBackendProgramCache()) { }
+
+ protected:
+ scoped_ptr<NoBackendProgramCache> cache_;
+};
+
+TEST_F(ProgramCacheTest, LinkStatusSave) {
+ const std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ {
+ std::string shader_a = shader1;
+ std::string shader_b = shader2;
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(
+ shader_a, NULL, shader_b, NULL, NULL));
+ cache_->SaySuccessfullyCached(shader_a, NULL, shader_b, NULL, NULL);
+
+ shader_a.clear();
+ shader_b.clear();
+ }
+  // Make sure the cache copied the strings rather than holding references.
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED,
+ cache_->GetLinkedProgramStatus(
+ shader1, NULL, shader2, NULL, NULL));
+}
+
+TEST_F(ProgramCacheTest, LinkUnknownOnFragmentSourceChange) {
+ const std::string shader1 = "abcd1234";
+ std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ cache_->SaySuccessfullyCached(shader1, NULL, shader2, NULL, NULL);
+
+ shader2 = "different!";
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+}
+
+TEST_F(ProgramCacheTest, LinkUnknownOnVertexSourceChange) {
+ std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ cache_->SaySuccessfullyCached(shader1, NULL, shader2, NULL, NULL);
+
+ shader1 = "different!";
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+}
+
+TEST_F(ProgramCacheTest, StatusEviction) {
+ const std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ cache_->SaySuccessfullyCached(shader1, NULL, shader2, NULL, NULL);
+ char a_sha[ProgramCache::kHashLength];
+ char b_sha[ProgramCache::kHashLength];
+ cache_->ComputeShaderHash(shader1, NULL, a_sha);
+ cache_->ComputeShaderHash(shader2, NULL, b_sha);
+
+ char sha[ProgramCache::kHashLength];
+ cache_->ComputeProgramHash(a_sha,
+ b_sha,
+ NULL,
+ sha);
+ cache_->Evict(std::string(sha, ProgramCache::kHashLength));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+}
+
+TEST_F(ProgramCacheTest, EvictionWithReusedShader) {
+ const std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ const std::string shader3 = "asbjbbjj239a";
+ cache_->SaySuccessfullyCached(shader1, NULL, shader2, NULL, NULL);
+ cache_->SaySuccessfullyCached(shader1, NULL, shader3, NULL, NULL);
+
+ char a_sha[ProgramCache::kHashLength];
+ char b_sha[ProgramCache::kHashLength];
+ char c_sha[ProgramCache::kHashLength];
+ cache_->ComputeShaderHash(shader1, NULL, a_sha);
+ cache_->ComputeShaderHash(shader2, NULL, b_sha);
+ cache_->ComputeShaderHash(shader3, NULL, c_sha);
+
+ char sha[ProgramCache::kHashLength];
+ cache_->ComputeProgramHash(a_sha,
+ b_sha,
+ NULL,
+ sha);
+ cache_->Evict(std::string(sha, ProgramCache::kHashLength));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader3, NULL, NULL));
+
+ cache_->ComputeProgramHash(a_sha,
+ c_sha,
+ NULL,
+ sha);
+ cache_->Evict(std::string(sha, ProgramCache::kHashLength));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader3, NULL, NULL));
+}
+
+TEST_F(ProgramCacheTest, StatusClear) {
+ const std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ const std::string shader3 = "asbjbbjj239a";
+ cache_->SaySuccessfullyCached(shader1, NULL, shader2, NULL, NULL);
+ cache_->SaySuccessfullyCached(shader1, NULL, shader3, NULL, NULL);
+ cache_->Clear();
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader3, NULL, NULL));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/program_manager.cc b/gpu/command_buffer/service/program_manager.cc
new file mode 100644
index 0000000..4dd4bc4
--- /dev/null
+++ b/gpu/command_buffer/service/program_manager.cc
@@ -0,0 +1,1374 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/program_manager.h"
+
+#include <algorithm>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/program_cache.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "third_party/re2/re2/re2.h"
+
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+struct UniformType {
+ explicit UniformType(const ShaderTranslator::VariableInfo uniform)
+ : type(uniform.type),
+ size(uniform.size),
+ precision(uniform.precision) { }
+
+ UniformType()
+ : type(0),
+ size(0),
+ precision(SH_PRECISION_MEDIUMP) { }
+
+ bool operator==(const UniformType& other) const {
+ return type == other.type &&
+ size == other.size &&
+ precision == other.precision;
+ }
+
+ int type;
+ int size;
+ int precision;
+};
+
+int ShaderTypeToIndex(GLenum shader_type) {
+ switch (shader_type) {
+ case GL_VERTEX_SHADER:
+ return 0;
+ case GL_FRAGMENT_SHADER:
+ return 1;
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
+// Given a name like "foo.bar[123].moo[456]", sets new_name to
+// "foo.bar[123].moo" and element_index to 456. Returns false if the element
+// expression is not a whole decimal number, for example "foo[1b2]".
+bool GetUniformNameSansElement(
+ const std::string& name, int* element_index, std::string* new_name) {
+ DCHECK(element_index);
+ DCHECK(new_name);
+ if (name.size() < 3 || name[name.size() - 1] != ']') {
+ *element_index = 0;
+ *new_name = name;
+ return true;
+ }
+
+ // Look for an array specification.
+ size_t open_pos = name.find_last_of('[');
+ if (open_pos == std::string::npos ||
+ open_pos >= name.size() - 2) {
+ return false;
+ }
+
+ GLint index = 0;
+ size_t last = name.size() - 1;
+ for (size_t pos = open_pos + 1; pos < last; ++pos) {
+ int8 digit = name[pos] - '0';
+ if (digit < 0 || digit > 9) {
+ return false;
+ }
+ index = index * 10 + digit;
+ }
+
+ *element_index = index;
+ *new_name = name.substr(0, open_pos);
+ return true;
+}
+
+bool IsBuiltInVarying(const std::string& name) {
+ // Built-in variables.
+ const char* kBuiltInVaryings[] = {
+ "gl_FragCoord",
+ "gl_FrontFacing",
+ "gl_PointCoord"
+ };
+ for (size_t ii = 0; ii < arraysize(kBuiltInVaryings); ++ii) {
+ if (name == kBuiltInVaryings[ii])
+ return true;
+ }
+ return false;
+}
+
+} // anonymous namespace.
+
+Program::UniformInfo::UniformInfo()
+ : size(0),
+ type(GL_NONE),
+ fake_location_base(0),
+ is_array(false) {
+}
+
+Program::UniformInfo::UniformInfo(GLsizei _size,
+ GLenum _type,
+ int _fake_location_base,
+ const std::string& _name)
+ : size(_size),
+ type(_type),
+ accepts_api_type(0),
+ fake_location_base(_fake_location_base),
+ is_array(false),
+ name(_name) {
+ switch (type) {
+ case GL_INT:
+ accepts_api_type = kUniform1i;
+ break;
+ case GL_INT_VEC2:
+ accepts_api_type = kUniform2i;
+ break;
+ case GL_INT_VEC3:
+ accepts_api_type = kUniform3i;
+ break;
+ case GL_INT_VEC4:
+ accepts_api_type = kUniform4i;
+ break;
+
+ case GL_BOOL:
+ accepts_api_type = kUniform1i | kUniform1f;
+ break;
+ case GL_BOOL_VEC2:
+ accepts_api_type = kUniform2i | kUniform2f;
+ break;
+ case GL_BOOL_VEC3:
+ accepts_api_type = kUniform3i | kUniform3f;
+ break;
+ case GL_BOOL_VEC4:
+ accepts_api_type = kUniform4i | kUniform4f;
+ break;
+
+ case GL_FLOAT:
+ accepts_api_type = kUniform1f;
+ break;
+ case GL_FLOAT_VEC2:
+ accepts_api_type = kUniform2f;
+ break;
+ case GL_FLOAT_VEC3:
+ accepts_api_type = kUniform3f;
+ break;
+ case GL_FLOAT_VEC4:
+ accepts_api_type = kUniform4f;
+ break;
+
+ case GL_FLOAT_MAT2:
+ accepts_api_type = kUniformMatrix2f;
+ break;
+ case GL_FLOAT_MAT3:
+ accepts_api_type = kUniformMatrix3f;
+ break;
+ case GL_FLOAT_MAT4:
+ accepts_api_type = kUniformMatrix4f;
+ break;
+
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_2D_RECT_ARB:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_3D_OES:
+ case GL_SAMPLER_EXTERNAL_OES:
+ accepts_api_type = kUniform1i;
+ break;
+ default:
+ NOTREACHED() << "Unhandled UniformInfo type " << type;
+ break;
+ }
+}
+
+Program::UniformInfo::~UniformInfo() {}
+
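+// Example (derived from the check below): IsInvalidPrefix("gl_Position", 11)
+// returns true because the name starts with the reserved "gl_" prefix, while
+// IsInvalidPrefix("a_position", 10) returns false.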
+bool ProgramManager::IsInvalidPrefix(const char* name, size_t length) {
+ static const char kInvalidPrefix[] = { 'g', 'l', '_' };
+ return (length >= sizeof(kInvalidPrefix) &&
+ memcmp(name, kInvalidPrefix, sizeof(kInvalidPrefix)) == 0);
+}
+
+Program::Program(
+ ProgramManager* manager, GLuint service_id)
+ : manager_(manager),
+ use_count_(0),
+ max_attrib_name_length_(0),
+ max_uniform_name_length_(0),
+ service_id_(service_id),
+ deleted_(false),
+ valid_(false),
+ link_status_(false),
+ uniforms_cleared_(false),
+ num_uniforms_(0) {
+ manager_->StartTracking(this);
+}
+
+void Program::Reset() {
+ valid_ = false;
+ link_status_ = false;
+ num_uniforms_ = 0;
+ max_uniform_name_length_ = 0;
+ max_attrib_name_length_ = 0;
+ attrib_infos_.clear();
+ uniform_infos_.clear();
+ sampler_indices_.clear();
+ attrib_location_to_index_map_.clear();
+}
+
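+// Rewrites a driver info log so that translator-hashed identifiers of the
+// form "webgl_<hex>" are replaced with the original user-declared names.
+// For example, assuming a uniform named "u_color" was hashed to
+// "webgl_1a2b3c", a log line such as
+//   "ERROR: webgl_1a2b3c is not initialized"
+// would be rewritten as "ERROR: u_color is not initialized". Hashed names
+// with no known original are left unchanged.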
+std::string Program::ProcessLogInfo(
+ const std::string& log) {
+ std::string output;
+ re2::StringPiece input(log);
+ std::string prior_log;
+ std::string hashed_name;
+ while (RE2::Consume(&input,
+ "(.*?)(webgl_[0123456789abcdefABCDEF]+)",
+ &prior_log,
+ &hashed_name)) {
+ output += prior_log;
+
+ const std::string* original_name =
+ GetOriginalNameFromHashedName(hashed_name);
+ if (original_name)
+ output += *original_name;
+ else
+ output += hashed_name;
+ }
+
+ return output + input.as_string();
+}
+
+void Program::UpdateLogInfo() {
+ GLint max_len = 0;
+ glGetProgramiv(service_id_, GL_INFO_LOG_LENGTH, &max_len);
+ if (max_len == 0) {
+ set_log_info(NULL);
+ return;
+ }
+ scoped_ptr<char[]> temp(new char[max_len]);
+ GLint len = 0;
+ glGetProgramInfoLog(service_id_, max_len, &len, temp.get());
+ DCHECK(max_len == 0 || len < max_len);
+ DCHECK(len == 0 || temp[len] == '\0');
+ std::string log(temp.get(), len);
+ set_log_info(ProcessLogInfo(log).c_str());
+}
+
+void Program::ClearUniforms(
+ std::vector<uint8>* zero_buffer) {
+ DCHECK(zero_buffer);
+ if (uniforms_cleared_) {
+ return;
+ }
+ uniforms_cleared_ = true;
+ for (size_t ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& uniform_info = uniform_infos_[ii];
+ if (!uniform_info.IsValid()) {
+ continue;
+ }
+ GLint location = uniform_info.element_locations[0];
+ GLsizei size = uniform_info.size;
+ uint32 unit_size = GLES2Util::GetGLDataTypeSizeForUniforms(
+ uniform_info.type);
+ uint32 size_needed = size * unit_size;
+ if (size_needed > zero_buffer->size()) {
+ zero_buffer->resize(size_needed, 0u);
+ }
+ const void* zero = &(*zero_buffer)[0];
+ switch (uniform_info.type) {
+ case GL_FLOAT:
+ glUniform1fv(location, size, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_FLOAT_VEC2:
+ glUniform2fv(location, size, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_FLOAT_VEC3:
+ glUniform3fv(location, size, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_FLOAT_VEC4:
+ glUniform4fv(location, size, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_INT:
+ case GL_BOOL:
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_3D_OES:
+ case GL_SAMPLER_2D_RECT_ARB:
+ glUniform1iv(location, size, reinterpret_cast<const GLint*>(zero));
+ break;
+ case GL_INT_VEC2:
+ case GL_BOOL_VEC2:
+ glUniform2iv(location, size, reinterpret_cast<const GLint*>(zero));
+ break;
+ case GL_INT_VEC3:
+ case GL_BOOL_VEC3:
+ glUniform3iv(location, size, reinterpret_cast<const GLint*>(zero));
+ break;
+ case GL_INT_VEC4:
+ case GL_BOOL_VEC4:
+ glUniform4iv(location, size, reinterpret_cast<const GLint*>(zero));
+ break;
+ case GL_FLOAT_MAT2:
+ glUniformMatrix2fv(
+ location, size, false, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_FLOAT_MAT3:
+ glUniformMatrix3fv(
+ location, size, false, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_FLOAT_MAT4:
+ glUniformMatrix4fv(
+ location, size, false, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ }
+}
+
+namespace {
+
+struct UniformData {
+ UniformData() : size(-1), type(GL_NONE), location(0), added(false) {
+ }
+ std::string queried_name;
+ std::string corrected_name;
+ std::string original_name;
+ GLsizei size;
+ GLenum type;
+ GLint location;
+ bool added;
+};
+
+struct UniformDataComparer {
+ bool operator()(const UniformData& lhs, const UniformData& rhs) const {
+ return lhs.queried_name < rhs.queried_name;
+ }
+};
+
+} // anonymous namespace
+
+void Program::Update() {
+ Reset();
+ UpdateLogInfo();
+ link_status_ = true;
+ uniforms_cleared_ = false;
+ GLint num_attribs = 0;
+ GLint max_len = 0;
+ GLint max_location = -1;
+ glGetProgramiv(service_id_, GL_ACTIVE_ATTRIBUTES, &num_attribs);
+ glGetProgramiv(service_id_, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, &max_len);
+ // TODO(gman): Should we check for error?
+ scoped_ptr<char[]> name_buffer(new char[max_len]);
+ for (GLint ii = 0; ii < num_attribs; ++ii) {
+ GLsizei length = 0;
+ GLsizei size = 0;
+ GLenum type = 0;
+ glGetActiveAttrib(
+ service_id_, ii, max_len, &length, &size, &type, name_buffer.get());
+ DCHECK(max_len == 0 || length < max_len);
+ DCHECK(length == 0 || name_buffer[length] == '\0');
+ if (!ProgramManager::IsInvalidPrefix(name_buffer.get(), length)) {
+ std::string name;
+ std::string original_name;
+ GetCorrectedVariableInfo(
+ false, name_buffer.get(), &name, &original_name, &size, &type);
+ // TODO(gman): Should we check for error?
+ GLint location = glGetAttribLocation(service_id_, name_buffer.get());
+ if (location > max_location) {
+ max_location = location;
+ }
+ attrib_infos_.push_back(
+ VertexAttrib(size, type, original_name, location));
+ max_attrib_name_length_ = std::max(
+ max_attrib_name_length_, static_cast<GLsizei>(original_name.size()));
+ }
+ }
+
+ // Create attrib location to index map.
+ attrib_location_to_index_map_.resize(max_location + 1);
+ for (GLint ii = 0; ii <= max_location; ++ii) {
+ attrib_location_to_index_map_[ii] = -1;
+ }
+ for (size_t ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ attrib_location_to_index_map_[info.location] = ii;
+ }
+
+#if !defined(NDEBUG)
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUServiceLoggingGPU)) {
+ DVLOG(1) << "----: attribs for service_id: " << service_id();
+ for (size_t ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ DVLOG(1) << ii << ": loc = " << info.location
+ << ", size = " << info.size
+ << ", type = " << GLES2Util::GetStringEnum(info.type)
+ << ", name = " << info.name;
+ }
+ }
+#endif
+
+ max_len = 0;
+ GLint num_uniforms = 0;
+ glGetProgramiv(service_id_, GL_ACTIVE_UNIFORMS, &num_uniforms);
+ glGetProgramiv(service_id_, GL_ACTIVE_UNIFORM_MAX_LENGTH, &max_len);
+ name_buffer.reset(new char[max_len]);
+
+ // Reads all the names.
+ std::vector<UniformData> uniform_data;
+ for (GLint ii = 0; ii < num_uniforms; ++ii) {
+ GLsizei length = 0;
+ UniformData data;
+ glGetActiveUniform(
+ service_id_, ii, max_len, &length,
+ &data.size, &data.type, name_buffer.get());
+ DCHECK(max_len == 0 || length < max_len);
+ DCHECK(length == 0 || name_buffer[length] == '\0');
+ if (!ProgramManager::IsInvalidPrefix(name_buffer.get(), length)) {
+ data.queried_name = std::string(name_buffer.get());
+ GetCorrectedVariableInfo(
+ true, name_buffer.get(), &data.corrected_name, &data.original_name,
+ &data.size, &data.type);
+ uniform_data.push_back(data);
+ }
+ }
+
+ // NOTE: We don't care if 2 uniforms are bound to the same location.
+ // One of them will take preference. The spec allows this, same as
+ // BindAttribLocation.
+ //
+ // The reason we don't check is that if we were to fail we'd have to
+ // restore the previous program, but since we've already linked successfully
+ // at this point the previous program is gone.
+
+ // Assigns the uniforms with bindings.
+ size_t next_available_index = 0;
+ for (size_t ii = 0; ii < uniform_data.size(); ++ii) {
+ UniformData& data = uniform_data[ii];
+ data.location = glGetUniformLocation(
+ service_id_, data.queried_name.c_str());
+ // remove "[0]"
+ std::string short_name;
+ int element_index = 0;
+ bool good ALLOW_UNUSED = GetUniformNameSansElement(
+ data.queried_name, &element_index, &short_name);
+ DCHECK(good);
+ LocationMap::const_iterator it = bind_uniform_location_map_.find(
+ short_name);
+ if (it != bind_uniform_location_map_.end()) {
+ data.added = AddUniformInfo(
+ data.size, data.type, data.location, it->second, data.corrected_name,
+ data.original_name, &next_available_index);
+ }
+ }
+
+ // Assigns the uniforms that were not bound.
+ for (size_t ii = 0; ii < uniform_data.size(); ++ii) {
+ const UniformData& data = uniform_data[ii];
+ if (!data.added) {
+ AddUniformInfo(
+ data.size, data.type, data.location, -1, data.corrected_name,
+ data.original_name, &next_available_index);
+ }
+ }
+
+#if !defined(NDEBUG)
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUServiceLoggingGPU)) {
+ DVLOG(1) << "----: uniforms for service_id: " << service_id();
+ for (size_t ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& info = uniform_infos_[ii];
+ if (info.IsValid()) {
+ DVLOG(1) << ii << ": loc = " << info.element_locations[0]
+ << ", size = " << info.size
+ << ", type = " << GLES2Util::GetStringEnum(info.type)
+ << ", name = " << info.name;
+ }
+ }
+ }
+#endif
+
+ valid_ = true;
+}
+
+void Program::ExecuteBindAttribLocationCalls() {
+ for (LocationMap::const_iterator it = bind_attrib_location_map_.begin();
+ it != bind_attrib_location_map_.end(); ++it) {
+ const std::string* mapped_name = GetAttribMappedName(it->first);
+ if (mapped_name && *mapped_name != it->first)
+ glBindAttribLocation(service_id_, it->second, mapped_name->c_str());
+ }
+}
+
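+// Link flow: pre-link validation first (attached shaders, attrib binding
+// conflicts, uniform/varying mismatches, varyings packing), then, if a
+// program cache is available and reports LINK_SUCCEEDED for these shader
+// sources, try to load the cached program binary; only when there is no
+// usable cache entry is a real glLinkProgram() issued, after which the
+// result is saved back to the cache.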
+bool Program::Link(ShaderManager* manager,
+ ShaderTranslator* vertex_translator,
+ ShaderTranslator* fragment_translator,
+ Program::VaryingsPackingOption varyings_packing_option,
+ const ShaderCacheCallback& shader_callback) {
+ ClearLinkStatus();
+ if (!CanLink()) {
+ set_log_info("missing shaders");
+ return false;
+ }
+ if (DetectAttribLocationBindingConflicts()) {
+ set_log_info("glBindAttribLocation() conflicts");
+ return false;
+ }
+ std::string conflicting_name;
+ if (DetectUniformsMismatch(&conflicting_name)) {
+ std::string info_log = "Uniforms with the same name but different "
+ "type/precision: " + conflicting_name;
+ set_log_info(ProcessLogInfo(info_log).c_str());
+ return false;
+ }
+ if (DetectVaryingsMismatch(&conflicting_name)) {
+ std::string info_log = "Varyings with the same name but different type, "
+ "or statically used varyings in fragment shader are "
+ "not declared in vertex shader: " + conflicting_name;
+ set_log_info(ProcessLogInfo(info_log).c_str());
+ return false;
+ }
+ if (DetectGlobalNameConflicts(&conflicting_name)) {
+ std::string info_log = "Name conflicts between an uniform and an "
+ "attribute: " + conflicting_name;
+ set_log_info(ProcessLogInfo(info_log).c_str());
+ return false;
+ }
+ if (!CheckVaryingsPacking(varyings_packing_option)) {
+ set_log_info("Varyings over maximum register limit");
+ return false;
+ }
+
+ TimeTicks before_time = TimeTicks::HighResNow();
+ bool link = true;
+ ProgramCache* cache = manager_->program_cache_;
+ if (cache) {
+ DCHECK(!attached_shaders_[0]->signature_source().empty() &&
+ !attached_shaders_[1]->signature_source().empty());
+ ProgramCache::LinkedProgramStatus status = cache->GetLinkedProgramStatus(
+ attached_shaders_[0]->signature_source(),
+ vertex_translator,
+ attached_shaders_[1]->signature_source(),
+ fragment_translator,
+ &bind_attrib_location_map_);
+
+ if (status == ProgramCache::LINK_SUCCEEDED) {
+ ProgramCache::ProgramLoadResult success =
+ cache->LoadLinkedProgram(service_id(),
+ attached_shaders_[0].get(),
+ vertex_translator,
+ attached_shaders_[1].get(),
+ fragment_translator,
+ &bind_attrib_location_map_,
+ shader_callback);
+ link = success != ProgramCache::PROGRAM_LOAD_SUCCESS;
+ UMA_HISTOGRAM_BOOLEAN("GPU.ProgramCache.LoadBinarySuccess", !link);
+ }
+ }
+
+ if (link) {
+ ExecuteBindAttribLocationCalls();
+ before_time = TimeTicks::HighResNow();
+ if (cache && gfx::g_driver_gl.ext.b_GL_ARB_get_program_binary) {
+ glProgramParameteri(service_id(),
+ PROGRAM_BINARY_RETRIEVABLE_HINT,
+ GL_TRUE);
+ }
+ glLinkProgram(service_id());
+ }
+
+ GLint success = 0;
+ glGetProgramiv(service_id(), GL_LINK_STATUS, &success);
+ if (success == GL_TRUE) {
+ Update();
+ if (link) {
+ if (cache) {
+ cache->SaveLinkedProgram(service_id(),
+ attached_shaders_[0].get(),
+ vertex_translator,
+ attached_shaders_[1].get(),
+ fragment_translator,
+ &bind_attrib_location_map_,
+ shader_callback);
+ }
+ UMA_HISTOGRAM_CUSTOM_COUNTS(
+ "GPU.ProgramCache.BinaryCacheMissTime",
+ (TimeTicks::HighResNow() - before_time).InMicroseconds(),
+ 0,
+ TimeDelta::FromSeconds(10).InMicroseconds(),
+ 50);
+ } else {
+ UMA_HISTOGRAM_CUSTOM_COUNTS(
+ "GPU.ProgramCache.BinaryCacheHitTime",
+ (TimeTicks::HighResNow() - before_time).InMicroseconds(),
+ 0,
+ TimeDelta::FromSeconds(1).InMicroseconds(),
+ 50);
+ }
+ } else {
+ UpdateLogInfo();
+ }
+ return success == GL_TRUE;
+}
+
+void Program::Validate() {
+ if (!IsValid()) {
+ set_log_info("program not linked");
+ return;
+ }
+ glValidateProgram(service_id());
+ UpdateLogInfo();
+}
+
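+// Illustrative lookup (names are hypothetical): for an array uniform declared
+// as "color[3]" with fake_location_base 2, GetUniformFakeLocation("color")
+// returns 2, and GetUniformFakeLocation("color[1]") returns
+// ProgramManager::MakeFakeLocation(2, 1) -- element 1 encoded in the upper
+// 16 bits -- provided that element has a valid GL location.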
+GLint Program::GetUniformFakeLocation(
+ const std::string& name) const {
+ bool getting_array_location = false;
+ size_t open_pos = std::string::npos;
+ int index = 0;
+ if (!GLES2Util::ParseUniformName(
+ name, &open_pos, &index, &getting_array_location)) {
+ return -1;
+ }
+ for (GLuint ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& info = uniform_infos_[ii];
+ if (!info.IsValid()) {
+ continue;
+ }
+ if (info.name == name ||
+ (info.is_array &&
+ info.name.compare(0, info.name.size() - 3, name) == 0)) {
+ return info.fake_location_base;
+ } else if (getting_array_location && info.is_array) {
+ // Look for an array specification.
+ size_t open_pos_2 = info.name.find_last_of('[');
+ if (open_pos_2 == open_pos &&
+ name.compare(0, open_pos, info.name, 0, open_pos) == 0) {
+ if (index >= 0 && index < info.size) {
+ DCHECK_GT(static_cast<int>(info.element_locations.size()), index);
+ if (info.element_locations[index] == -1)
+ return -1;
+ return ProgramManager::MakeFakeLocation(
+ info.fake_location_base, index);
+ }
+ }
+ }
+ }
+ return -1;
+}
+
+GLint Program::GetAttribLocation(
+ const std::string& name) const {
+ for (GLuint ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ if (info.name == name) {
+ return info.location;
+ }
+ }
+ return -1;
+}
+
+const Program::UniformInfo*
+ Program::GetUniformInfoByFakeLocation(
+ GLint fake_location, GLint* real_location, GLint* array_index) const {
+ DCHECK(real_location);
+ DCHECK(array_index);
+ if (fake_location < 0) {
+ return NULL;
+ }
+
+ GLint uniform_index = GetUniformInfoIndexFromFakeLocation(fake_location);
+ if (uniform_index >= 0 &&
+ static_cast<size_t>(uniform_index) < uniform_infos_.size()) {
+ const UniformInfo& uniform_info = uniform_infos_[uniform_index];
+ if (!uniform_info.IsValid()) {
+ return NULL;
+ }
+ GLint element_index = GetArrayElementIndexFromFakeLocation(fake_location);
+ if (element_index < uniform_info.size) {
+ *real_location = uniform_info.element_locations[element_index];
+ *array_index = element_index;
+ return &uniform_info;
+ }
+ }
+ return NULL;
+}
+
+const std::string* Program::GetAttribMappedName(
+ const std::string& original_name) const {
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ Shader* shader = attached_shaders_[ii].get();
+ if (shader) {
+ const std::string* mapped_name =
+ shader->GetAttribMappedName(original_name);
+ if (mapped_name)
+ return mapped_name;
+ }
+ }
+ return NULL;
+}
+
+const std::string* Program::GetOriginalNameFromHashedName(
+ const std::string& hashed_name) const {
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ Shader* shader = attached_shaders_[ii].get();
+ if (shader) {
+ const std::string* original_name =
+ shader->GetOriginalNameFromHashedName(hashed_name);
+ if (original_name)
+ return original_name;
+ }
+ }
+ return NULL;
+}
+
+bool Program::SetUniformLocationBinding(
+ const std::string& name, GLint location) {
+ std::string short_name;
+ int element_index = 0;
+ if (!GetUniformNameSansElement(name, &element_index, &short_name) ||
+ element_index != 0) {
+ return false;
+ }
+
+ bind_uniform_location_map_[short_name] = location;
+ return true;
+}
+
+// Note: This is only valid to call right after a program has been linked
+// successfully.
+void Program::GetCorrectedVariableInfo(
+ bool use_uniforms,
+ const std::string& name, std::string* corrected_name,
+ std::string* original_name,
+ GLsizei* size, GLenum* type) const {
+ DCHECK(corrected_name);
+ DCHECK(original_name);
+ DCHECK(size);
+ DCHECK(type);
+ const char* kArraySpec = "[0]";
+ for (int jj = 0; jj < 2; ++jj) {
+ std::string test_name(name + ((jj == 1) ? kArraySpec : ""));
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ Shader* shader = attached_shaders_[ii].get();
+ if (shader) {
+ const Shader::VariableInfo* variable_info =
+ use_uniforms ? shader->GetUniformInfo(test_name) :
+ shader->GetAttribInfo(test_name);
+ // Note: There is an assumption here that if an attrib is defined in more
+ // than one attached shader, their types and sizes match. Should we check
+ // for that case?
+ if (variable_info) {
+ *corrected_name = test_name;
+ *original_name = variable_info->name;
+ *type = variable_info->type;
+ *size = variable_info->size;
+ return;
+ }
+ }
+ }
+ }
+ *corrected_name = name;
+ *original_name = name;
+}
+
+bool Program::AddUniformInfo(
+ GLsizei size, GLenum type, GLint location, GLint fake_base_location,
+ const std::string& name, const std::string& original_name,
+ size_t* next_available_index) {
+ DCHECK(next_available_index);
+ const char* kArraySpec = "[0]";
+ size_t uniform_index =
+ fake_base_location >= 0 ? fake_base_location : *next_available_index;
+ if (uniform_infos_.size() < uniform_index + 1) {
+ uniform_infos_.resize(uniform_index + 1);
+ }
+
+ // return if this location is already in use.
+ if (uniform_infos_[uniform_index].IsValid()) {
+ DCHECK_GE(fake_base_location, 0);
+ return false;
+ }
+
+ uniform_infos_[uniform_index] = UniformInfo(
+ size, type, uniform_index, original_name);
+ ++num_uniforms_;
+
+ UniformInfo& info = uniform_infos_[uniform_index];
+ info.element_locations.resize(size);
+ info.element_locations[0] = location;
+ DCHECK_GE(size, 0);
+ size_t num_texture_units = info.IsSampler() ? static_cast<size_t>(size) : 0u;
+ info.texture_units.clear();
+ info.texture_units.resize(num_texture_units, 0);
+
+ if (size > 1) {
+ // Go through the array element locations looking for a match.
+ // We can skip the first element because it's the same as the
+ // location without the array operators.
+ size_t array_pos = name.rfind(kArraySpec);
+ std::string base_name = name;
+ if (name.size() > 3) {
+ if (array_pos != name.size() - 3) {
+ info.name = name + kArraySpec;
+ } else {
+ base_name = name.substr(0, name.size() - 3);
+ }
+ }
+ for (GLsizei ii = 1; ii < info.size; ++ii) {
+ std::string element_name(base_name + "[" + base::IntToString(ii) + "]");
+ info.element_locations[ii] =
+ glGetUniformLocation(service_id_, element_name.c_str());
+ }
+ }
+
+ info.is_array =
+ (size > 1 ||
+ (info.name.size() > 3 &&
+ info.name.rfind(kArraySpec) == info.name.size() - 3));
+
+ if (info.IsSampler()) {
+ sampler_indices_.push_back(info.fake_location_base);
+ }
+ max_uniform_name_length_ =
+ std::max(max_uniform_name_length_,
+ static_cast<GLsizei>(info.name.size()));
+
+ while (*next_available_index < uniform_infos_.size() &&
+ uniform_infos_[*next_available_index].IsValid()) {
+ *next_available_index = *next_available_index + 1;
+ }
+
+ return true;
+}
+
+const Program::UniformInfo*
+ Program::GetUniformInfo(
+ GLint index) const {
+ if (static_cast<size_t>(index) >= uniform_infos_.size()) {
+ return NULL;
+ }
+
+ const UniformInfo& info = uniform_infos_[index];
+ return info.IsValid() ? &info : NULL;
+}
+
+bool Program::SetSamplers(
+ GLint num_texture_units, GLint fake_location,
+ GLsizei count, const GLint* value) {
+ if (fake_location < 0) {
+ return true;
+ }
+ GLint uniform_index = GetUniformInfoIndexFromFakeLocation(fake_location);
+ if (uniform_index >= 0 &&
+ static_cast<size_t>(uniform_index) < uniform_infos_.size()) {
+ UniformInfo& info = uniform_infos_[uniform_index];
+ if (!info.IsValid()) {
+ return false;
+ }
+ GLint element_index = GetArrayElementIndexFromFakeLocation(fake_location);
+ if (element_index < info.size) {
+ count = std::min(info.size - element_index, count);
+ if (info.IsSampler() && count > 0) {
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (value[ii] < 0 || value[ii] >= num_texture_units) {
+ return false;
+ }
+ }
+ std::copy(value, value + count,
+ info.texture_units.begin() + element_index);
+ return true;
+ }
+ }
+ }
+ return true;
+}
+
+void Program::GetProgramiv(GLenum pname, GLint* params) {
+ switch (pname) {
+ case GL_ACTIVE_ATTRIBUTES:
+ *params = attrib_infos_.size();
+ break;
+ case GL_ACTIVE_ATTRIBUTE_MAX_LENGTH:
+ // Notice +1 to accommodate the NULL terminator.
+ *params = max_attrib_name_length_ + 1;
+ break;
+ case GL_ACTIVE_UNIFORMS:
+ *params = num_uniforms_;
+ break;
+ case GL_ACTIVE_UNIFORM_MAX_LENGTH:
+ // Notice +1 to accommodate the NULL terminator.
+ *params = max_uniform_name_length_ + 1;
+ break;
+ case GL_LINK_STATUS:
+ *params = link_status_;
+ break;
+ case GL_INFO_LOG_LENGTH:
+ // Notice +1 to accommodate the NULL terminator.
+ *params = log_info_.get() ? (log_info_->size() + 1) : 0;
+ break;
+ case GL_DELETE_STATUS:
+ *params = deleted_;
+ break;
+ case GL_VALIDATE_STATUS:
+ if (!IsValid()) {
+ *params = GL_FALSE;
+ } else {
+ glGetProgramiv(service_id_, pname, params);
+ }
+ break;
+ default:
+ glGetProgramiv(service_id_, pname, params);
+ break;
+ }
+}
+
+bool Program::AttachShader(
+ ShaderManager* shader_manager,
+ Shader* shader) {
+ DCHECK(shader_manager);
+ DCHECK(shader);
+ int index = ShaderTypeToIndex(shader->shader_type());
+ if (attached_shaders_[index].get() != NULL) {
+ return false;
+ }
+ attached_shaders_[index] = scoped_refptr<Shader>(shader);
+ shader_manager->UseShader(shader);
+ return true;
+}
+
+bool Program::DetachShader(
+ ShaderManager* shader_manager,
+ Shader* shader) {
+ DCHECK(shader_manager);
+ DCHECK(shader);
+ if (attached_shaders_[ShaderTypeToIndex(shader->shader_type())].get() !=
+ shader) {
+ return false;
+ }
+ attached_shaders_[ShaderTypeToIndex(shader->shader_type())] = NULL;
+ shader_manager->UnuseShader(shader);
+ return true;
+}
+
+void Program::DetachShaders(ShaderManager* shader_manager) {
+ DCHECK(shader_manager);
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ if (attached_shaders_[ii].get()) {
+ DetachShader(shader_manager, attached_shaders_[ii].get());
+ }
+ }
+}
+
+bool Program::CanLink() const {
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ if (!attached_shaders_[ii].get() || !attached_shaders_[ii]->valid()) {
+ return false;
+ }
+ }
+ return true;
+}
+
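+// Example of a conflict (attribute names are hypothetical): if both
+// "a_position" and "a_normal" are declared in the attached shaders and both
+// were bound to location 1 via glBindAttribLocation(), the second insert into
+// location_binding_used fails and this function reports a conflict.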
+bool Program::DetectAttribLocationBindingConflicts() const {
+ std::set<GLint> location_binding_used;
+ for (LocationMap::const_iterator it = bind_attrib_location_map_.begin();
+ it != bind_attrib_location_map_.end(); ++it) {
+ // Find out if an attribute is declared in this program's shaders.
+ bool active = false;
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ if (!attached_shaders_[ii].get() || !attached_shaders_[ii]->valid())
+ continue;
+ if (attached_shaders_[ii]->GetAttribInfo(it->first)) {
+ active = true;
+ break;
+ }
+ }
+ if (active) {
+ std::pair<std::set<GLint>::iterator, bool> result =
+ location_binding_used.insert(it->second);
+ if (!result.second)
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Program::DetectUniformsMismatch(std::string* conflicting_name) const {
+ typedef std::map<std::string, UniformType> UniformMap;
+ UniformMap uniform_map;
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ const ShaderTranslator::VariableMap& shader_uniforms =
+ attached_shaders_[ii]->uniform_map();
+ for (ShaderTranslator::VariableMap::const_iterator iter =
+ shader_uniforms.begin();
+ iter != shader_uniforms.end(); ++iter) {
+ const std::string& name = iter->first;
+ UniformType type(iter->second);
+ UniformMap::iterator map_entry = uniform_map.find(name);
+ if (map_entry == uniform_map.end()) {
+ uniform_map[name] = type;
+ } else {
+ // If a uniform is already in the map, i.e., it has already been
+ // declared by other shader, then the type and precision must match.
+ if (map_entry->second == type)
+ continue;
+ *conflicting_name = name;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool Program::DetectVaryingsMismatch(std::string* conflicting_name) const {
+ DCHECK(attached_shaders_[0].get() &&
+ attached_shaders_[0]->shader_type() == GL_VERTEX_SHADER &&
+ attached_shaders_[1].get() &&
+ attached_shaders_[1]->shader_type() == GL_FRAGMENT_SHADER);
+ const ShaderTranslator::VariableMap* vertex_varyings =
+ &(attached_shaders_[0]->varying_map());
+ const ShaderTranslator::VariableMap* fragment_varyings =
+ &(attached_shaders_[1]->varying_map());
+
+ for (ShaderTranslator::VariableMap::const_iterator iter =
+ fragment_varyings->begin();
+ iter != fragment_varyings->end(); ++iter) {
+ const std::string& name = iter->first;
+ if (IsBuiltInVarying(name))
+ continue;
+
+ ShaderTranslator::VariableMap::const_iterator hit =
+ vertex_varyings->find(name);
+ if (hit == vertex_varyings->end()) {
+ if (iter->second.static_use) {
+ *conflicting_name = name;
+ return true;
+ }
+ continue;
+ }
+
+ if (hit->second.type != iter->second.type ||
+ hit->second.size != iter->second.size) {
+ *conflicting_name = name;
+ return true;
+ }
+
+ }
+ return false;
+}
+
+bool Program::DetectGlobalNameConflicts(std::string* conflicting_name) const {
+ DCHECK(attached_shaders_[0].get() &&
+ attached_shaders_[0]->shader_type() == GL_VERTEX_SHADER &&
+ attached_shaders_[1].get() &&
+ attached_shaders_[1]->shader_type() == GL_FRAGMENT_SHADER);
+ const ShaderTranslator::VariableMap* uniforms[2];
+ uniforms[0] = &(attached_shaders_[0]->uniform_map());
+ uniforms[1] = &(attached_shaders_[1]->uniform_map());
+ const ShaderTranslator::VariableMap* attribs =
+ &(attached_shaders_[0]->attrib_map());
+
+ for (ShaderTranslator::VariableMap::const_iterator iter =
+ attribs->begin(); iter != attribs->end(); ++iter) {
+ for (int ii = 0; ii < 2; ++ii) {
+ if (uniforms[ii]->find(iter->first) != uniforms[ii]->end()) {
+ *conflicting_name = iter->first;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
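+// Collects the varyings consumed by the fragment shader (optionally only the
+// statically used ones; built-in gl_* varyings are counted without requiring
+// a matching vertex-shader declaration) and asks the translator via
+// ShCheckVariablesWithinPackingLimits() whether they fit within
+// manager_->max_varying_vectors() packing registers.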
+bool Program::CheckVaryingsPacking(
+ Program::VaryingsPackingOption option) const {
+ DCHECK(attached_shaders_[0].get() &&
+ attached_shaders_[0]->shader_type() == GL_VERTEX_SHADER &&
+ attached_shaders_[1].get() &&
+ attached_shaders_[1]->shader_type() == GL_FRAGMENT_SHADER);
+ const ShaderTranslator::VariableMap* vertex_varyings =
+ &(attached_shaders_[0]->varying_map());
+ const ShaderTranslator::VariableMap* fragment_varyings =
+ &(attached_shaders_[1]->varying_map());
+
+ std::map<std::string, ShVariableInfo> combined_map;
+
+ for (ShaderTranslator::VariableMap::const_iterator iter =
+ fragment_varyings->begin();
+ iter != fragment_varyings->end(); ++iter) {
+ if (!iter->second.static_use && option == kCountOnlyStaticallyUsed)
+ continue;
+ if (!IsBuiltInVarying(iter->first)) {
+ ShaderTranslator::VariableMap::const_iterator vertex_iter =
+ vertex_varyings->find(iter->first);
+ if (vertex_iter == vertex_varyings->end() ||
+ (!vertex_iter->second.static_use &&
+ option == kCountOnlyStaticallyUsed))
+ continue;
+ }
+
+ ShVariableInfo var;
+ var.type = static_cast<sh::GLenum>(iter->second.type);
+ var.size = iter->second.size;
+ combined_map[iter->first] = var;
+ }
+
+ if (combined_map.size() == 0)
+ return true;
+ scoped_ptr<ShVariableInfo[]> variables(
+ new ShVariableInfo[combined_map.size()]);
+ size_t index = 0;
+ for (std::map<std::string, ShVariableInfo>::const_iterator iter =
+ combined_map.begin();
+ iter != combined_map.end(); ++iter) {
+ variables[index].type = iter->second.type;
+ variables[index].size = iter->second.size;
+ ++index;
+ }
+ return ShCheckVariablesWithinPackingLimits(
+ static_cast<int>(manager_->max_varying_vectors()),
+ variables.get(),
+ combined_map.size()) == 1;
+}
+
+static uint32 ComputeOffset(const void* start, const void* position) {
+ return static_cast<const uint8*>(position) -
+ static_cast<const uint8*>(start);
+}
+
+void Program::GetProgramInfo(
+ ProgramManager* manager, CommonDecoder::Bucket* bucket) const {
+ // NOTE: It seems to me the math in here does not need to check for overflow
+ // because the data being calculated from has various small limits. The max
+ // number of attribs + uniforms is somewhere well under 1024. The maximum size
+ // of an identifier is 256 characters.
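+ //
+ // Bucket layout produced below:
+ //   [ProgramInfoHeader]
+ //   [ProgramInput * (num attribs + num uniforms)]
+ //   [int32 location table referenced by location_offset]
+ //   [name strings referenced by name_offset]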
+ uint32 num_locations = 0;
+ uint32 total_string_size = 0;
+
+ for (size_t ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ num_locations += 1;
+ total_string_size += info.name.size();
+ }
+
+ for (size_t ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& info = uniform_infos_[ii];
+ if (info.IsValid()) {
+ num_locations += info.element_locations.size();
+ total_string_size += info.name.size();
+ }
+ }
+
+ uint32 num_inputs = attrib_infos_.size() + num_uniforms_;
+ uint32 input_size = num_inputs * sizeof(ProgramInput);
+ uint32 location_size = num_locations * sizeof(int32);
+ uint32 size = sizeof(ProgramInfoHeader) +
+ input_size + location_size + total_string_size;
+
+ bucket->SetSize(size);
+ ProgramInfoHeader* header = bucket->GetDataAs<ProgramInfoHeader*>(0, size);
+ ProgramInput* inputs = bucket->GetDataAs<ProgramInput*>(
+ sizeof(ProgramInfoHeader), input_size);
+ int32* locations = bucket->GetDataAs<int32*>(
+ sizeof(ProgramInfoHeader) + input_size, location_size);
+ char* strings = bucket->GetDataAs<char*>(
+ sizeof(ProgramInfoHeader) + input_size + location_size,
+ total_string_size);
+ DCHECK(header);
+ DCHECK(inputs);
+ DCHECK(locations);
+ DCHECK(strings);
+
+ header->link_status = link_status_;
+ header->num_attribs = attrib_infos_.size();
+ header->num_uniforms = num_uniforms_;
+
+ for (size_t ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ inputs->size = info.size;
+ inputs->type = info.type;
+ inputs->location_offset = ComputeOffset(header, locations);
+ inputs->name_offset = ComputeOffset(header, strings);
+ inputs->name_length = info.name.size();
+ *locations++ = info.location;
+ memcpy(strings, info.name.c_str(), info.name.size());
+ strings += info.name.size();
+ ++inputs;
+ }
+
+ for (size_t ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& info = uniform_infos_[ii];
+ if (info.IsValid()) {
+ inputs->size = info.size;
+ inputs->type = info.type;
+ inputs->location_offset = ComputeOffset(header, locations);
+ inputs->name_offset = ComputeOffset(header, strings);
+ inputs->name_length = info.name.size();
+ DCHECK(static_cast<size_t>(info.size) == info.element_locations.size());
+ for (size_t jj = 0; jj < info.element_locations.size(); ++jj) {
+ if (info.element_locations[jj] == -1)
+ *locations++ = -1;
+ else
+ *locations++ = ProgramManager::MakeFakeLocation(ii, jj);
+ }
+ memcpy(strings, info.name.c_str(), info.name.size());
+ strings += info.name.size();
+ ++inputs;
+ }
+ }
+
+ DCHECK_EQ(ComputeOffset(header, strings), size);
+}
+
+Program::~Program() {
+ if (manager_) {
+ if (manager_->have_context_) {
+ glDeleteProgram(service_id());
+ }
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+
+ProgramManager::ProgramManager(ProgramCache* program_cache,
+ uint32 max_varying_vectors)
+ : program_count_(0),
+ have_context_(true),
+ program_cache_(program_cache),
+ max_varying_vectors_(max_varying_vectors) { }
+
+ProgramManager::~ProgramManager() {
+ DCHECK(programs_.empty());
+}
+
+void ProgramManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ programs_.clear();
+}
+
+void ProgramManager::StartTracking(Program* /* program */) {
+ ++program_count_;
+}
+
+void ProgramManager::StopTracking(Program* /* program */) {
+ --program_count_;
+}
+
+Program* ProgramManager::CreateProgram(
+ GLuint client_id, GLuint service_id) {
+ std::pair<ProgramMap::iterator, bool> result =
+ programs_.insert(
+ std::make_pair(client_id,
+ scoped_refptr<Program>(
+ new Program(this, service_id))));
+ DCHECK(result.second);
+ return result.first->second.get();
+}
+
+Program* ProgramManager::GetProgram(GLuint client_id) {
+ ProgramMap::iterator it = programs_.find(client_id);
+ return it != programs_.end() ? it->second.get() : NULL;
+}
+
+bool ProgramManager::GetClientId(GLuint service_id, GLuint* client_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (ProgramMap::const_iterator it = programs_.begin();
+ it != programs_.end(); ++it) {
+ if (it->second->service_id() == service_id) {
+ *client_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+ProgramCache* ProgramManager::program_cache() const {
+ return program_cache_;
+}
+
+bool ProgramManager::IsOwned(Program* program) {
+ for (ProgramMap::iterator it = programs_.begin();
+ it != programs_.end(); ++it) {
+ if (it->second.get() == program) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void ProgramManager::RemoveProgramInfoIfUnused(
+ ShaderManager* shader_manager, Program* program) {
+ DCHECK(shader_manager);
+ DCHECK(program);
+ DCHECK(IsOwned(program));
+ if (program->IsDeleted() && !program->InUse()) {
+ program->DetachShaders(shader_manager);
+ for (ProgramMap::iterator it = programs_.begin();
+ it != programs_.end(); ++it) {
+ if (it->second.get() == program) {
+ programs_.erase(it);
+ return;
+ }
+ }
+ NOTREACHED();
+ }
+}
+
+void ProgramManager::MarkAsDeleted(
+ ShaderManager* shader_manager,
+ Program* program) {
+ DCHECK(shader_manager);
+ DCHECK(program);
+ DCHECK(IsOwned(program));
+ program->MarkAsDeleted();
+ RemoveProgramInfoIfUnused(shader_manager, program);
+}
+
+void ProgramManager::UseProgram(Program* program) {
+ DCHECK(program);
+ DCHECK(IsOwned(program));
+ program->IncUseCount();
+}
+
+void ProgramManager::UnuseProgram(
+ ShaderManager* shader_manager,
+ Program* program) {
+ DCHECK(shader_manager);
+ DCHECK(program);
+ DCHECK(IsOwned(program));
+ program->DecUseCount();
+ RemoveProgramInfoIfUnused(shader_manager, program);
+}
+
+void ProgramManager::ClearUniforms(Program* program) {
+ DCHECK(program);
+ program->ClearUniforms(&zero_);
+}
+
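+// Fake locations pack the uniform index into the low 16 bits and the array
+// element into the high 16 bits. For example, MakeFakeLocation(3, 2) yields
+// 3 + 2 * 0x10000 = 0x20003; GetUniformInfoIndexFromFakeLocation() then
+// recovers 3 and GetArrayElementIndexFromFakeLocation() recovers 2.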
+int32 ProgramManager::MakeFakeLocation(int32 index, int32 element) {
+ return index + element * 0x10000;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/program_manager.h b/gpu/command_buffer/service/program_manager.h
new file mode 100644
index 0000000..bcc3630
--- /dev/null
+++ b/gpu/command_buffer/service/program_manager.h
@@ -0,0 +1,435 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_PROGRAM_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_PROGRAM_MANAGER_H_
+
+#include <map>
+#include <string>
+#include <vector>
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/common_decoder.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class ProgramCache;
+class ProgramManager;
+class Shader;
+class ShaderManager;
+class ShaderTranslator;
+
+// This is used to track which attributes a particular program needs
+// so we can verify at glDrawXXX time that every attribute is either disabled
+// or, if enabled, that it points to a valid source.
+class GPU_EXPORT Program : public base::RefCounted<Program> {
+ public:
+ static const int kMaxAttachedShaders = 2;
+
+ enum VaryingsPackingOption {
+ kCountOnlyStaticallyUsed,
+ kCountAll
+ };
+
+ enum UniformApiType {
+ kUniform1i = 1 << 0,
+ kUniform2i = 1 << 1,
+ kUniform3i = 1 << 2,
+ kUniform4i = 1 << 3,
+ kUniform1f = 1 << 4,
+ kUniform2f = 1 << 5,
+ kUniform3f = 1 << 6,
+ kUniform4f = 1 << 7,
+ kUniformMatrix2f = 1 << 8,
+ kUniformMatrix3f = 1 << 9,
+ kUniformMatrix4f = 1 << 10,
+ };
+
+ struct UniformInfo {
+ UniformInfo();
+ UniformInfo(
+ GLsizei _size, GLenum _type, GLint _fake_location_base,
+ const std::string& _name);
+ ~UniformInfo();
+
+ bool IsValid() const {
+ return size != 0;
+ }
+
+ bool IsSampler() const {
+ return type == GL_SAMPLER_2D || type == GL_SAMPLER_2D_RECT_ARB ||
+ type == GL_SAMPLER_CUBE || type == GL_SAMPLER_EXTERNAL_OES;
+ }
+
+ GLsizei size;
+ GLenum type;
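+ // Bitmask of UniformApiType values naming the glUniform* calls this
+ // uniform's GL type accepts (set up in the UniformInfo constructor in
+ // program_manager.cc); e.g. a GL_BOOL uniform accepts both kUniform1i and
+ // kUniform1f.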
+ uint32 accepts_api_type;
+ GLint fake_location_base;
+ bool is_array;
+ std::string name;
+ std::vector<GLint> element_locations;
+ std::vector<GLuint> texture_units;
+ };
+ struct VertexAttrib {
+ VertexAttrib(GLsizei _size, GLenum _type, const std::string& _name,
+ GLint _location)
+ : size(_size),
+ type(_type),
+ location(_location),
+ name(_name) {
+ }
+ GLsizei size;
+ GLenum type;
+ GLint location;
+ std::string name;
+ };
+
+ typedef std::vector<UniformInfo> UniformInfoVector;
+ typedef std::vector<VertexAttrib> AttribInfoVector;
+ typedef std::vector<int> SamplerIndices;
+ typedef std::map<std::string, GLint> LocationMap;
+
+ Program(ProgramManager* manager, GLuint service_id);
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ const SamplerIndices& sampler_indices() {
+ return sampler_indices_;
+ }
+
+ const AttribInfoVector& GetAttribInfos() const {
+ return attrib_infos_;
+ }
+
+ const VertexAttrib* GetAttribInfo(GLint index) const {
+ return (static_cast<size_t>(index) < attrib_infos_.size()) ?
+ &attrib_infos_[index] : NULL;
+ }
+
+ GLint GetAttribLocation(const std::string& name) const;
+
+ const VertexAttrib* GetAttribInfoByLocation(GLuint location) const {
+ if (location < attrib_location_to_index_map_.size()) {
+ GLint index = attrib_location_to_index_map_[location];
+ if (index >= 0) {
+ return &attrib_infos_[index];
+ }
+ }
+ return NULL;
+ }
+
+ const UniformInfo* GetUniformInfo(GLint index) const;
+
+ // If the original name is not found, return NULL.
+ const std::string* GetAttribMappedName(
+ const std::string& original_name) const;
+
+ // If the hashed name is not found, return NULL.
+ const std::string* GetOriginalNameFromHashedName(
+ const std::string& hashed_name) const;
+
+ // Gets the fake location of a uniform by name.
+ GLint GetUniformFakeLocation(const std::string& name) const;
+
+ // Gets the UniformInfo of a uniform by location.
+ const UniformInfo* GetUniformInfoByFakeLocation(
+ GLint fake_location, GLint* real_location, GLint* array_index) const;
+
+ // Gets all the program info.
+ void GetProgramInfo(
+ ProgramManager* manager, CommonDecoder::Bucket* bucket) const;
+
+ // Sets the sampler values for a uniform.
+ // This is safe to call for any location. If the location is not
+ // a sampler uniform nothing will happen.
+ // Returns false if fake_location is a sampler and any value
+ // is >= num_texture_units. Returns true otherwise.
+ bool SetSamplers(
+ GLint num_texture_units, GLint fake_location,
+ GLsizei count, const GLint* value);
+
+ bool IsDeleted() const {
+ return deleted_;
+ }
+
+ void GetProgramiv(GLenum pname, GLint* params);
+
+ bool IsValid() const {
+ return valid_;
+ }
+
+ bool AttachShader(ShaderManager* manager, Shader* shader);
+ bool DetachShader(ShaderManager* manager, Shader* shader);
+
+ bool CanLink() const;
+
+ // Performs glLinkProgram and related activities.
+ bool Link(ShaderManager* manager,
+ ShaderTranslator* vertex_translator,
+ ShaderTranslator* fragment_translator,
+ VaryingsPackingOption varyings_packing_option,
+ const ShaderCacheCallback& shader_callback);
+
+ // Performs glValidateProgram and related activities.
+ void Validate();
+
+ const std::string* log_info() const {
+ return log_info_.get();
+ }
+
+ bool InUse() const {
+ DCHECK_GE(use_count_, 0);
+ return use_count_ != 0;
+ }
+
+ // Sets attribute-location binding from a glBindAttribLocation() call.
+ void SetAttribLocationBinding(const std::string& attrib, GLint location) {
+ bind_attrib_location_map_[attrib] = location;
+ }
+
+ // Sets uniform-location binding from a glBindUniformLocationCHROMIUM call.
+ // returns false if error.
+ bool SetUniformLocationBinding(const std::string& name, GLint location);
+
+ // Detects if there are attribute location conflicts from
+ // glBindAttribLocation() calls.
+ // We only consider the declared attributes in the program.
+ bool DetectAttribLocationBindingConflicts() const;
+
+ // Detects if there are uniforms of the same name but different type
+ // or precision in vertex/fragment shaders.
+ // Return true and set the first found conflicting hashed name to
+ // conflicting_name if such cases are detected.
+ bool DetectUniformsMismatch(std::string* conflicting_name) const;
+
+ // Return true if varyings of the same name have different types or sizes
+ // in the vertex and fragment shaders, or if a varying that is statically
+ // used in the fragment shader is not declared in the vertex shader.
+ bool DetectVaryingsMismatch(std::string* conflicting_name) const;
+
+ // Return true if a uniform and an attribute share the same name.
+ bool DetectGlobalNameConflicts(std::string* conflicting_name) const;
+
+ // Return false if varyings can't be packed into the max available
+ // varying registers.
+ bool CheckVaryingsPacking(VaryingsPackingOption option) const;
+
+ // Visible for testing
+ const LocationMap& bind_attrib_location_map() const {
+ return bind_attrib_location_map_;
+ }
+
+ private:
+ friend class base::RefCounted<Program>;
+ friend class ProgramManager;
+
+ ~Program();
+
+ void set_log_info(const char* str) {
+ log_info_.reset(str ? new std::string(str) : NULL);
+ }
+
+ void ClearLinkStatus() {
+ link_status_ = false;
+ }
+
+ void IncUseCount() {
+ ++use_count_;
+ }
+
+ void DecUseCount() {
+ --use_count_;
+ DCHECK_GE(use_count_, 0);
+ }
+
+ void MarkAsDeleted() {
+ DCHECK(!deleted_);
+ deleted_ = true;
+ }
+
+ // Resets the program.
+ void Reset();
+
+ // Updates the program info after a successful link.
+ void Update();
+
+ // Process the program log, replacing the hashed names with original names.
+ std::string ProcessLogInfo(const std::string& log);
+
+ // Updates the program log info from GL
+ void UpdateLogInfo();
+
+ // Clears all the uniforms.
+ void ClearUniforms(std::vector<uint8>* zero_buffer);
+
+ // If long attribute names are mapped during shader translation, call
+ // glBindAttribLocation() again with the mapped names.
+ // This is called right before the glLink() call, but after shaders are
+ // translated.
+ void ExecuteBindAttribLocationCalls();
+
+ bool AddUniformInfo(
+ GLsizei size, GLenum type, GLint location, GLint fake_base_location,
+ const std::string& name, const std::string& original_name,
+ size_t* next_available_index);
+
+ void GetCorrectedVariableInfo(
+ bool use_uniforms, const std::string& name, std::string* corrected_name,
+ std::string* original_name, GLsizei* size, GLenum* type) const;
+
+ void DetachShaders(ShaderManager* manager);
+
+ static inline GLint GetUniformInfoIndexFromFakeLocation(
+ GLint fake_location) {
+ return fake_location & 0xFFFF;
+ }
+
+ static inline GLint GetArrayElementIndexFromFakeLocation(
+ GLint fake_location) {
+ return (fake_location >> 16) & 0xFFFF;
+ }
+
+ ProgramManager* manager_;
+
+ int use_count_;
+
+ GLsizei max_attrib_name_length_;
+
+ // Attrib by index.
+ AttribInfoVector attrib_infos_;
+
+ // Attrib by location to index.
+ std::vector<GLint> attrib_location_to_index_map_;
+
+ GLsizei max_uniform_name_length_;
+
+ // Uniform info by index.
+ UniformInfoVector uniform_infos_;
+
+ // The indices of the uniforms that are samplers.
+ SamplerIndices sampler_indices_;
+
+ // The program this Program is tracking.
+ GLuint service_id_;
+
+ // Shaders by type of shader.
+ scoped_refptr<Shader>
+ attached_shaders_[kMaxAttachedShaders];
+
+ // True if this program is marked as deleted.
+ bool deleted_;
+
+ // This is true if glLinkProgram was successful at least once.
+ bool valid_;
+
+ // This is true if glLinkProgram was successful last time it was called.
+ bool link_status_;
+
+ // True if the uniforms have been cleared.
+ bool uniforms_cleared_;
+
+ // This is different from uniform_infos_.size() because
+ // that is a sparse array.
+ GLint num_uniforms_;
+
+ // Log info
+ scoped_ptr<std::string> log_info_;
+
+ // attribute-location binding map from glBindAttribLocation() calls.
+ LocationMap bind_attrib_location_map_;
+
+ // uniform-location binding map from glBindUniformLocationCHROMIUM() calls.
+ LocationMap bind_uniform_location_map_;
+};
+
+// Tracks the Programs.
+//
+// NOTE: To support shared resources an instance of this class will
+// need to be shared by multiple GLES2Decoders.
+class GPU_EXPORT ProgramManager {
+ public:
+ explicit ProgramManager(ProgramCache* program_cache,
+ uint32 max_varying_vectors);
+ ~ProgramManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a new program.
+ Program* CreateProgram(GLuint client_id, GLuint service_id);
+
+ // Gets a program.
+ Program* GetProgram(GLuint client_id);
+
+ // Gets a client id for a given service id.
+ bool GetClientId(GLuint service_id, GLuint* client_id) const;
+
+ // Gets the program cache.
+ ProgramCache* program_cache() const;
+
+ // Marks a program as deleted. If it is not used the program will be deleted.
+ void MarkAsDeleted(ShaderManager* shader_manager, Program* program);
+
+ // Marks a program as used.
+ void UseProgram(Program* program);
+
+ // Marks a program as unused. If deleted, the program will be removed.
+ void UnuseProgram(ShaderManager* shader_manager, Program* program);
+
+ // Clears the uniforms for this program.
+ void ClearUniforms(Program* program);
+
+ // Returns true if prefix is invalid for gl.
+ static bool IsInvalidPrefix(const char* name, size_t length);
+
+ // Check if a Program is owned by this ProgramManager.
+ bool IsOwned(Program* program);
+
+ static int32 MakeFakeLocation(int32 index, int32 element);
+
+ uint32 max_varying_vectors() const {
+ return max_varying_vectors_;
+ }
+
+ private:
+ friend class Program;
+
+ void StartTracking(Program* program);
+ void StopTracking(Program* program);
+
+ void RemoveProgramInfoIfUnused(
+ ShaderManager* shader_manager, Program* program);
+
+ // Info for each "successfully linked" program by service side program Id.
+ // TODO(gman): Choose a faster container.
+ typedef std::map<GLuint, scoped_refptr<Program> > ProgramMap;
+ ProgramMap programs_;
+
+ // Counts the number of Programs allocated with 'this' as their manager.
+ // Allows checking that no Program will outlive this manager.
+ unsigned int program_count_;
+
+ bool have_context_;
+
+ // Used to clear uniforms.
+ std::vector<uint8> zero_;
+
+ ProgramCache* program_cache_;
+
+ uint32 max_varying_vectors_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProgramManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_PROGRAM_MANAGER_H_
diff --git a/gpu/command_buffer/service/program_manager_unittest.cc b/gpu/command_buffer/service/program_manager_unittest.cc
new file mode 100644
index 0000000..3cca263
--- /dev/null
+++ b/gpu/command_buffer/service/program_manager_unittest.cc
@@ -0,0 +1,1724 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/program_manager.h"
+
+#include <algorithm>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/common_decoder.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::ReturnRef;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+const uint32 kMaxVaryingVectors = 8;
+
+void ShaderCacheCb(const std::string& key, const std::string& shader) {}
+} // anonymous namespace
+
+class ProgramManagerTest : public GpuServiceTest {
+ public:
+ ProgramManagerTest() : manager_(NULL, kMaxVaryingVectors) { }
+ virtual ~ProgramManagerTest() {
+ manager_.Destroy(false);
+ }
+
+ protected:
+ ProgramManager manager_;
+};
+
+TEST_F(ProgramManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+ // Check we can create program.
+ manager_.CreateProgram(kClient1Id, kService1Id);
+ // Check program got created.
+ Program* program1 = manager_.GetProgram(kClient1Id);
+ ASSERT_TRUE(program1 != NULL);
+ GLuint client_id = 0;
+ EXPECT_TRUE(manager_.GetClientId(program1->service_id(), &client_id));
+ EXPECT_EQ(kClient1Id, client_id);
+ // Check we get nothing for a non-existent program.
+ EXPECT_TRUE(manager_.GetProgram(kClient2Id) == NULL);
+}
+
+TEST_F(ProgramManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create program.
+ Program* program0 = manager_.CreateProgram(kClient1Id, kService1Id);
+ ASSERT_TRUE(program0 != NULL);
+ // Check program got created.
+ Program* program1 = manager_.GetProgram(kClient1Id);
+ ASSERT_EQ(program0, program1);
+ EXPECT_CALL(*gl_, DeleteProgram(kService1Id))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.Destroy(true);
+ // Check the resources were released.
+ program1 = manager_.GetProgram(kClient1Id);
+ ASSERT_TRUE(program1 == NULL);
+}
+
+TEST_F(ProgramManagerTest, DeleteBug) {
+ ShaderManager shader_manager;
+ const GLuint kClient1Id = 1;
+ const GLuint kClient2Id = 2;
+ const GLuint kService1Id = 11;
+ const GLuint kService2Id = 12;
+ // Check we can create program.
+ scoped_refptr<Program> program1(
+ manager_.CreateProgram(kClient1Id, kService1Id));
+ scoped_refptr<Program> program2(
+ manager_.CreateProgram(kClient2Id, kService2Id));
+ // Check program got created.
+ ASSERT_TRUE(program1.get());
+ ASSERT_TRUE(program2.get());
+ manager_.UseProgram(program1.get());
+ manager_.MarkAsDeleted(&shader_manager, program1.get());
+ // Program will be deleted when last ref is released.
+ EXPECT_CALL(*gl_, DeleteProgram(kService2Id))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.MarkAsDeleted(&shader_manager, program2.get());
+ EXPECT_TRUE(manager_.IsOwned(program1.get()));
+ EXPECT_FALSE(manager_.IsOwned(program2.get()));
+}
+
+TEST_F(ProgramManagerTest, Program) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create program.
+ Program* program1 = manager_.CreateProgram(
+ kClient1Id, kService1Id);
+ ASSERT_TRUE(program1);
+ EXPECT_EQ(kService1Id, program1->service_id());
+ EXPECT_FALSE(program1->InUse());
+ EXPECT_FALSE(program1->IsValid());
+ EXPECT_FALSE(program1->IsDeleted());
+ EXPECT_FALSE(program1->CanLink());
+ EXPECT_TRUE(program1->log_info() == NULL);
+}
+
+class ProgramManagerWithShaderTest : public GpuServiceTest {
+ public:
+ ProgramManagerWithShaderTest()
+ : manager_(NULL, kMaxVaryingVectors), program_(NULL) {
+ }
+
+ virtual ~ProgramManagerWithShaderTest() {
+ manager_.Destroy(false);
+ shader_manager_.Destroy(false);
+ }
+
+ static const GLint kNumVertexAttribs = 16;
+
+ static const GLuint kClientProgramId = 123;
+ static const GLuint kServiceProgramId = 456;
+ static const GLuint kVertexShaderClientId = 201;
+ static const GLuint kFragmentShaderClientId = 202;
+ static const GLuint kVertexShaderServiceId = 301;
+ static const GLuint kFragmentShaderServiceId = 302;
+
+ static const char* kAttrib1Name;
+ static const char* kAttrib2Name;
+ static const char* kAttrib3Name;
+ static const GLint kAttrib1Size = 1;
+ static const GLint kAttrib2Size = 1;
+ static const GLint kAttrib3Size = 1;
+ static const int kAttrib1Precision = SH_PRECISION_MEDIUMP;
+ static const int kAttrib2Precision = SH_PRECISION_HIGHP;
+ static const int kAttrib3Precision = SH_PRECISION_LOWP;
+ static const int kAttribStaticUse = 0;
+ static const GLint kAttrib1Location = 0;
+ static const GLint kAttrib2Location = 1;
+ static const GLint kAttrib3Location = 2;
+ static const GLenum kAttrib1Type = GL_FLOAT_VEC4;
+ static const GLenum kAttrib2Type = GL_FLOAT_VEC2;
+ static const GLenum kAttrib3Type = GL_FLOAT_VEC3;
+ static const GLint kInvalidAttribLocation = 30;
+ static const GLint kBadAttribIndex = kNumVertexAttribs;
+
+ static const char* kUniform1Name;
+ static const char* kUniform2Name;
+ static const char* kUniform3BadName;
+ static const char* kUniform3GoodName;
+ static const GLint kUniform1Size = 1;
+ static const GLint kUniform2Size = 3;
+ static const GLint kUniform3Size = 2;
+ static const int kUniform1Precision = SH_PRECISION_LOWP;
+ static const int kUniform2Precision = SH_PRECISION_MEDIUMP;
+ static const int kUniform3Precision = SH_PRECISION_HIGHP;
+ static const int kUniform1StaticUse = 1;
+ static const int kUniform2StaticUse = 1;
+ static const int kUniform3StaticUse = 1;
+ static const GLint kUniform1FakeLocation = 0; // These are hard coded
+ static const GLint kUniform2FakeLocation = 1; // to match
+ static const GLint kUniform3FakeLocation = 2; // ProgramManager.
+ static const GLint kUniform1RealLocation = 11;
+ static const GLint kUniform2RealLocation = 22;
+ static const GLint kUniform3RealLocation = 33;
+ static const GLint kUniform1DesiredLocation = -1;
+ static const GLint kUniform2DesiredLocation = -1;
+ static const GLint kUniform3DesiredLocation = -1;
+ static const GLenum kUniform1Type = GL_FLOAT_VEC4;
+ static const GLenum kUniform2Type = GL_INT_VEC2;
+ static const GLenum kUniform3Type = GL_FLOAT_VEC3;
+ static const GLint kInvalidUniformLocation = 30;
+ static const GLint kBadUniformIndex = 1000;
+
+ static const size_t kNumAttribs;
+ static const size_t kNumUniforms;
+
+ protected:
+ typedef TestHelper::AttribInfo AttribInfo;
+ typedef TestHelper::UniformInfo UniformInfo;
+
+ typedef enum {
+ kVarUniform,
+ kVarVarying,
+ kVarAttribute
+ } VarCategory;
+
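+ // Describes one shader variable; SetupShaderVariableTest() routes each
+ // entry into the attrib, uniform, or varying map based on |category|.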
+ typedef struct {
+ int type;
+ int size;
+ int precision;
+ int static_use;
+ std::string name;
+ VarCategory category;
+ } VarInfo;
+
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+
+ SetupDefaultShaderExpectations();
+
+ Shader* vertex_shader = shader_manager_.CreateShader(
+ kVertexShaderClientId, kVertexShaderServiceId, GL_VERTEX_SHADER);
+ Shader* fragment_shader =
+ shader_manager_.CreateShader(
+ kFragmentShaderClientId, kFragmentShaderServiceId,
+ GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(vertex_shader != NULL);
+ ASSERT_TRUE(fragment_shader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vertex_shader, true);
+ TestHelper::SetShaderStates(gl_.get(), fragment_shader, true);
+
+ program_ = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program_ != NULL);
+
+ program_->AttachShader(&shader_manager_, vertex_shader);
+ program_->AttachShader(&shader_manager_, fragment_shader);
+ program_->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+ }
+
+ void SetupShader(AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint service_id) {
+ TestHelper::SetupShader(
+ gl_.get(), attribs, num_attribs, uniforms, num_uniforms, service_id);
+ }
+
+ void SetupDefaultShaderExpectations() {
+ SetupShader(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
+ kServiceProgramId);
+ }
+
+ void SetupExpectationsForClearingUniforms(
+ UniformInfo* uniforms, size_t num_uniforms) {
+ TestHelper::SetupExpectationsForClearingUniforms(
+ gl_.get(), uniforms, num_uniforms);
+ }
+
+ // Returns true if the link status matches |expected_link_status|.
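+ // Service-side shader expectations are only set up when the link is
+ // expected to succeed; the failure cases are rejected before any GL calls
+ // are made.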
+ bool LinkAsExpected(Program* program,
+ bool expected_link_status) {
+ GLuint service_id = program->service_id();
+ if (expected_link_status) {
+ SetupShader(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
+ service_id);
+ }
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+ GLint link_status;
+ program->GetProgramiv(GL_LINK_STATUS, &link_status);
+ return (static_cast<bool>(link_status) == expected_link_status);
+ }
+
+ Program* SetupShaderVariableTest(const VarInfo* vertex_variables,
+ size_t vertex_variable_size,
+ const VarInfo* fragment_variables,
+ size_t fragment_variable_size) {
+ // Set up shader
+ const GLuint kVShaderClientId = 1;
+ const GLuint kVShaderServiceId = 11;
+ const GLuint kFShaderClientId = 2;
+ const GLuint kFShaderServiceId = 12;
+
+ ShaderTranslator::VariableMap vertex_attrib_map;
+ ShaderTranslator::VariableMap vertex_uniform_map;
+ ShaderTranslator::VariableMap vertex_varying_map;
+ for (size_t ii = 0; ii < vertex_variable_size; ++ii) {
+ ShaderTranslator::VariableMap* map = NULL;
+ switch (vertex_variables[ii].category) {
+ case kVarAttribute:
+ map = &vertex_attrib_map;
+ break;
+ case kVarUniform:
+ map = &vertex_uniform_map;
+ break;
+ case kVarVarying:
+ map = &vertex_varying_map;
+ break;
+ default:
+ NOTREACHED();
+ }
+ (*map)[vertex_variables[ii].name] =
+ ShaderTranslator::VariableInfo(vertex_variables[ii].type,
+ vertex_variables[ii].size,
+ vertex_variables[ii].precision,
+ vertex_variables[ii].static_use,
+ vertex_variables[ii].name);
+ }
+
+ ShaderTranslator::VariableMap frag_attrib_map;
+ ShaderTranslator::VariableMap frag_uniform_map;
+ ShaderTranslator::VariableMap frag_varying_map;
+ for (size_t ii = 0; ii < fragment_variable_size; ++ii) {
+ ShaderTranslator::VariableMap* map = NULL;
+ switch (fragment_variables[ii].category) {
+ case kVarAttribute:
+ map = &frag_attrib_map;
+ break;
+ case kVarUniform:
+ map = &frag_uniform_map;
+ break;
+ case kVarVarying:
+ map = &frag_varying_map;
+ break;
+ default:
+ NOTREACHED();
+ }
+ (*map)[fragment_variables[ii].name] =
+ ShaderTranslator::VariableInfo(fragment_variables[ii].type,
+ fragment_variables[ii].size,
+ fragment_variables[ii].precision,
+ fragment_variables[ii].static_use,
+ fragment_variables[ii].name);
+ }
+
+ // Check we can create shader.
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ // Check shader got created.
+ EXPECT_TRUE(vshader != NULL && fshader != NULL);
+ // Set Status
+ TestHelper::SetShaderStates(
+ gl_.get(), vshader, true, NULL, NULL,
+ &vertex_attrib_map, &vertex_uniform_map, &vertex_varying_map, NULL);
+ TestHelper::SetShaderStates(
+ gl_.get(), fshader, true, NULL, NULL,
+ &frag_attrib_map, &frag_uniform_map, &frag_varying_map, NULL);
+
+ // Set up program
+ const GLuint kClientProgramId = 6666;
+ const GLuint kServiceProgramId = 8888;
+ Program* program =
+ manager_.CreateProgram(kClientProgramId, kServiceProgramId);
+ EXPECT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ return program;
+ }
+
+ static AttribInfo kAttribs[];
+ static UniformInfo kUniforms[];
+
+ ProgramManager manager_;
+ Program* program_;
+ ShaderManager shader_manager_;
+};
+
+ProgramManagerWithShaderTest::AttribInfo
+ ProgramManagerWithShaderTest::kAttribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLint ProgramManagerWithShaderTest::kNumVertexAttribs;
+const GLuint ProgramManagerWithShaderTest::kClientProgramId;
+const GLuint ProgramManagerWithShaderTest::kServiceProgramId;
+const GLuint ProgramManagerWithShaderTest::kVertexShaderClientId;
+const GLuint ProgramManagerWithShaderTest::kFragmentShaderClientId;
+const GLuint ProgramManagerWithShaderTest::kVertexShaderServiceId;
+const GLuint ProgramManagerWithShaderTest::kFragmentShaderServiceId;
+const GLint ProgramManagerWithShaderTest::kAttrib1Size;
+const GLint ProgramManagerWithShaderTest::kAttrib2Size;
+const GLint ProgramManagerWithShaderTest::kAttrib3Size;
+const GLint ProgramManagerWithShaderTest::kAttrib1Location;
+const GLint ProgramManagerWithShaderTest::kAttrib2Location;
+const GLint ProgramManagerWithShaderTest::kAttrib3Location;
+const GLenum ProgramManagerWithShaderTest::kAttrib1Type;
+const GLenum ProgramManagerWithShaderTest::kAttrib2Type;
+const GLenum ProgramManagerWithShaderTest::kAttrib3Type;
+const GLint ProgramManagerWithShaderTest::kInvalidAttribLocation;
+const GLint ProgramManagerWithShaderTest::kBadAttribIndex;
+const GLint ProgramManagerWithShaderTest::kUniform1Size;
+const GLint ProgramManagerWithShaderTest::kUniform2Size;
+const GLint ProgramManagerWithShaderTest::kUniform3Size;
+const GLint ProgramManagerWithShaderTest::kUniform1FakeLocation;
+const GLint ProgramManagerWithShaderTest::kUniform2FakeLocation;
+const GLint ProgramManagerWithShaderTest::kUniform3FakeLocation;
+const GLint ProgramManagerWithShaderTest::kUniform1RealLocation;
+const GLint ProgramManagerWithShaderTest::kUniform2RealLocation;
+const GLint ProgramManagerWithShaderTest::kUniform3RealLocation;
+const GLint ProgramManagerWithShaderTest::kUniform1DesiredLocation;
+const GLint ProgramManagerWithShaderTest::kUniform2DesiredLocation;
+const GLint ProgramManagerWithShaderTest::kUniform3DesiredLocation;
+const GLenum ProgramManagerWithShaderTest::kUniform1Type;
+const GLenum ProgramManagerWithShaderTest::kUniform2Type;
+const GLenum ProgramManagerWithShaderTest::kUniform3Type;
+const GLint ProgramManagerWithShaderTest::kInvalidUniformLocation;
+const GLint ProgramManagerWithShaderTest::kBadUniformIndex;
+#endif
+
+const size_t ProgramManagerWithShaderTest::kNumAttribs =
+ arraysize(ProgramManagerWithShaderTest::kAttribs);
+
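+// Each entry pairs the name as the driver reports it with the canonical
+// ("good") name the service should expose; uniform3 deliberately omits its
+// array spec.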
+ProgramManagerWithShaderTest::UniformInfo
+ ProgramManagerWithShaderTest::kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kUniform2Type,
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3BadName,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3GoodName,
+ },
+};
+
+const size_t ProgramManagerWithShaderTest::kNumUniforms =
+ arraysize(ProgramManagerWithShaderTest::kUniforms);
+
+const char* ProgramManagerWithShaderTest::kAttrib1Name = "attrib1";
+const char* ProgramManagerWithShaderTest::kAttrib2Name = "attrib2";
+const char* ProgramManagerWithShaderTest::kAttrib3Name = "attrib3";
+const char* ProgramManagerWithShaderTest::kUniform1Name = "uniform1";
+// Correctly has array spec.
+const char* ProgramManagerWithShaderTest::kUniform2Name = "uniform2[0]";
+// Incorrectly missing array spec.
+const char* ProgramManagerWithShaderTest::kUniform3BadName = "uniform3";
+const char* ProgramManagerWithShaderTest::kUniform3GoodName = "uniform3[0]";
+
+TEST_F(ProgramManagerWithShaderTest, GetAttribInfos) {
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ const Program::AttribInfoVector& infos =
+ program->GetAttribInfos();
+ ASSERT_EQ(kNumAttribs, infos.size());
+ for (size_t ii = 0; ii < kNumAttribs; ++ii) {
+ const Program::VertexAttrib& info = infos[ii];
+ const AttribInfo& expected = kAttribs[ii];
+ EXPECT_EQ(expected.size, info.size);
+ EXPECT_EQ(expected.type, info.type);
+ EXPECT_EQ(expected.location, info.location);
+ EXPECT_STREQ(expected.name, info.name.c_str());
+ }
+}
+
+TEST_F(ProgramManagerWithShaderTest, GetAttribInfo) {
+ const GLint kValidIndex = 1;
+ const GLint kInvalidIndex = 1000;
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ const Program::VertexAttrib* info =
+ program->GetAttribInfo(kValidIndex);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kAttrib2Size, info->size);
+ EXPECT_EQ(kAttrib2Type, info->type);
+ EXPECT_EQ(kAttrib2Location, info->location);
+ EXPECT_STREQ(kAttrib2Name, info->name.c_str());
+ EXPECT_TRUE(program->GetAttribInfo(kInvalidIndex) == NULL);
+}
+
+TEST_F(ProgramManagerWithShaderTest, GetAttribLocation) {
+ const char* kInvalidName = "foo";
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_EQ(kAttrib2Location, program->GetAttribLocation(kAttrib2Name));
+ EXPECT_EQ(-1, program->GetAttribLocation(kInvalidName));
+}
+
+TEST_F(ProgramManagerWithShaderTest, GetUniformInfo) {
+ const GLint kInvalidIndex = 1000;
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ const Program::UniformInfo* info =
+ program->GetUniformInfo(0);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kUniform1Size, info->size);
+ EXPECT_EQ(kUniform1Type, info->type);
+ EXPECT_EQ(kUniform1RealLocation, info->element_locations[0]);
+ EXPECT_STREQ(kUniform1Name, info->name.c_str());
+ info = program->GetUniformInfo(1);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kUniform2Size, info->size);
+ EXPECT_EQ(kUniform2Type, info->type);
+ EXPECT_EQ(kUniform2RealLocation, info->element_locations[0]);
+ EXPECT_STREQ(kUniform2Name, info->name.c_str());
+ info = program->GetUniformInfo(2);
+ // We emulate certain OpenGL drivers by supplying the name without
+ // the array spec. Our implementation should correctly add the required spec.
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kUniform3Size, info->size);
+ EXPECT_EQ(kUniform3Type, info->type);
+ EXPECT_EQ(kUniform3RealLocation, info->element_locations[0]);
+ EXPECT_STREQ(kUniform3GoodName, info->name.c_str());
+ EXPECT_TRUE(program->GetUniformInfo(kInvalidIndex) == NULL);
+}
+
+TEST_F(ProgramManagerWithShaderTest, AttachDetachShader) {
+ static const GLuint kClientProgramId = 124;
+ static const GLuint kServiceProgramId = 457;
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_FALSE(program->CanLink());
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_FALSE(program->CanLink());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ EXPECT_TRUE(program->CanLink());
+ program->DetachShader(&shader_manager_, vshader);
+ EXPECT_FALSE(program->CanLink());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->CanLink());
+ program->DetachShader(&shader_manager_, fshader);
+ EXPECT_FALSE(program->CanLink());
+ EXPECT_FALSE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_FALSE(program->CanLink());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ EXPECT_TRUE(program->CanLink());
+ TestHelper::SetShaderStates(gl_.get(), vshader, false);
+ EXPECT_FALSE(program->CanLink());
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ EXPECT_TRUE(program->CanLink());
+ TestHelper::SetShaderStates(gl_.get(), fshader, false);
+ EXPECT_FALSE(program->CanLink());
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ EXPECT_TRUE(program->CanLink());
+ EXPECT_TRUE(program->DetachShader(&shader_manager_, fshader));
+ EXPECT_FALSE(program->DetachShader(&shader_manager_, fshader));
+}
+
+TEST_F(ProgramManagerWithShaderTest, GetUniformFakeLocation) {
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ // Emulate the situation where uniform3[1] is unused and optimized out by
+ // the driver, so its location is -1.
+ Program::UniformInfo* uniform = const_cast<Program::UniformInfo*>(
+ program->GetUniformInfo(2));
+ ASSERT_TRUE(uniform != NULL && kUniform3Size == 2);
+ EXPECT_EQ(kUniform3Size, uniform->size);
+ uniform->element_locations[1] = -1;
+ EXPECT_EQ(kUniform1FakeLocation,
+ program->GetUniformFakeLocation(kUniform1Name));
+ EXPECT_EQ(kUniform2FakeLocation,
+ program->GetUniformFakeLocation(kUniform2Name));
+ EXPECT_EQ(kUniform3FakeLocation,
+ program->GetUniformFakeLocation(kUniform3BadName));
+ // Check we can get uniform2 as "uniform2" even though the name is
+ // "uniform2[0]"
+ EXPECT_EQ(kUniform2FakeLocation,
+ program->GetUniformFakeLocation("uniform2"));
+ // Check we can get uniform3 as "uniform3[0]" even though we simulated GL
+ // returning "uniform3"
+ EXPECT_EQ(kUniform3FakeLocation,
+ program->GetUniformFakeLocation(kUniform3GoodName));
+ // Check that we can get the locations of the array elements > 1
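+ // MakeFakeLocation combines the uniform's base fake location with the
+ // element index, so each array element gets its own fake location.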
+ EXPECT_EQ(ProgramManager::MakeFakeLocation(kUniform2FakeLocation, 1),
+ program->GetUniformFakeLocation("uniform2[1]"));
+ EXPECT_EQ(ProgramManager::MakeFakeLocation(kUniform2FakeLocation, 2),
+ program->GetUniformFakeLocation("uniform2[2]"));
+ EXPECT_EQ(-1, program->GetUniformFakeLocation("uniform2[3]"));
+ EXPECT_EQ(-1, program->GetUniformFakeLocation("uniform3[1]"));
+ EXPECT_EQ(-1, program->GetUniformFakeLocation("uniform3[2]"));
+}
+
+TEST_F(ProgramManagerWithShaderTest, GetUniformInfoByFakeLocation) {
+ const GLint kInvalidLocation = 1234;
+ const Program::UniformInfo* info;
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ GLint real_location = -1;
+ GLint array_index = -1;
+ ASSERT_TRUE(program != NULL);
+ info = program->GetUniformInfoByFakeLocation(
+ kUniform2FakeLocation, &real_location, &array_index);
+ EXPECT_EQ(kUniform2RealLocation, real_location);
+ EXPECT_EQ(0, array_index);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kUniform2Type, info->type);
+ real_location = -1;
+ array_index = -1;
+ info = program->GetUniformInfoByFakeLocation(
+ kInvalidLocation, &real_location, &array_index);
+ EXPECT_TRUE(info == NULL);
+ EXPECT_EQ(-1, real_location);
+ EXPECT_EQ(-1, array_index);
+ GLint loc = program->GetUniformFakeLocation("uniform2[2]");
+ info = program->GetUniformInfoByFakeLocation(
+ loc, &real_location, &array_index);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kUniform2RealLocation + 2 * 2, real_location);
+ EXPECT_EQ(2, array_index);
+}
+
+// Some GL drivers incorrectly return gl_DepthRange and possibly other uniforms
+// that start with "gl_". Our implementation catches these and does not allow
+// them back to the client.
+TEST_F(ProgramManagerWithShaderTest, GLDriverReturnsGLUnderscoreUniform) {
+ static const char* kUniform2Name = "gl_longNameWeCanCheckFor";
+ static ProgramManagerWithShaderTest::UniformInfo kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kUniform2Type,
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3BadName,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3GoodName,
+ },
+ };
+ const size_t kNumUniforms = arraysize(kUniforms);
+ static const GLuint kClientProgramId = 1234;
+ static const GLuint kServiceProgramId = 5679;
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ SetupShader(
+ kAttribs, kNumAttribs, kUniforms, kNumUniforms, kServiceProgramId);
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ Program* program =
+ manager_.CreateProgram(kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+ GLint value = 0;
+ program->GetProgramiv(GL_ACTIVE_ATTRIBUTES, &value);
+ EXPECT_EQ(3, value);
+ // Check that we skipped the "gl_" uniform.
+ program->GetProgramiv(GL_ACTIVE_UNIFORMS, &value);
+ EXPECT_EQ(2, value);
+ // Check that our max length adds room for the array spec and is not as long
+ // as the "gl_" uniform we skipped.
+ // +4u accounts for the "[0]" array spec and the NULL terminator.
+ program->GetProgramiv(GL_ACTIVE_UNIFORM_MAX_LENGTH, &value);
+ EXPECT_EQ(strlen(kUniform3BadName) + 4u, static_cast<size_t>(value));
+}
+
+// Test the bug comparing similar array names is fixed.
+TEST_F(ProgramManagerWithShaderTest, SimilarArrayNames) {
+ static const char* kUniform2Name = "u_nameLong[0]";
+ static const char* kUniform3Name = "u_name[0]";
+ static const GLint kUniform2Size = 2;
+ static const GLint kUniform3Size = 2;
+ static ProgramManagerWithShaderTest::UniformInfo kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kUniform2Type,
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3Name,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3Name,
+ },
+ };
+ const size_t kNumUniforms = arraysize(kUniforms);
+ static const GLuint kClientProgramId = 1234;
+ static const GLuint kServiceProgramId = 5679;
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ SetupShader(
+ kAttribs, kNumAttribs, kUniforms, kNumUniforms, kServiceProgramId);
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ Program* program =
+ manager_.CreateProgram(kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+
+ // Check that we get the correct locations.
+ EXPECT_EQ(kUniform2FakeLocation,
+ program->GetUniformFakeLocation(kUniform2Name));
+ EXPECT_EQ(kUniform3FakeLocation,
+ program->GetUniformFakeLocation(kUniform3Name));
+}
+
+// Some GL drivers return the wrong type for certain variables. For example,
+// they return GL_FLOAT_VEC2 when they should return GL_FLOAT_MAT2. Check that
+// we handle this.
+TEST_F(ProgramManagerWithShaderTest, GLDriverReturnsWrongTypeInfo) {
+ static GLenum kAttrib2BadType = GL_FLOAT_VEC2;
+ static GLenum kAttrib2GoodType = GL_FLOAT_MAT2;
+ static GLenum kUniform2BadType = GL_FLOAT_VEC3;
+ static GLenum kUniform2GoodType = GL_FLOAT_MAT3;
+ ShaderTranslator::VariableMap attrib_map;
+ ShaderTranslator::VariableMap uniform_map;
+ ShaderTranslator::VariableMap varying_map;
+ attrib_map[kAttrib1Name] = ShaderTranslatorInterface::VariableInfo(
+ kAttrib1Type, kAttrib1Size, kAttrib1Precision,
+ kAttribStaticUse, kAttrib1Name);
+ attrib_map[kAttrib2Name] = ShaderTranslatorInterface::VariableInfo(
+ kAttrib2GoodType, kAttrib2Size, kAttrib2Precision,
+ kAttribStaticUse, kAttrib2Name);
+ attrib_map[kAttrib3Name] = ShaderTranslatorInterface::VariableInfo(
+ kAttrib3Type, kAttrib3Size, kAttrib3Precision,
+ kAttribStaticUse, kAttrib3Name);
+ uniform_map[kUniform1Name] = ShaderTranslatorInterface::VariableInfo(
+ kUniform1Type, kUniform1Size, kUniform1Precision,
+ kUniform1StaticUse, kUniform1Name);
+ uniform_map[kUniform2Name] = ShaderTranslatorInterface::VariableInfo(
+ kUniform2GoodType, kUniform2Size, kUniform2Precision,
+ kUniform2StaticUse, kUniform2Name);
+ uniform_map[kUniform3GoodName] = ShaderTranslatorInterface::VariableInfo(
+ kUniform3Type, kUniform3Size, kUniform3Precision,
+ kUniform3StaticUse, kUniform3GoodName);
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(
+ gl_.get(), vshader, true, NULL, NULL,
+ &attrib_map, &uniform_map, &varying_map, NULL);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(
+ gl_.get(), fshader, true, NULL, NULL,
+ &attrib_map, &uniform_map, &varying_map, NULL);
+ static ProgramManagerWithShaderTest::AttribInfo kAttribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2BadType, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ static ProgramManagerWithShaderTest::UniformInfo kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kUniform2BadType,
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3BadName,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3GoodName,
+ },
+ };
+ const size_t kNumAttribs = arraysize(kAttribs);
+ const size_t kNumUniforms = arraysize(kUniforms);
+ static const GLuint kClientProgramId = 1234;
+ static const GLuint kServiceProgramId = 5679;
+ SetupShader(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
+ kServiceProgramId);
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+ // Check that we got the good type, not the bad.
+ // Check Attribs
+ for (unsigned index = 0; index < kNumAttribs; ++index) {
+ const Program::VertexAttrib* attrib_info =
+ program->GetAttribInfo(index);
+ ASSERT_TRUE(attrib_info != NULL);
+ ShaderTranslator::VariableMap::const_iterator it = attrib_map.find(
+ attrib_info->name);
+ ASSERT_TRUE(it != attrib_map.end());
+ EXPECT_EQ(it->first, attrib_info->name);
+ EXPECT_EQ(static_cast<GLenum>(it->second.type), attrib_info->type);
+ EXPECT_EQ(it->second.size, attrib_info->size);
+ EXPECT_EQ(it->second.name, attrib_info->name);
+ }
+ // Check Uniforms
+ for (unsigned index = 0; index < kNumUniforms; ++index) {
+ const Program::UniformInfo* uniform_info =
+ program->GetUniformInfo(index);
+ ASSERT_TRUE(uniform_info != NULL);
+ ShaderTranslator::VariableMap::const_iterator it = uniform_map.find(
+ uniform_info->name);
+ ASSERT_TRUE(it != uniform_map.end());
+ EXPECT_EQ(it->first, uniform_info->name);
+ EXPECT_EQ(static_cast<GLenum>(it->second.type), uniform_info->type);
+ EXPECT_EQ(it->second.size, uniform_info->size);
+ EXPECT_EQ(it->second.name, uniform_info->name);
+ }
+}
+
+TEST_F(ProgramManagerWithShaderTest, ProgramInfoUseCount) {
+ static const GLuint kClientProgramId = 124;
+ static const GLuint kServiceProgramId = 457;
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_FALSE(program->CanLink());
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ EXPECT_FALSE(vshader->InUse());
+ EXPECT_FALSE(fshader->InUse());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(vshader->InUse());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ EXPECT_TRUE(fshader->InUse());
+ EXPECT_TRUE(program->CanLink());
+ EXPECT_FALSE(program->InUse());
+ EXPECT_FALSE(program->IsDeleted());
+ manager_.UseProgram(program);
+ EXPECT_TRUE(program->InUse());
+ manager_.UseProgram(program);
+ EXPECT_TRUE(program->InUse());
+ manager_.MarkAsDeleted(&shader_manager_, program);
+ EXPECT_TRUE(program->IsDeleted());
+ Program* info2 = manager_.GetProgram(kClientProgramId);
+ EXPECT_EQ(program, info2);
+ manager_.UnuseProgram(&shader_manager_, program);
+ EXPECT_TRUE(program->InUse());
+ // This should delete the info.
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.UnuseProgram(&shader_manager_, program);
+ info2 = manager_.GetProgram(kClientProgramId);
+ EXPECT_TRUE(info2 == NULL);
+ EXPECT_FALSE(vshader->InUse());
+ EXPECT_FALSE(fshader->InUse());
+}
+
+TEST_F(ProgramManagerWithShaderTest, ProgramInfoUseCount2) {
+ static const GLuint kClientProgramId = 124;
+ static const GLuint kServiceProgramId = 457;
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_FALSE(program->CanLink());
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ EXPECT_FALSE(vshader->InUse());
+ EXPECT_FALSE(fshader->InUse());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(vshader->InUse());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ EXPECT_TRUE(fshader->InUse());
+ EXPECT_TRUE(program->CanLink());
+ EXPECT_FALSE(program->InUse());
+ EXPECT_FALSE(program->IsDeleted());
+ manager_.UseProgram(program);
+ EXPECT_TRUE(program->InUse());
+ manager_.UseProgram(program);
+ EXPECT_TRUE(program->InUse());
+ manager_.UnuseProgram(&shader_manager_, program);
+ EXPECT_TRUE(program->InUse());
+ manager_.UnuseProgram(&shader_manager_, program);
+ EXPECT_FALSE(program->InUse());
+ Program* info2 = manager_.GetProgram(kClientProgramId);
+ EXPECT_EQ(program, info2);
+ // This should delete the program.
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.MarkAsDeleted(&shader_manager_, program);
+ info2 = manager_.GetProgram(kClientProgramId);
+ EXPECT_TRUE(info2 == NULL);
+ EXPECT_FALSE(vshader->InUse());
+ EXPECT_FALSE(fshader->InUse());
+}
+
+TEST_F(ProgramManagerWithShaderTest, ProgramInfoGetProgramInfo) {
+ CommonDecoder::Bucket bucket;
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ program->GetProgramInfo(&manager_, &bucket);
+ ProgramInfoHeader* header =
+ bucket.GetDataAs<ProgramInfoHeader*>(0, sizeof(ProgramInfoHeader));
+ ASSERT_TRUE(header != NULL);
+ EXPECT_EQ(1u, header->link_status);
+ EXPECT_EQ(arraysize(kAttribs), header->num_attribs);
+ EXPECT_EQ(arraysize(kUniforms), header->num_uniforms);
+ const ProgramInput* inputs = bucket.GetDataAs<const ProgramInput*>(
+ sizeof(*header),
+ sizeof(ProgramInput) * (header->num_attribs + header->num_uniforms));
+ ASSERT_TRUE(inputs != NULL);
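+ // The bucket holds a ProgramInfoHeader followed by one ProgramInput per
+ // attrib and uniform; each input stores bucket offsets to its location(s)
+ // and name rather than the data itself.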
+ const ProgramInput* input = inputs;
+ // TODO(gman): Don't assume these are in order.
+ for (uint32 ii = 0; ii < header->num_attribs; ++ii) {
+ const AttribInfo& expected = kAttribs[ii];
+ EXPECT_EQ(expected.size, input->size);
+ EXPECT_EQ(expected.type, input->type);
+ const int32* location = bucket.GetDataAs<const int32*>(
+ input->location_offset, sizeof(int32));
+ ASSERT_TRUE(location != NULL);
+ EXPECT_EQ(expected.location, *location);
+ const char* name_buf = bucket.GetDataAs<const char*>(
+ input->name_offset, input->name_length);
+ ASSERT_TRUE(name_buf != NULL);
+ std::string name(name_buf, input->name_length);
+ EXPECT_STREQ(expected.name, name.c_str());
+ ++input;
+ }
+ // TODO(gman): Don't assume these are in order.
+ for (uint32 ii = 0; ii < header->num_uniforms; ++ii) {
+ const UniformInfo& expected = kUniforms[ii];
+ EXPECT_EQ(expected.size, input->size);
+ EXPECT_EQ(expected.type, input->type);
+ const int32* locations = bucket.GetDataAs<const int32*>(
+ input->location_offset, sizeof(int32) * input->size);
+ ASSERT_TRUE(locations != NULL);
+ for (int32 jj = 0; jj < input->size; ++jj) {
+ EXPECT_EQ(
+ ProgramManager::MakeFakeLocation(expected.fake_location, jj),
+ locations[jj]);
+ }
+ const char* name_buf = bucket.GetDataAs<const char*>(
+ input->name_offset, input->name_length);
+ ASSERT_TRUE(name_buf != NULL);
+ std::string name(name_buf, input->name_length);
+ EXPECT_STREQ(expected.good_name, name.c_str());
+ ++input;
+ }
+ EXPECT_EQ(header->num_attribs + header->num_uniforms,
+ static_cast<uint32>(input - inputs));
+}
+
+// Some drivers optimize out unused uniform array elements, so their
+// locations are reported as -1.
+TEST_F(ProgramManagerWithShaderTest, UnusedUniformArrayElements) {
+ CommonDecoder::Bucket bucket;
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ // Emulate the situation that only the first element has a valid location.
+ // TODO(zmo): Don't assume these are in order.
+ for (size_t ii = 0; ii < arraysize(kUniforms); ++ii) {
+ Program::UniformInfo* uniform = const_cast<Program::UniformInfo*>(
+ program->GetUniformInfo(ii));
+ ASSERT_TRUE(uniform != NULL);
+ EXPECT_EQ(static_cast<size_t>(kUniforms[ii].size),
+ uniform->element_locations.size());
+ for (GLsizei jj = 1; jj < uniform->size; ++jj)
+ uniform->element_locations[jj] = -1;
+ }
+ program->GetProgramInfo(&manager_, &bucket);
+ ProgramInfoHeader* header =
+ bucket.GetDataAs<ProgramInfoHeader*>(0, sizeof(ProgramInfoHeader));
+ ASSERT_TRUE(header != NULL);
+ EXPECT_EQ(1u, header->link_status);
+ EXPECT_EQ(arraysize(kAttribs), header->num_attribs);
+ EXPECT_EQ(arraysize(kUniforms), header->num_uniforms);
+ const ProgramInput* inputs = bucket.GetDataAs<const ProgramInput*>(
+ sizeof(*header),
+ sizeof(ProgramInput) * (header->num_attribs + header->num_uniforms));
+ ASSERT_TRUE(inputs != NULL);
+ const ProgramInput* input = inputs + header->num_attribs;
+ for (uint32 ii = 0; ii < header->num_uniforms; ++ii) {
+ const UniformInfo& expected = kUniforms[ii];
+ EXPECT_EQ(expected.size, input->size);
+ const int32* locations = bucket.GetDataAs<const int32*>(
+ input->location_offset, sizeof(int32) * input->size);
+ ASSERT_TRUE(locations != NULL);
+ EXPECT_EQ(
+ ProgramManager::MakeFakeLocation(expected.fake_location, 0),
+ locations[0]);
+ for (int32 jj = 1; jj < input->size; ++jj)
+ EXPECT_EQ(-1, locations[jj]);
+ ++input;
+ }
+}
+
+TEST_F(ProgramManagerWithShaderTest, BindAttribLocationConflicts) {
+ // Set up shader
+ const GLuint kVShaderClientId = 1;
+ const GLuint kVShaderServiceId = 11;
+ const GLuint kFShaderClientId = 2;
+ const GLuint kFShaderServiceId = 12;
+ ShaderTranslator::VariableMap attrib_map;
+ for (uint32 ii = 0; ii < kNumAttribs; ++ii) {
+ attrib_map[kAttribs[ii].name] = ShaderTranslatorInterface::VariableInfo(
+ kAttribs[ii].type,
+ kAttribs[ii].size,
+ SH_PRECISION_MEDIUMP,
+ kAttribStaticUse,
+ kAttribs[ii].name);
+ }
+ // Check we can create shader.
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ // Check shader got created.
+ ASSERT_TRUE(vshader != NULL && fshader != NULL);
+ // Set Status
+ TestHelper::SetShaderStates(
+ gl_.get(), vshader, true, NULL, NULL, &attrib_map, NULL, NULL, NULL);
+ // Check attrib infos got copied.
+ for (ShaderTranslator::VariableMap::const_iterator it = attrib_map.begin();
+ it != attrib_map.end(); ++it) {
+ const Shader::VariableInfo* variable_info =
+ vshader->GetAttribInfo(it->first);
+ ASSERT_TRUE(variable_info != NULL);
+ EXPECT_EQ(it->second.type, variable_info->type);
+ EXPECT_EQ(it->second.size, variable_info->size);
+ EXPECT_EQ(it->second.precision, variable_info->precision);
+ EXPECT_EQ(it->second.static_use, variable_info->static_use);
+ EXPECT_EQ(it->second.name, variable_info->name);
+ }
+ TestHelper::SetShaderStates(
+ gl_.get(), fshader, true, NULL, NULL, &attrib_map, NULL, NULL, NULL);
+
+ // Set up program
+ const GLuint kClientProgramId = 6666;
+ const GLuint kServiceProgramId = 8888;
+ Program* program =
+ manager_.CreateProgram(kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+
+ EXPECT_FALSE(program->DetectAttribLocationBindingConflicts());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+
+ program->SetAttribLocationBinding(kAttrib1Name, 0);
+ EXPECT_FALSE(program->DetectAttribLocationBindingConflicts());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+
+ program->SetAttribLocationBinding("xxx", 0);
+ EXPECT_FALSE(program->DetectAttribLocationBindingConflicts());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+
+ program->SetAttribLocationBinding(kAttrib2Name, 1);
+ EXPECT_FALSE(program->DetectAttribLocationBindingConflicts());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+
+ program->SetAttribLocationBinding(kAttrib2Name, 0);
+ EXPECT_TRUE(program->DetectAttribLocationBindingConflicts());
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+TEST_F(ProgramManagerWithShaderTest, UniformsPrecisionMismatch) {
+ // Set up shader
+ const GLuint kVShaderClientId = 1;
+ const GLuint kVShaderServiceId = 11;
+ const GLuint kFShaderClientId = 2;
+ const GLuint kFShaderServiceId = 12;
+
+ ShaderTranslator::VariableMap vertex_uniform_map;
+ vertex_uniform_map["a"] = ShaderTranslator::VariableInfo(
+ 1, 3, SH_PRECISION_MEDIUMP, 1, "a");
+ ShaderTranslator::VariableMap frag_uniform_map;
+ frag_uniform_map["a"] = ShaderTranslator::VariableInfo(
+ 1, 3, SH_PRECISION_LOWP, 1, "a");
+
+ // Check we can create shader.
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ // Check shader got created.
+ ASSERT_TRUE(vshader != NULL && fshader != NULL);
+ // Set Status
+ TestHelper::SetShaderStates(
+ gl_.get(), vshader, true, NULL, NULL, NULL,
+ &vertex_uniform_map, NULL, NULL);
+ TestHelper::SetShaderStates(
+ gl_.get(), fshader, true, NULL, NULL, NULL,
+ &frag_uniform_map, NULL, NULL);
+
+ // Set up program
+ const GLuint kClientProgramId = 6666;
+ const GLuint kServiceProgramId = 8888;
+ Program* program =
+ manager_.CreateProgram(kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+
+ std::string conflicting_name;
+
+ EXPECT_TRUE(program->DetectUniformsMismatch(&conflicting_name));
+ EXPECT_EQ("a", conflicting_name);
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// If a varying has different type in the vertex and fragment
+// shader, linking should fail.
+TEST_F(ProgramManagerWithShaderTest, VaryingTypeMismatch) {
+ const VarInfo kVertexVarying =
+ { GL_FLOAT_VEC3, 1, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ const VarInfo kFragmentVarying =
+ { GL_FLOAT_VEC4, 1, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ Program* program = SetupShaderVariableTest(
+ &kVertexVarying, 1, &kFragmentVarying, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_TRUE(program->DetectVaryingsMismatch(&conflicting_name));
+ EXPECT_EQ("a", conflicting_name);
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// If a varying has different array size in the vertex and fragment
+// shader, linking should fail.
+TEST_F(ProgramManagerWithShaderTest, VaryingArraySizeMismatch) {
+ const VarInfo kVertexVarying =
+ { GL_FLOAT, 2, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ const VarInfo kFragmentVarying =
+ { GL_FLOAT, 3, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ Program* program = SetupShaderVariableTest(
+ &kVertexVarying, 1, &kFragmentVarying, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_TRUE(program->DetectVaryingsMismatch(&conflicting_name));
+ EXPECT_EQ("a", conflicting_name);
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// If a varying has different precision in the vertex and fragment
+// shader, linking should succeed.
+TEST_F(ProgramManagerWithShaderTest, VaryingPrecisionMismatch) {
+ const VarInfo kVertexVarying =
+ { GL_FLOAT, 2, SH_PRECISION_HIGHP, 1, "a", kVarVarying };
+ const VarInfo kFragmentVarying =
+ { GL_FLOAT, 2, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ Program* program = SetupShaderVariableTest(
+ &kVertexVarying, 1, &kFragmentVarying, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_FALSE(program->DetectVaryingsMismatch(&conflicting_name));
+ EXPECT_TRUE(conflicting_name.empty());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+}
+
+// If a varying is statically used in fragment shader but not
+// declared in vertex shader, link should fail.
+TEST_F(ProgramManagerWithShaderTest, VaryingMissing) {
+ const VarInfo kFragmentVarying =
+ { GL_FLOAT, 3, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ Program* program = SetupShaderVariableTest(
+ NULL, 0, &kFragmentVarying, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_TRUE(program->DetectVaryingsMismatch(&conflicting_name));
+ EXPECT_EQ("a", conflicting_name);
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// If a varying is declared but not statically used in fragment
+// shader, even if it's not declared in vertex shader, link should
+// succeed.
+TEST_F(ProgramManagerWithShaderTest, InactiveVarying) {
+ const VarInfo kFragmentVarying =
+ { GL_FLOAT, 3, SH_PRECISION_MEDIUMP, 0, "a", kVarVarying };
+ Program* program = SetupShaderVariableTest(
+ NULL, 0, &kFragmentVarying, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_FALSE(program->DetectVaryingsMismatch(&conflicting_name));
+ EXPECT_TRUE(conflicting_name.empty());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+}
+
+// Uniforms and attributes are both global variables, thus sharing
+// the same namespace. Any name conflicts should cause link
+// failure.
+TEST_F(ProgramManagerWithShaderTest, AttribUniformNameConflict) {
+ const VarInfo kVertexAttribute =
+ { GL_FLOAT_VEC4, 1, SH_PRECISION_MEDIUMP, 1, "a", kVarAttribute };
+ const VarInfo kFragmentUniform =
+ { GL_FLOAT_VEC4, 1, SH_PRECISION_MEDIUMP, 1, "a", kVarUniform };
+ Program* program = SetupShaderVariableTest(
+ &kVertexAttribute, 1, &kFragmentUniform, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_TRUE(program->DetectGlobalNameConflicts(&conflicting_name));
+ EXPECT_EQ("a", conflicting_name);
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// Varyings go over 8 rows.
+TEST_F(ProgramManagerWithShaderTest, TooManyVaryings) {
+ const VarInfo kVertexVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ const VarInfo kFragmentVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ Program* program = SetupShaderVariableTest(
+ kVertexVaryings, 2, kFragmentVaryings, 2);
+
+ EXPECT_FALSE(
+ program->CheckVaryingsPacking(Program::kCountOnlyStaticallyUsed));
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// Varyings go over 8 rows but some are inactive
+TEST_F(ProgramManagerWithShaderTest, TooManyInactiveVaryings) {
+ const VarInfo kVertexVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ const VarInfo kFragmentVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 0, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ Program* program = SetupShaderVariableTest(
+ kVertexVaryings, 2, kFragmentVaryings, 2);
+
+ EXPECT_TRUE(
+ program->CheckVaryingsPacking(Program::kCountOnlyStaticallyUsed));
+ EXPECT_TRUE(LinkAsExpected(program, true));
+}
+
+// Varyings go over 8 rows but some are inactive.
+// However, we still fail the check if kCountAll option is used.
+TEST_F(ProgramManagerWithShaderTest, CountAllVaryingsInPacking) {
+ const VarInfo kVertexVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ const VarInfo kFragmentVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 0, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ Program* program = SetupShaderVariableTest(
+ kVertexVaryings, 2, kFragmentVaryings, 2);
+
+ EXPECT_FALSE(program->CheckVaryingsPacking(Program::kCountAll));
+}
+
+TEST_F(ProgramManagerWithShaderTest, ClearWithSamplerTypes) {
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ static const GLuint kClientProgramId = 1234;
+ static const GLuint kServiceProgramId = 5679;
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+
+ static const GLenum kSamplerTypes[] = {
+ GL_SAMPLER_2D,
+ GL_SAMPLER_CUBE,
+ GL_SAMPLER_EXTERNAL_OES,
+ GL_SAMPLER_3D_OES,
+ GL_SAMPLER_2D_RECT_ARB,
+ };
+ const size_t kNumSamplerTypes = arraysize(kSamplerTypes);
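+ // Re-link the program with uniform2 taking each sampler type in turn and
+ // verify that ClearUniforms() issues the expected clears.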
+ for (size_t ii = 0; ii < kNumSamplerTypes; ++ii) {
+ static ProgramManagerWithShaderTest::AttribInfo kAttribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ ProgramManagerWithShaderTest::UniformInfo kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kSamplerTypes[ii],
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3BadName,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3GoodName,
+ },
+ };
+ const size_t kNumAttribs = arraysize(kAttribs);
+ const size_t kNumUniforms = arraysize(kUniforms);
+ SetupShader(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
+ kServiceProgramId);
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+ SetupExpectationsForClearingUniforms(kUniforms, kNumUniforms);
+ manager_.ClearUniforms(program);
+ }
+}
+
+TEST_F(ProgramManagerWithShaderTest, BindUniformLocation) {
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+
+ const GLint kUniform1DesiredLocation = 10;
+ const GLint kUniform2DesiredLocation = -1;
+ const GLint kUniform3DesiredLocation = 5;
+
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ static const GLuint kClientProgramId = 1234;
+ static const GLuint kServiceProgramId = 5679;
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ EXPECT_TRUE(program->SetUniformLocationBinding(
+ kUniform1Name, kUniform1DesiredLocation));
+ EXPECT_TRUE(program->SetUniformLocationBinding(
+ kUniform3BadName, kUniform3DesiredLocation));
+
+ static ProgramManagerWithShaderTest::AttribInfo kAttribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ ProgramManagerWithShaderTest::UniformInfo kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kUniform2Type,
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3BadName,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3GoodName,
+ },
+ };
+
+ const size_t kNumAttribs = arraysize(kAttribs);
+ const size_t kNumUniforms = arraysize(kUniforms);
+ SetupShader(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
+ kServiceProgramId);
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+
+ EXPECT_EQ(kUniform1DesiredLocation,
+ program->GetUniformFakeLocation(kUniform1Name));
+ EXPECT_EQ(kUniform3DesiredLocation,
+ program->GetUniformFakeLocation(kUniform3BadName));
+ EXPECT_EQ(kUniform3DesiredLocation,
+ program->GetUniformFakeLocation(kUniform3GoodName));
+}
+
+class ProgramManagerWithCacheTest : public GpuServiceTest {
+ public:
+ static const GLuint kClientProgramId = 1;
+ static const GLuint kServiceProgramId = 10;
+ static const GLuint kVertexShaderClientId = 2;
+ static const GLuint kFragmentShaderClientId = 20;
+ static const GLuint kVertexShaderServiceId = 3;
+ static const GLuint kFragmentShaderServiceId = 30;
+
+ ProgramManagerWithCacheTest()
+ : cache_(new MockProgramCache()),
+ manager_(cache_.get(), kMaxVaryingVectors),
+ vertex_shader_(NULL),
+ fragment_shader_(NULL),
+ program_(NULL) {
+ }
+ virtual ~ProgramManagerWithCacheTest() {
+ manager_.Destroy(false);
+ shader_manager_.Destroy(false);
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+
+ vertex_shader_ = shader_manager_.CreateShader(
+ kVertexShaderClientId, kVertexShaderServiceId, GL_VERTEX_SHADER);
+ fragment_shader_ = shader_manager_.CreateShader(
+ kFragmentShaderClientId, kFragmentShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(vertex_shader_ != NULL);
+ ASSERT_TRUE(fragment_shader_ != NULL);
+ vertex_shader_->set_source("lka asjf bjajsdfj");
+ fragment_shader_->set_source("lka asjf a fasgag 3rdsf3 bjajsdfj");
+
+ program_ = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program_ != NULL);
+
+ program_->AttachShader(&shader_manager_, vertex_shader_);
+ program_->AttachShader(&shader_manager_, fragment_shader_);
+ }
+
+ void SetShadersCompiled() {
+ TestHelper::SetShaderStates(gl_.get(), vertex_shader_, true);
+ TestHelper::SetShaderStates(gl_.get(), fragment_shader_, true);
+ }
+
+ void SetProgramCached() {
+ cache_->LinkedProgramCacheSuccess(
+ vertex_shader_->source(),
+ NULL,
+ fragment_shader_->source(),
+ NULL,
+ &program_->bind_attrib_location_map());
+ }
+
+ void SetExpectationsForProgramCached() {
+ SetExpectationsForProgramCached(program_,
+ vertex_shader_,
+ fragment_shader_);
+ }
+
+ void SetExpectationsForProgramCached(
+ Program* program,
+ Shader* vertex_shader,
+ Shader* fragment_shader) {
+ EXPECT_CALL(*cache_.get(), SaveLinkedProgram(
+ program->service_id(),
+ vertex_shader,
+ NULL,
+ fragment_shader,
+ NULL,
+ &program->bind_attrib_location_map(),
+ _)).Times(1);
+ }
+
+ void SetExpectationsForNotCachingProgram() {
+ SetExpectationsForNotCachingProgram(program_,
+ vertex_shader_,
+ fragment_shader_);
+ }
+
+ void SetExpectationsForNotCachingProgram(
+ Program* program,
+ Shader* vertex_shader,
+ Shader* fragment_shader) {
+ EXPECT_CALL(*cache_.get(), SaveLinkedProgram(
+ program->service_id(),
+ vertex_shader,
+ NULL,
+ fragment_shader,
+ NULL,
+ &program->bind_attrib_location_map(),
+ _)).Times(0);
+ }
+
+ void SetExpectationsForProgramLoad(ProgramCache::ProgramLoadResult result) {
+ SetExpectationsForProgramLoad(kServiceProgramId,
+ program_,
+ vertex_shader_,
+ fragment_shader_,
+ result);
+ }
+
+ void SetExpectationsForProgramLoad(
+ GLuint service_program_id,
+ Program* program,
+ Shader* vertex_shader,
+ Shader* fragment_shader,
+ ProgramCache::ProgramLoadResult result) {
+ EXPECT_CALL(*cache_.get(),
+ LoadLinkedProgram(service_program_id,
+ vertex_shader,
+ NULL,
+ fragment_shader,
+ NULL,
+ &program->bind_attrib_location_map(),
+ _))
+ .WillOnce(Return(result));
+ }
+
+ void SetExpectationsForProgramLoadSuccess() {
+ SetExpectationsForProgramLoadSuccess(kServiceProgramId);
+ }
+
+ void SetExpectationsForProgramLoadSuccess(GLuint service_program_id) {
+ TestHelper::SetupProgramSuccessExpectations(gl_.get(),
+ NULL,
+ 0,
+ NULL,
+ 0,
+ service_program_id);
+ }
+
+ void SetExpectationsForProgramLink() {
+ SetExpectationsForProgramLink(kServiceProgramId);
+ }
+
+ void SetExpectationsForProgramLink(GLuint service_program_id) {
+ TestHelper::SetupShader(gl_.get(), NULL, 0, NULL, 0, service_program_id);
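+ // When GL_ARB_get_program_binary is available, a successful link is also
+ // expected to mark the program binary as retrievable so it can be cached.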
+ if (gfx::g_driver_gl.ext.b_GL_ARB_get_program_binary) {
+ EXPECT_CALL(*gl_.get(),
+ ProgramParameteri(service_program_id,
+ PROGRAM_BINARY_RETRIEVABLE_HINT,
+ GL_TRUE)).Times(1);
+ }
+ }
+
+ void SetExpectationsForSuccessCompile(
+ const Shader* shader) {
+ const GLuint shader_id = shader->service_id();
+ const char* src = shader->source().c_str();
+ EXPECT_CALL(*gl_.get(),
+ ShaderSource(shader_id, 1, Pointee(src), NULL)).Times(1);
+ EXPECT_CALL(*gl_.get(), CompileShader(shader_id)).Times(1);
+ EXPECT_CALL(*gl_.get(), GetShaderiv(shader_id, GL_COMPILE_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(GL_TRUE));
+ }
+
+ void SetExpectationsForNoCompile(const Shader* shader) {
+ const GLuint shader_id = shader->service_id();
+ const char* src = shader->source().c_str();
+ EXPECT_CALL(*gl_.get(),
+ ShaderSource(shader_id, 1, Pointee(src), NULL)).Times(0);
+ EXPECT_CALL(*gl_.get(), CompileShader(shader_id)).Times(0);
+ EXPECT_CALL(*gl_.get(), GetShaderiv(shader_id, GL_COMPILE_STATUS, _))
+ .Times(0);
+ }
+
+ void SetExpectationsForErrorCompile(const Shader* shader) {
+ const GLuint shader_id = shader->service_id();
+ const char* src = shader->source().c_str();
+ EXPECT_CALL(*gl_.get(),
+ ShaderSource(shader_id, 1, Pointee(src), NULL)).Times(1);
+ EXPECT_CALL(*gl_.get(), CompileShader(shader_id)).Times(1);
+ EXPECT_CALL(*gl_.get(), GetShaderiv(shader_id, GL_COMPILE_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(GL_FALSE));
+ EXPECT_CALL(*gl_.get(), GetShaderiv(shader_id, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(*gl_.get(), GetShaderInfoLog(shader_id, 0, _, _))
+ .Times(1);
+ }
+
+ scoped_ptr<MockProgramCache> cache_;
+ ProgramManager manager_;
+
+ Shader* vertex_shader_;
+ Shader* fragment_shader_;
+ Program* program_;
+ ShaderManager shader_manager_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLuint ProgramManagerWithCacheTest::kClientProgramId;
+const GLuint ProgramManagerWithCacheTest::kServiceProgramId;
+const GLuint ProgramManagerWithCacheTest::kVertexShaderClientId;
+const GLuint ProgramManagerWithCacheTest::kFragmentShaderClientId;
+const GLuint ProgramManagerWithCacheTest::kVertexShaderServiceId;
+const GLuint ProgramManagerWithCacheTest::kFragmentShaderServiceId;
+#endif
+
+TEST_F(ProgramManagerWithCacheTest, CacheProgramOnSuccessfulLink) {
+ SetShadersCompiled();
+ SetExpectationsForProgramLink();
+ SetExpectationsForProgramCached();
+ EXPECT_TRUE(program_->Link(NULL, NULL, NULL,
+ Program::kCountOnlyStaticallyUsed, base::Bind(&ShaderCacheCb)));
+}
+
+TEST_F(ProgramManagerWithCacheTest, LoadProgramOnProgramCacheHit) {
+ SetShadersCompiled();
+ SetProgramCached();
+
+ SetExpectationsForNoCompile(vertex_shader_);
+ SetExpectationsForNoCompile(fragment_shader_);
+ SetExpectationsForProgramLoad(ProgramCache::PROGRAM_LOAD_SUCCESS);
+ SetExpectationsForNotCachingProgram();
+ SetExpectationsForProgramLoadSuccess();
+
+ EXPECT_TRUE(program_->Link(NULL, NULL, NULL,
+ Program::kCountOnlyStaticallyUsed, base::Bind(&ShaderCacheCb)));
+}
+
+} // namespace gles2
+} // namespace gpu
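
The two tests above pin down the program-cache protocol: a successful link saves the
linked program, and a later link with identical shader sources skips compilation and
loads from the cache instead. Below is a minimal standalone sketch of that control
flow; the types and helper names are hypothetical stand-ins, not the Chromium
ProgramCache interface.

#include <map>
#include <string>

// Hypothetical, simplified cache keyed on the two shader sources.
struct TinyProgramCache {
  std::map<std::string, std::string> binaries;

  bool HasBinary(const std::string& vs, const std::string& fs) const {
    return binaries.count(vs + "\n" + fs) != 0;
  }
  void Save(const std::string& vs, const std::string& fs,
            const std::string& binary) {
    binaries[vs + "\n" + fs] = binary;
  }
};

// Mirrors the flow exercised by CacheProgramOnSuccessfulLink and
// LoadProgramOnProgramCacheHit: on a hit, load and skip compilation and
// saving; on a miss, compile, link, and (only after a successful link) save.
bool LinkProgramSketch(TinyProgramCache* cache,
                       const std::string& vs_source,
                       const std::string& fs_source) {
  if (cache && cache->HasBinary(vs_source, fs_source)) {
    // Cache hit: program-binary load path; shaders are never recompiled.
    return true;
  }
  // Cache miss: compile both shaders and link, then cache for next time.
  if (cache)
    cache->Save(vs_source, fs_source, "opaque program binary");
  return true;
}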
diff --git a/gpu/command_buffer/service/query_manager.cc b/gpu/command_buffer/service/query_manager.cc
new file mode 100644
index 0000000..1d36c89
--- /dev/null
+++ b/gpu/command_buffer/service/query_manager.cc
@@ -0,0 +1,747 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/query_manager.h"
+
+#include "base/atomicops.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/numerics/safe_math.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "ui/gl/gl_fence.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+class AsyncPixelTransferCompletionObserverImpl
+ : public AsyncPixelTransferCompletionObserver {
+ public:
+ AsyncPixelTransferCompletionObserverImpl(base::subtle::Atomic32 submit_count)
+ : submit_count_(submit_count), cancelled_(false) {}
+
+ void Cancel() {
+ base::AutoLock locked(lock_);
+ cancelled_ = true;
+ }
+
+ virtual void DidComplete(const AsyncMemoryParams& mem_params) OVERRIDE {
+ base::AutoLock locked(lock_);
+ if (!cancelled_) {
+ DCHECK(mem_params.buffer().get());
+ void* data = mem_params.GetDataAddress();
+ QuerySync* sync = static_cast<QuerySync*>(data);
+ base::subtle::Release_Store(&sync->process_count, submit_count_);
+ }
+ }
+
+ private:
+ virtual ~AsyncPixelTransferCompletionObserverImpl() {}
+
+ base::subtle::Atomic32 submit_count_;
+
+ base::Lock lock_;
+ bool cancelled_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferCompletionObserverImpl);
+};
+
+class AsyncPixelTransfersCompletedQuery
+ : public QueryManager::Query,
+ public base::SupportsWeakPtr<AsyncPixelTransfersCompletedQuery> {
+ public:
+ AsyncPixelTransfersCompletedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~AsyncPixelTransfersCompletedQuery();
+
+ scoped_refptr<AsyncPixelTransferCompletionObserverImpl> observer_;
+};
+
+AsyncPixelTransfersCompletedQuery::AsyncPixelTransfersCompletedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset) {
+}
+
+bool AsyncPixelTransfersCompletedQuery::Begin() {
+ return true;
+}
+
+bool AsyncPixelTransfersCompletedQuery::End(
+ base::subtle::Atomic32 submit_count) {
+ // Get the real shared memory since it might need to be duped to prevent
+ // use-after-free of the memory.
+ scoped_refptr<Buffer> buffer =
+ manager()->decoder()->GetSharedMemoryBuffer(shm_id());
+ if (!buffer.get())
+ return false;
+ AsyncMemoryParams mem_params(buffer, shm_offset(), sizeof(QuerySync));
+ if (!mem_params.GetDataAddress())
+ return false;
+
+ observer_ = new AsyncPixelTransferCompletionObserverImpl(submit_count);
+
+  // Ask the AsyncPixelTransferDelegate to run the completion callback after
+  // all previous async transfers are done. There is no guarantee that the
+  // callback runs on the current thread.
+ manager()->decoder()->GetAsyncPixelTransferManager()->AsyncNotifyCompletion(
+ mem_params, observer_.get());
+
+ return AddToPendingTransferQueue(submit_count);
+}
+
+bool AsyncPixelTransfersCompletedQuery::Process() {
+ QuerySync* sync = manager()->decoder()->GetSharedMemoryAs<QuerySync*>(
+ shm_id(), shm_offset(), sizeof(*sync));
+ if (!sync)
+ return false;
+
+  // Check whether the completion callback has run. Atomicity of
+  // sync->process_count is guaranteed, as it is already used to notify the
+  // client of a completed query.
+ if (base::subtle::Acquire_Load(&sync->process_count) != submit_count())
+ return true;
+
+ UnmarkAsPending();
+ return true;
+}
+
+void AsyncPixelTransfersCompletedQuery::Destroy(bool /* have_context */) {
+ if (!IsDeleted()) {
+ MarkAsDeleted();
+ }
+}
+
+AsyncPixelTransfersCompletedQuery::~AsyncPixelTransfersCompletedQuery() {
+ if (observer_.get())
+ observer_->Cancel();
+}
+
+} // namespace
+
+class AllSamplesPassedQuery : public QueryManager::Query {
+ public:
+ AllSamplesPassedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset,
+ GLuint service_id);
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~AllSamplesPassedQuery();
+
+ private:
+ // Service side query id.
+ GLuint service_id_;
+};
+
+AllSamplesPassedQuery::AllSamplesPassedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset,
+ GLuint service_id)
+ : Query(manager, target, shm_id, shm_offset),
+ service_id_(service_id) {
+}
+
+bool AllSamplesPassedQuery::Begin() {
+ BeginQueryHelper(target(), service_id_);
+ return true;
+}
+
+bool AllSamplesPassedQuery::End(base::subtle::Atomic32 submit_count) {
+ EndQueryHelper(target());
+ return AddToPendingQueue(submit_count);
+}
+
+bool AllSamplesPassedQuery::Process() {
+ GLuint available = 0;
+ glGetQueryObjectuivARB(
+ service_id_, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ if (!available) {
+ return true;
+ }
+ GLuint result = 0;
+ glGetQueryObjectuivARB(
+ service_id_, GL_QUERY_RESULT_EXT, &result);
+
+ return MarkAsCompleted(result != 0);
+}
+
+void AllSamplesPassedQuery::Destroy(bool have_context) {
+ if (have_context && !IsDeleted()) {
+ glDeleteQueriesARB(1, &service_id_);
+ MarkAsDeleted();
+ }
+}
+
+AllSamplesPassedQuery::~AllSamplesPassedQuery() {
+}
+
+class CommandsIssuedQuery : public QueryManager::Query {
+ public:
+ CommandsIssuedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~CommandsIssuedQuery();
+
+ private:
+ base::TimeTicks begin_time_;
+};
+
+CommandsIssuedQuery::CommandsIssuedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset) {
+}
+
+bool CommandsIssuedQuery::Begin() {
+ begin_time_ = base::TimeTicks::HighResNow();
+ return true;
+}
+
+bool CommandsIssuedQuery::End(base::subtle::Atomic32 submit_count) {
+ base::TimeDelta elapsed = base::TimeTicks::HighResNow() - begin_time_;
+ MarkAsPending(submit_count);
+ return MarkAsCompleted(elapsed.InMicroseconds());
+}
+
+bool CommandsIssuedQuery::Process() {
+ NOTREACHED();
+ return true;
+}
+
+void CommandsIssuedQuery::Destroy(bool /* have_context */) {
+ if (!IsDeleted()) {
+ MarkAsDeleted();
+ }
+}
+
+CommandsIssuedQuery::~CommandsIssuedQuery() {
+}
+
+class CommandLatencyQuery : public QueryManager::Query {
+ public:
+ CommandLatencyQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~CommandLatencyQuery();
+};
+
+CommandLatencyQuery::CommandLatencyQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset) {
+}
+
+bool CommandLatencyQuery::Begin() {
+ return true;
+}
+
+bool CommandLatencyQuery::End(base::subtle::Atomic32 submit_count) {
+ base::TimeDelta now = base::TimeTicks::HighResNow() - base::TimeTicks();
+ MarkAsPending(submit_count);
+ return MarkAsCompleted(now.InMicroseconds());
+}
+
+bool CommandLatencyQuery::Process() {
+ NOTREACHED();
+ return true;
+}
+
+void CommandLatencyQuery::Destroy(bool /* have_context */) {
+ if (!IsDeleted()) {
+ MarkAsDeleted();
+ }
+}
+
+CommandLatencyQuery::~CommandLatencyQuery() {
+}
+
+
+class AsyncReadPixelsCompletedQuery
+ : public QueryManager::Query,
+ public base::SupportsWeakPtr<AsyncReadPixelsCompletedQuery> {
+ public:
+ AsyncReadPixelsCompletedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ void Complete();
+ virtual ~AsyncReadPixelsCompletedQuery();
+
+ private:
+ bool completed_;
+ bool complete_result_;
+};
+
+AsyncReadPixelsCompletedQuery::AsyncReadPixelsCompletedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset),
+ completed_(false),
+ complete_result_(false) {
+}
+
+bool AsyncReadPixelsCompletedQuery::Begin() {
+ return true;
+}
+
+bool AsyncReadPixelsCompletedQuery::End(base::subtle::Atomic32 submit_count) {
+ if (!AddToPendingQueue(submit_count)) {
+ return false;
+ }
+ manager()->decoder()->WaitForReadPixels(
+ base::Bind(&AsyncReadPixelsCompletedQuery::Complete,
+ AsWeakPtr()));
+
+ return Process();
+}
+
+void AsyncReadPixelsCompletedQuery::Complete() {
+ completed_ = true;
+ complete_result_ = MarkAsCompleted(1);
+}
+
+bool AsyncReadPixelsCompletedQuery::Process() {
+ return !completed_ || complete_result_;
+}
+
+void AsyncReadPixelsCompletedQuery::Destroy(bool /* have_context */) {
+ if (!IsDeleted()) {
+ MarkAsDeleted();
+ }
+}
+
+AsyncReadPixelsCompletedQuery::~AsyncReadPixelsCompletedQuery() {
+}
+
+
+class GetErrorQuery : public QueryManager::Query {
+ public:
+ GetErrorQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~GetErrorQuery();
+
+ private:
+};
+
+GetErrorQuery::GetErrorQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset) {
+}
+
+bool GetErrorQuery::Begin() {
+ return true;
+}
+
+bool GetErrorQuery::End(base::subtle::Atomic32 submit_count) {
+ MarkAsPending(submit_count);
+ return MarkAsCompleted(manager()->decoder()->GetErrorState()->GetGLError());
+}
+
+bool GetErrorQuery::Process() {
+ NOTREACHED();
+ return true;
+}
+
+void GetErrorQuery::Destroy(bool /* have_context */) {
+ if (!IsDeleted()) {
+ MarkAsDeleted();
+ }
+}
+
+GetErrorQuery::~GetErrorQuery() {
+}
+
+class CommandsCompletedQuery : public QueryManager::Query {
+ public:
+ CommandsCompletedQuery(QueryManager* manager,
+ GLenum target,
+ int32 shm_id,
+ uint32 shm_offset);
+
+ // Overridden from QueryManager::Query:
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~CommandsCompletedQuery();
+
+ private:
+ scoped_ptr<gfx::GLFence> fence_;
+};
+
+CommandsCompletedQuery::CommandsCompletedQuery(QueryManager* manager,
+ GLenum target,
+ int32 shm_id,
+ uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset) {}
+
+bool CommandsCompletedQuery::Begin() { return true; }
+
+bool CommandsCompletedQuery::End(base::subtle::Atomic32 submit_count) {
+ fence_.reset(gfx::GLFence::Create());
+ DCHECK(fence_);
+ return AddToPendingQueue(submit_count);
+}
+
+bool CommandsCompletedQuery::Process() {
+ if (fence_ && !fence_->HasCompleted())
+ return true;
+ return MarkAsCompleted(0);
+}
+
+void CommandsCompletedQuery::Destroy(bool have_context) {
+ if (have_context && !IsDeleted()) {
+ fence_.reset();
+ MarkAsDeleted();
+ }
+}
+
+CommandsCompletedQuery::~CommandsCompletedQuery() {}
+
+QueryManager::QueryManager(
+ GLES2Decoder* decoder,
+ FeatureInfo* feature_info)
+ : decoder_(decoder),
+ use_arb_occlusion_query2_for_occlusion_query_boolean_(
+ feature_info->feature_flags(
+ ).use_arb_occlusion_query2_for_occlusion_query_boolean),
+ use_arb_occlusion_query_for_occlusion_query_boolean_(
+ feature_info->feature_flags(
+ ).use_arb_occlusion_query_for_occlusion_query_boolean),
+ query_count_(0) {
+ DCHECK(!(use_arb_occlusion_query_for_occlusion_query_boolean_ &&
+ use_arb_occlusion_query2_for_occlusion_query_boolean_));
+}
+
+QueryManager::~QueryManager() {
+ DCHECK(queries_.empty());
+
+ // If this triggers, that means something is keeping a reference to
+ // a Query belonging to this.
+ CHECK_EQ(query_count_, 0u);
+}
+
+void QueryManager::Destroy(bool have_context) {
+ pending_queries_.clear();
+ pending_transfer_queries_.clear();
+ while (!queries_.empty()) {
+ Query* query = queries_.begin()->second.get();
+ query->Destroy(have_context);
+ queries_.erase(queries_.begin());
+ }
+}
+
+QueryManager::Query* QueryManager::CreateQuery(
+ GLenum target, GLuint client_id, int32 shm_id, uint32 shm_offset) {
+ scoped_refptr<Query> query;
+ switch (target) {
+ case GL_COMMANDS_ISSUED_CHROMIUM:
+ query = new CommandsIssuedQuery(this, target, shm_id, shm_offset);
+ break;
+ case GL_LATENCY_QUERY_CHROMIUM:
+ query = new CommandLatencyQuery(this, target, shm_id, shm_offset);
+ break;
+ case GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM:
+ // Currently async pixel transfer delegates only support uploads.
+ query = new AsyncPixelTransfersCompletedQuery(
+ this, target, shm_id, shm_offset);
+ break;
+ case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
+ query = new AsyncReadPixelsCompletedQuery(
+ this, target, shm_id, shm_offset);
+ break;
+ case GL_GET_ERROR_QUERY_CHROMIUM:
+ query = new GetErrorQuery(this, target, shm_id, shm_offset);
+ break;
+ case GL_COMMANDS_COMPLETED_CHROMIUM:
+ query = new CommandsCompletedQuery(this, target, shm_id, shm_offset);
+ break;
+ default: {
+ GLuint service_id = 0;
+ glGenQueriesARB(1, &service_id);
+ DCHECK_NE(0u, service_id);
+ query = new AllSamplesPassedQuery(
+ this, target, shm_id, shm_offset, service_id);
+ break;
+ }
+ }
+ std::pair<QueryMap::iterator, bool> result =
+ queries_.insert(std::make_pair(client_id, query));
+ DCHECK(result.second);
+ return query.get();
+}
+
+void QueryManager::GenQueries(GLsizei n, const GLuint* queries) {
+ DCHECK_GE(n, 0);
+ for (GLsizei i = 0; i < n; ++i) {
+ generated_query_ids_.insert(queries[i]);
+ }
+}
+
+bool QueryManager::IsValidQuery(GLuint id) {
+ GeneratedQueryIds::iterator it = generated_query_ids_.find(id);
+ return it != generated_query_ids_.end();
+}
+
+QueryManager::Query* QueryManager::GetQuery(
+ GLuint client_id) {
+ QueryMap::iterator it = queries_.find(client_id);
+ return it != queries_.end() ? it->second.get() : NULL;
+}
+
+void QueryManager::RemoveQuery(GLuint client_id) {
+ QueryMap::iterator it = queries_.find(client_id);
+ if (it != queries_.end()) {
+ Query* query = it->second.get();
+ RemovePendingQuery(query);
+ query->MarkAsDeleted();
+ queries_.erase(it);
+ }
+ generated_query_ids_.erase(client_id);
+}
+
+void QueryManager::StartTracking(QueryManager::Query* /* query */) {
+ ++query_count_;
+}
+
+void QueryManager::StopTracking(QueryManager::Query* /* query */) {
+ --query_count_;
+}
+
+GLenum QueryManager::AdjustTargetForEmulation(GLenum target) {
+ switch (target) {
+ case GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT:
+ case GL_ANY_SAMPLES_PASSED_EXT:
+ if (use_arb_occlusion_query2_for_occlusion_query_boolean_) {
+ // ARB_occlusion_query2 does not have a
+ // GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT
+ // target.
+ target = GL_ANY_SAMPLES_PASSED_EXT;
+ } else if (use_arb_occlusion_query_for_occlusion_query_boolean_) {
+ // ARB_occlusion_query does not have a
+ // GL_ANY_SAMPLES_PASSED_EXT
+ // target.
+ target = GL_SAMPLES_PASSED_ARB;
+ }
+ break;
+ default:
+ break;
+ }
+ return target;
+}
+
+void QueryManager::BeginQueryHelper(GLenum target, GLuint id) {
+ target = AdjustTargetForEmulation(target);
+ glBeginQueryARB(target, id);
+}
+
+void QueryManager::EndQueryHelper(GLenum target) {
+ target = AdjustTargetForEmulation(target);
+ glEndQueryARB(target);
+}
+
+QueryManager::Query::Query(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : manager_(manager),
+ target_(target),
+ shm_id_(shm_id),
+ shm_offset_(shm_offset),
+ submit_count_(0),
+ pending_(false),
+ deleted_(false) {
+ DCHECK(manager);
+ manager_->StartTracking(this);
+}
+
+void QueryManager::Query::RunCallbacks() {
+ for (size_t i = 0; i < callbacks_.size(); i++) {
+ callbacks_[i].Run();
+ }
+ callbacks_.clear();
+}
+
+void QueryManager::Query::AddCallback(base::Closure callback) {
+ if (pending_) {
+ callbacks_.push_back(callback);
+ } else {
+ callback.Run();
+ }
+}
+
+QueryManager::Query::~Query() {
+ // The query is getting deleted, either by the client or
+ // because the context was lost. Call any outstanding
+ // callbacks to avoid leaks.
+ RunCallbacks();
+ if (manager_) {
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+bool QueryManager::Query::MarkAsCompleted(uint64 result) {
+ DCHECK(pending_);
+ QuerySync* sync = manager_->decoder_->GetSharedMemoryAs<QuerySync*>(
+ shm_id_, shm_offset_, sizeof(*sync));
+ if (!sync) {
+ return false;
+ }
+
+ pending_ = false;
+ sync->result = result;
+ base::subtle::Release_Store(&sync->process_count, submit_count_);
+
+ return true;
+}
+
+bool QueryManager::ProcessPendingQueries() {
+ while (!pending_queries_.empty()) {
+ Query* query = pending_queries_.front().get();
+ if (!query->Process()) {
+ return false;
+ }
+ if (query->pending()) {
+ break;
+ }
+ query->RunCallbacks();
+ pending_queries_.pop_front();
+ }
+
+ return true;
+}
+
+bool QueryManager::HavePendingQueries() {
+ return !pending_queries_.empty();
+}
+
+bool QueryManager::ProcessPendingTransferQueries() {
+ while (!pending_transfer_queries_.empty()) {
+ Query* query = pending_transfer_queries_.front().get();
+ if (!query->Process()) {
+ return false;
+ }
+ if (query->pending()) {
+ break;
+ }
+ query->RunCallbacks();
+ pending_transfer_queries_.pop_front();
+ }
+
+ return true;
+}
+
+bool QueryManager::HavePendingTransferQueries() {
+ return !pending_transfer_queries_.empty();
+}
+
+bool QueryManager::AddPendingQuery(Query* query,
+ base::subtle::Atomic32 submit_count) {
+ DCHECK(query);
+ DCHECK(!query->IsDeleted());
+ if (!RemovePendingQuery(query)) {
+ return false;
+ }
+ query->MarkAsPending(submit_count);
+ pending_queries_.push_back(query);
+ return true;
+}
+
+bool QueryManager::AddPendingTransferQuery(
+ Query* query,
+ base::subtle::Atomic32 submit_count) {
+ DCHECK(query);
+ DCHECK(!query->IsDeleted());
+ if (!RemovePendingQuery(query)) {
+ return false;
+ }
+ query->MarkAsPending(submit_count);
+ pending_transfer_queries_.push_back(query);
+ return true;
+}
+
+bool QueryManager::RemovePendingQuery(Query* query) {
+ DCHECK(query);
+ if (query->pending()) {
+ // TODO(gman): Speed this up if this is a common operation. This would only
+    // happen if you do begin/end begin/end on the same query without waiting
+ // for the first one to finish.
+ for (QueryQueue::iterator it = pending_queries_.begin();
+ it != pending_queries_.end(); ++it) {
+ if (it->get() == query) {
+ pending_queries_.erase(it);
+ break;
+ }
+ }
+ for (QueryQueue::iterator it = pending_transfer_queries_.begin();
+ it != pending_transfer_queries_.end(); ++it) {
+ if (it->get() == query) {
+ pending_transfer_queries_.erase(it);
+ break;
+ }
+ }
+ if (!query->MarkAsCompleted(0)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool QueryManager::BeginQuery(Query* query) {
+ DCHECK(query);
+ if (!RemovePendingQuery(query)) {
+ return false;
+ }
+ return query->Begin();
+}
+
+bool QueryManager::EndQuery(Query* query, base::subtle::Atomic32 submit_count) {
+ DCHECK(query);
+ if (!RemovePendingQuery(query)) {
+ return false;
+ }
+ return query->End(submit_count);
+}
+
+} // namespace gles2
+} // namespace gpu
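
QueryManager above completes queries strictly in submission order:
ProcessPendingQueries() walks the pending queue from the front, stops at the first
query whose result is not yet available, and for each completed query writes the
result and then the submit count into shared memory so the client can observe
completion. A compact standalone sketch of that discipline (hypothetical, simplified
types, not the Chromium classes):

#include <deque>

struct TinyQuery {
  int submit_count;
  bool pending;
  bool available;   // stands in for GL_QUERY_RESULT_AVAILABLE_EXT
  unsigned result;  // stands in for GL_QUERY_RESULT_EXT
};

struct TinySync {   // what QuerySync models in shared memory
  int process_count;
  unsigned result;
};

// Stops at the first unavailable query; everything behind it must wait.
void ProcessPendingSketch(std::deque<TinyQuery*>* queue, TinySync* sync) {
  while (!queue->empty()) {
    TinyQuery* q = queue->front();
    if (!q->available)
      break;
    sync->result = q->result;               // MarkAsCompleted(): result first,
    sync->process_count = q->submit_count;  // then the count the client polls
    q->pending = false;
    queue->pop_front();
  }
}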
diff --git a/gpu/command_buffer/service/query_manager.h b/gpu/command_buffer/service/query_manager.h
new file mode 100644
index 0000000..62da3b8
--- /dev/null
+++ b/gpu/command_buffer/service/query_manager.h
@@ -0,0 +1,249 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_QUERY_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_QUERY_MANAGER_H_
+
+#include <deque>
+#include <vector>
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class GLES2Decoder;
+
+namespace gles2 {
+
+class FeatureInfo;
+
+// This class keeps track of the queries and their state.
+// As Queries are not shared, there is one QueryManager per context.
+class GPU_EXPORT QueryManager {
+ public:
+ class GPU_EXPORT Query : public base::RefCounted<Query> {
+ public:
+ Query(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ GLenum target() const {
+ return target_;
+ }
+
+ bool IsDeleted() const {
+ return deleted_;
+ }
+
+ bool IsValid() const {
+ return target() && !IsDeleted();
+ }
+
+ bool pending() const {
+ return pending_;
+ }
+
+ int32 shm_id() const {
+ return shm_id_;
+ }
+
+ uint32 shm_offset() const {
+ return shm_offset_;
+ }
+
+ // Returns false if shared memory for sync is invalid.
+ virtual bool Begin() = 0;
+
+ // Returns false if shared memory for sync is invalid.
+ virtual bool End(base::subtle::Atomic32 submit_count) = 0;
+
+ // Returns false if shared memory for sync is invalid.
+ virtual bool Process() = 0;
+
+ virtual void Destroy(bool have_context) = 0;
+
+ void AddCallback(base::Closure callback);
+
+ protected:
+ virtual ~Query();
+
+ QueryManager* manager() const {
+ return manager_;
+ }
+
+ void MarkAsDeleted() {
+ deleted_ = true;
+ }
+
+ // Returns false if shared memory for sync is invalid.
+ bool MarkAsCompleted(uint64 result);
+
+ void MarkAsPending(base::subtle::Atomic32 submit_count) {
+ DCHECK(!pending_);
+ pending_ = true;
+ submit_count_ = submit_count;
+ }
+
+ void UnmarkAsPending() {
+ DCHECK(pending_);
+ pending_ = false;
+ }
+
+ // Returns false if shared memory for sync is invalid.
+ bool AddToPendingQueue(base::subtle::Atomic32 submit_count) {
+ return manager_->AddPendingQuery(this, submit_count);
+ }
+
+ // Returns false if shared memory for sync is invalid.
+ bool AddToPendingTransferQueue(base::subtle::Atomic32 submit_count) {
+ return manager_->AddPendingTransferQuery(this, submit_count);
+ }
+
+ void BeginQueryHelper(GLenum target, GLuint id) {
+ manager_->BeginQueryHelper(target, id);
+ }
+
+ void EndQueryHelper(GLenum target) {
+ manager_->EndQueryHelper(target);
+ }
+
+ base::subtle::Atomic32 submit_count() const { return submit_count_; }
+
+ private:
+ friend class QueryManager;
+ friend class QueryManagerTest;
+ friend class base::RefCounted<Query>;
+
+ void RunCallbacks();
+
+ // The manager that owns this Query.
+ QueryManager* manager_;
+
+ // The type of query.
+ GLenum target_;
+
+ // The shared memory used with this Query.
+ int32 shm_id_;
+ uint32 shm_offset_;
+
+    // Count to set the process count to when completed.
+ base::subtle::Atomic32 submit_count_;
+
+ // True if in the queue.
+ bool pending_;
+
+ // True if deleted.
+ bool deleted_;
+
+ // List of callbacks to run when result is available.
+ std::vector<base::Closure> callbacks_;
+ };
+
+ QueryManager(
+ GLES2Decoder* decoder,
+ FeatureInfo* feature_info);
+ ~QueryManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a Query for the given query.
+ Query* CreateQuery(
+ GLenum target, GLuint client_id, int32 shm_id, uint32 shm_offset);
+
+ // Gets the query info for the given query.
+ Query* GetQuery(GLuint client_id);
+
+  // Removes the query info for the given query.
+ void RemoveQuery(GLuint client_id);
+
+ // Returns false if any query is pointing to invalid shared memory.
+ bool BeginQuery(Query* query);
+
+ // Returns false if any query is pointing to invalid shared memory.
+ bool EndQuery(Query* query, base::subtle::Atomic32 submit_count);
+
+ // Processes pending queries. Returns false if any queries are pointing
+ // to invalid shared memory.
+ bool ProcessPendingQueries();
+
+ // True if there are pending queries.
+ bool HavePendingQueries();
+
+ // Processes pending transfer queries. Returns false if any queries are
+ // pointing to invalid shared memory.
+ bool ProcessPendingTransferQueries();
+
+ // True if there are pending transfer queries.
+ bool HavePendingTransferQueries();
+
+ GLES2Decoder* decoder() const {
+ return decoder_;
+ }
+
+ void GenQueries(GLsizei n, const GLuint* queries);
+ bool IsValidQuery(GLuint id);
+
+ private:
+ void StartTracking(Query* query);
+ void StopTracking(Query* query);
+
+ // Wrappers for BeginQueryARB and EndQueryARB to hide differences between
+ // ARB_occlusion_query2 and EXT_occlusion_query_boolean.
+ void BeginQueryHelper(GLenum target, GLuint id);
+ void EndQueryHelper(GLenum target);
+
+ // Adds to queue of queries waiting for completion.
+ // Returns false if any query is pointing to invalid shared memory.
+ bool AddPendingQuery(Query* query, base::subtle::Atomic32 submit_count);
+
+ // Adds to queue of transfer queries waiting for completion.
+ // Returns false if any query is pointing to invalid shared memory.
+ bool AddPendingTransferQuery(Query* query,
+ base::subtle::Atomic32 submit_count);
+
+ // Removes a query from the queue of pending queries.
+ // Returns false if any query is pointing to invalid shared memory.
+ bool RemovePendingQuery(Query* query);
+
+  // Returns the target to pass to the underlying GL extension
+  // that emulates the query.
+ GLenum AdjustTargetForEmulation(GLenum target);
+
+ // Used to validate shared memory and get GL errors.
+ GLES2Decoder* decoder_;
+
+ bool use_arb_occlusion_query2_for_occlusion_query_boolean_;
+ bool use_arb_occlusion_query_for_occlusion_query_boolean_;
+
+ // Counts the number of Queries allocated with 'this' as their manager.
+ // Allows checking no Query will outlive this.
+ unsigned query_count_;
+
+ // Info for each query in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<Query> > QueryMap;
+ QueryMap queries_;
+
+ typedef base::hash_set<GLuint> GeneratedQueryIds;
+ GeneratedQueryIds generated_query_ids_;
+
+ // Queries waiting for completion.
+ typedef std::deque<scoped_refptr<Query> > QueryQueue;
+ QueryQueue pending_queries_;
+
+ // Async pixel transfer queries waiting for completion.
+ QueryQueue pending_transfer_queries_;
+
+ DISALLOW_COPY_AND_ASSIGN(QueryManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_QUERY_MANAGER_H_
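
Each query type in query_manager.cc implements the four virtuals declared above:
Begin() and End() bracket the client's query, Process() polls for a result, and
Destroy() releases GL resources. As a shape reference, here is a hedged,
self-contained sketch of a query that completes synchronously, in the spirit of
CommandLatencyQuery; the base class is a local stand-in, not
gpu::gles2::QueryManager::Query.

#include <stdint.h>

struct SketchQueryBase {
  virtual ~SketchQueryBase() {}
  virtual bool Begin() = 0;
  virtual bool End(int32_t submit_count) = 0;
  virtual bool Process() = 0;
  virtual void Destroy(bool have_context) = 0;
};

// Completes synchronously at End(), so it is never queued and Process()
// should never be reached -- the same pattern CommandLatencyQuery uses.
struct SketchLatencyQuery : public SketchQueryBase {
  int64_t result_us;
  SketchLatencyQuery() : result_us(0) {}

  virtual bool Begin() { return true; }
  virtual bool End(int32_t /* submit_count */) {
    result_us = 0;  // real code writes a high-resolution timestamp to shared memory
    return true;    // false would signal invalid shared memory
  }
  virtual bool Process() { return true; }
  virtual void Destroy(bool /* have_context */) {}
};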
diff --git a/gpu/command_buffer/service/query_manager_unittest.cc b/gpu/command_buffer/service/query_manager_unittest.cc
new file mode 100644
index 0000000..9f0156f
--- /dev/null
+++ b/gpu/command_buffer/service/query_manager_unittest.cc
@@ -0,0 +1,575 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::SetArgumentPointee;
+
+namespace gpu {
+namespace gles2 {
+
+class QueryManagerTest : public GpuServiceTest {
+ public:
+ static const int32 kSharedMemoryId = 401;
+ static const size_t kSharedBufferSize = 2048;
+ static const uint32 kSharedMemoryOffset = 132;
+ static const int32 kInvalidSharedMemoryId = 402;
+ static const uint32 kInvalidSharedMemoryOffset = kSharedBufferSize + 1;
+ static const uint32 kInitialResult = 0xBDBDBDBDu;
+ static const uint8 kInitialMemoryValue = 0xBDu;
+
+ QueryManagerTest() {
+ }
+ virtual ~QueryManagerTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ engine_.reset(new MockCommandBufferEngine());
+ decoder_.reset(new MockGLES2Decoder());
+ decoder_->set_engine(engine_.get());
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(),
+ "GL_EXT_occlusion_query_boolean");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ manager_.reset(new QueryManager(decoder_.get(), feature_info.get()));
+ }
+
+ virtual void TearDown() {
+ decoder_.reset();
+ manager_->Destroy(false);
+ manager_.reset();
+ engine_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ QueryManager::Query* CreateQuery(
+ GLenum target, GLuint client_id, int32 shm_id, uint32 shm_offset,
+ GLuint service_id) {
+ EXPECT_CALL(*gl_, GenQueriesARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(service_id))
+ .RetiresOnSaturation();
+ return manager_->CreateQuery(target, client_id, shm_id, shm_offset);
+ }
+
+ void QueueQuery(QueryManager::Query* query,
+ GLuint service_id,
+ base::subtle::Atomic32 submit_count) {
+ EXPECT_CALL(*gl_, BeginQueryARB(query->target(), service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, EndQueryARB(query->target()))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->BeginQuery(query));
+ EXPECT_TRUE(manager_->EndQuery(query, submit_count));
+ }
+
+ scoped_ptr<MockGLES2Decoder> decoder_;
+ scoped_ptr<QueryManager> manager_;
+
+ private:
+ class MockCommandBufferEngine : public CommandBufferEngine {
+ public:
+ MockCommandBufferEngine() {
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
+ shared_memory->CreateAndMapAnonymous(kSharedBufferSize);
+ valid_buffer_ =
+ MakeBufferFromSharedMemory(shared_memory.Pass(), kSharedBufferSize);
+ data_ = static_cast<uint8*>(valid_buffer_->memory());
+ ClearSharedMemory();
+ }
+
+ virtual ~MockCommandBufferEngine() {
+ }
+
+ virtual scoped_refptr<gpu::Buffer> GetSharedMemoryBuffer(int32 shm_id)
+ OVERRIDE {
+ return shm_id == kSharedMemoryId ? valid_buffer_ : invalid_buffer_;
+ }
+
+ void ClearSharedMemory() {
+ memset(data_, kInitialMemoryValue, kSharedBufferSize);
+ }
+
+ virtual void set_token(int32 token) OVERRIDE {
+ DCHECK(false);
+ }
+
+ virtual bool SetGetBuffer(int32 /* transfer_buffer_id */) OVERRIDE {
+ DCHECK(false);
+ return false;
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual bool SetGetOffset(int32 offset) OVERRIDE {
+ DCHECK(false);
+ return false;
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual int32 GetGetOffset() OVERRIDE {
+ DCHECK(false);
+ return 0;
+ }
+
+ private:
+ uint8* data_;
+ scoped_refptr<gpu::Buffer> valid_buffer_;
+ scoped_refptr<gpu::Buffer> invalid_buffer_;
+ };
+
+ scoped_ptr<MockCommandBufferEngine> engine_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const int32 QueryManagerTest::kSharedMemoryId;
+const size_t QueryManagerTest::kSharedBufferSize;
+const uint32 QueryManagerTest::kSharedMemoryOffset;
+const int32 QueryManagerTest::kInvalidSharedMemoryId;
+const uint32 QueryManagerTest::kInvalidSharedMemoryOffset;
+const uint32 QueryManagerTest::kInitialResult;
+const uint8 QueryManagerTest::kInitialMemoryValue;
+#endif
+
+TEST_F(QueryManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+
+ EXPECT_FALSE(manager_->HavePendingQueries());
+ // Check we can create a Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(GL_ANY_SAMPLES_PASSED_EXT, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+ // Check we can get the same Query.
+ EXPECT_EQ(query.get(), manager_->GetQuery(kClient1Id));
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(manager_->GetQuery(kClient2Id) == NULL);
+ // Check we can delete the query.
+ manager_->RemoveQuery(kClient1Id);
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(manager_->GetQuery(kClient1Id) == NULL);
+ // Check query is deleted
+ EXPECT_TRUE(query->IsDeleted());
+ EXPECT_FALSE(manager_->HavePendingQueries());
+}
+
+TEST_F(QueryManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(GL_ANY_SAMPLES_PASSED_EXT, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+ EXPECT_CALL(*gl_, DeleteQueriesARB(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_->Destroy(true);
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(manager_->GetQuery(kClient1Id) == NULL);
+ // Check query is deleted
+ EXPECT_TRUE(query->IsDeleted());
+}
+
+TEST_F(QueryManagerTest, QueryBasic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(kTarget, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+
+ EXPECT_TRUE(query->IsValid());
+ EXPECT_FALSE(query->IsDeleted());
+ EXPECT_FALSE(query->pending());
+ EXPECT_EQ(kTarget, query->target());
+ EXPECT_EQ(kSharedMemoryId, query->shm_id());
+ EXPECT_EQ(kSharedMemoryOffset, query->shm_offset());
+}
+
+TEST_F(QueryManagerTest, ProcessPendingQuery) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+ const GLuint kResult = 1;
+
+ // Check nothing happens if there are no pending queries.
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(kTarget, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+
+ // Setup shared memory like client would.
+ QuerySync* sync = decoder_->GetSharedMemoryAs<QuerySync*>(
+ kSharedMemoryId, kSharedMemoryOffset, sizeof(*sync));
+ ASSERT_TRUE(sync != NULL);
+ sync->Reset();
+
+ // Queue it
+ QueueQuery(query.get(), kService1Id, kSubmitCount);
+ EXPECT_TRUE(query->pending());
+ EXPECT_TRUE(manager_->HavePendingQueries());
+
+ // Process with return not available.
+ // Expect 1 GL command.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+ EXPECT_TRUE(query->pending());
+ EXPECT_EQ(0, sync->process_count);
+ EXPECT_EQ(0u, sync->result);
+
+ // Process with return available.
+ // Expect 2 GL commands.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult))
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+ EXPECT_FALSE(query->pending());
+ EXPECT_EQ(kSubmitCount, sync->process_count);
+ EXPECT_EQ(kResult, sync->result);
+ EXPECT_FALSE(manager_->HavePendingQueries());
+
+ // Process with no queries.
+  // Expect no GL commands.
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+}
+
+TEST_F(QueryManagerTest, ProcessPendingQueries) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+ const GLuint kService2Id = 12;
+ const GLuint kClient3Id = 3;
+ const GLuint kService3Id = 13;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount1 = 123;
+ const base::subtle::Atomic32 kSubmitCount2 = 123;
+ const base::subtle::Atomic32 kSubmitCount3 = 123;
+ const GLuint kResult1 = 1;
+ const GLuint kResult2 = 1;
+ const GLuint kResult3 = 1;
+
+ // Setup shared memory like client would.
+ QuerySync* sync1 = decoder_->GetSharedMemoryAs<QuerySync*>(
+ kSharedMemoryId, kSharedMemoryOffset, sizeof(*sync1) * 3);
+ ASSERT_TRUE(sync1 != NULL);
+ QuerySync* sync2 = sync1 + 1;
+ QuerySync* sync3 = sync2 + 1;
+
+ // Create Queries.
+ scoped_refptr<QueryManager::Query> query1(
+ CreateQuery(kTarget, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset + sizeof(*sync1) * 0,
+ kService1Id));
+ scoped_refptr<QueryManager::Query> query2(
+ CreateQuery(kTarget, kClient2Id,
+ kSharedMemoryId, kSharedMemoryOffset + sizeof(*sync1) * 1,
+ kService2Id));
+ scoped_refptr<QueryManager::Query> query3(
+ CreateQuery(kTarget, kClient3Id,
+ kSharedMemoryId, kSharedMemoryOffset + sizeof(*sync1) * 2,
+ kService3Id));
+ ASSERT_TRUE(query1.get() != NULL);
+ ASSERT_TRUE(query2.get() != NULL);
+ ASSERT_TRUE(query3.get() != NULL);
+ EXPECT_FALSE(manager_->HavePendingQueries());
+
+ sync1->Reset();
+ sync2->Reset();
+ sync3->Reset();
+
+ // Queue them
+ QueueQuery(query1.get(), kService1Id, kSubmitCount1);
+ QueueQuery(query2.get(), kService2Id, kSubmitCount2);
+ QueueQuery(query3.get(), kService3Id, kSubmitCount3);
+ EXPECT_TRUE(query1->pending());
+ EXPECT_TRUE(query2->pending());
+ EXPECT_TRUE(query3->pending());
+ EXPECT_TRUE(manager_->HavePendingQueries());
+
+ // Process with return available for first 2 queries.
+ // Expect 4 GL commands.
+ {
+ InSequence s;
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService2Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService2Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult2))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService3Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+ }
+ EXPECT_FALSE(query1->pending());
+ EXPECT_FALSE(query2->pending());
+ EXPECT_TRUE(query3->pending());
+ EXPECT_EQ(kSubmitCount1, sync1->process_count);
+ EXPECT_EQ(kSubmitCount2, sync2->process_count);
+ EXPECT_EQ(kResult1, sync1->result);
+ EXPECT_EQ(kResult2, sync2->result);
+ EXPECT_EQ(0, sync3->process_count);
+ EXPECT_EQ(0u, sync3->result);
+ EXPECT_TRUE(manager_->HavePendingQueries());
+
+  // Process the remaining query. No result yet.
+  // Expect 1 GL command.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService3Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+ EXPECT_TRUE(query3->pending());
+ EXPECT_EQ(0, sync3->process_count);
+ EXPECT_EQ(0u, sync3->result);
+ EXPECT_TRUE(manager_->HavePendingQueries());
+
+  // Process the remaining query. With result.
+ // Expect 2 GL commands.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService3Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService3Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult3))
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+ EXPECT_FALSE(query3->pending());
+ EXPECT_EQ(kSubmitCount3, sync3->process_count);
+ EXPECT_EQ(kResult3, sync3->result);
+ EXPECT_FALSE(manager_->HavePendingQueries());
+}
+
+TEST_F(QueryManagerTest, ProcessPendingBadSharedMemoryId) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+ const GLuint kResult = 1;
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(kTarget, kClient1Id,
+ kInvalidSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+
+ // Queue it
+ QueueQuery(query.get(), kService1Id, kSubmitCount);
+
+ // Process with return available.
+ // Expect 2 GL commands.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult))
+ .RetiresOnSaturation();
+ EXPECT_FALSE(manager_->ProcessPendingQueries());
+}
+
+TEST_F(QueryManagerTest, ProcessPendingBadSharedMemoryOffset) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+ const GLuint kResult = 1;
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(kTarget, kClient1Id,
+ kSharedMemoryId, kInvalidSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+
+ // Queue it
+ QueueQuery(query.get(), kService1Id, kSubmitCount);
+
+ // Process with return available.
+ // Expect 2 GL commands.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult))
+ .RetiresOnSaturation();
+ EXPECT_FALSE(manager_->ProcessPendingQueries());
+}
+
+TEST_F(QueryManagerTest, ExitWithPendingQuery) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(kTarget, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+
+ // Queue it
+ QueueQuery(query.get(), kService1Id, kSubmitCount);
+}
+
+// Test that when emulation is based on ARB_occlusion_query2 we use
+// GL_ANY_SAMPLES_PASSED_EXT for GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT.
+TEST_F(QueryManagerTest, ARBOcclusionQuery2) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(),
+ "GL_ARB_occlusion_query2");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ scoped_ptr<QueryManager> manager(
+ new QueryManager(decoder_.get(), feature_info.get()));
+
+ EXPECT_CALL(*gl_, GenQueriesARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(kService1Id))
+ .RetiresOnSaturation();
+ QueryManager::Query* query = manager->CreateQuery(
+ kTarget, kClient1Id, kSharedMemoryId, kSharedMemoryOffset);
+ ASSERT_TRUE(query != NULL);
+
+ EXPECT_CALL(*gl_, BeginQueryARB(GL_ANY_SAMPLES_PASSED_EXT, kService1Id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, EndQueryARB(GL_ANY_SAMPLES_PASSED_EXT))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager->BeginQuery(query));
+ EXPECT_TRUE(manager->EndQuery(query, kSubmitCount));
+ manager->Destroy(false);
+}
+
+// Test that when emulation is based on ARB_occlusion_query we use
+// GL_SAMPLES_PASSED_ARB for GL_ANY_SAMPLES_PASSED_EXT.
+TEST_F(QueryManagerTest, ARBOcclusionQuery) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(),
+ "GL_ARB_occlusion_query");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ scoped_ptr<QueryManager> manager(
+ new QueryManager(decoder_.get(), feature_info.get()));
+
+ EXPECT_CALL(*gl_, GenQueriesARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(kService1Id))
+ .RetiresOnSaturation();
+ QueryManager::Query* query = manager->CreateQuery(
+ kTarget, kClient1Id, kSharedMemoryId, kSharedMemoryOffset);
+ ASSERT_TRUE(query != NULL);
+
+ EXPECT_CALL(*gl_, BeginQueryARB(GL_SAMPLES_PASSED_ARB, kService1Id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, EndQueryARB(GL_SAMPLES_PASSED_ARB))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager->BeginQuery(query));
+ EXPECT_TRUE(manager->EndQuery(query, kSubmitCount));
+ manager->Destroy(false);
+}
+
+TEST_F(QueryManagerTest, GetErrorQuery) {
+ const GLuint kClient1Id = 1;
+ const GLenum kTarget = GL_GET_ERROR_QUERY_CHROMIUM;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+
+ TestHelper::SetupFeatureInfoInitExpectations(gl_.get(), "");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ scoped_ptr<QueryManager> manager(
+ new QueryManager(decoder_.get(), feature_info.get()));
+
+ QueryManager::Query* query = manager->CreateQuery(
+ kTarget, kClient1Id, kSharedMemoryId, kSharedMemoryOffset);
+ ASSERT_TRUE(query != NULL);
+
+ // Setup shared memory like client would.
+ QuerySync* sync = decoder_->GetSharedMemoryAs<QuerySync*>(
+ kSharedMemoryId, kSharedMemoryOffset, sizeof(*sync));
+ ASSERT_TRUE(sync != NULL);
+ sync->Reset();
+
+ EXPECT_TRUE(manager->BeginQuery(query));
+
+ MockErrorState mock_error_state;
+ EXPECT_CALL(*decoder_.get(), GetErrorState())
+ .WillRepeatedly(Return(&mock_error_state));
+ EXPECT_CALL(mock_error_state, GetGLError())
+ .WillOnce(Return(GL_INVALID_ENUM))
+ .RetiresOnSaturation();
+
+ EXPECT_TRUE(manager->EndQuery(query, kSubmitCount));
+ EXPECT_FALSE(query->pending());
+
+ EXPECT_EQ(static_cast<GLuint>(GL_INVALID_ENUM), sync->result);
+
+ manager->Destroy(false);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
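
The ARBOcclusionQuery2 and ARBOcclusionQuery tests above fix the target rewriting
performed by AdjustTargetForEmulation. A standalone sketch of that mapping follows;
the enum values are copied from the GL extension headers (an assumption worth
verifying), and the function is a simplified stand-in rather than the Chromium code.

typedef unsigned int SketchGLenum;

// Assumed header values: GL_ANY_SAMPLES_PASSED(_EXT) = 0x8C2F,
// GL_ANY_SAMPLES_PASSED_CONSERVATIVE(_EXT) = 0x8D6A,
// GL_SAMPLES_PASSED(_ARB) = 0x8914.
const SketchGLenum kAnySamplesPassed = 0x8C2F;
const SketchGLenum kAnySamplesPassedConservative = 0x8D6A;
const SketchGLenum kSamplesPassed = 0x8914;

SketchGLenum AdjustTargetSketch(SketchGLenum target,
                                bool have_occlusion_query2,
                                bool have_occlusion_query) {
  if (target == kAnySamplesPassed || target == kAnySamplesPassedConservative) {
    if (have_occlusion_query2) {
      // ARB_occlusion_query2 has no CONSERVATIVE target.
      return kAnySamplesPassed;
    }
    if (have_occlusion_query) {
      // ARB_occlusion_query only counts samples; a nonzero count stands in
      // for the boolean "any samples passed" answer.
      return kSamplesPassed;
    }
  }
  return target;
}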
diff --git a/gpu/command_buffer/service/renderbuffer_manager.cc b/gpu/command_buffer/service/renderbuffer_manager.cc
new file mode 100644
index 0000000..ff8ae7b
--- /dev/null
+++ b/gpu/command_buffer/service/renderbuffer_manager.cc
@@ -0,0 +1,233 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This should contain everything to uniquely identify a Renderbuffer.
+static const char RenderbufferTag[] = "|Renderbuffer|";
+struct RenderbufferSignature {
+ GLenum internal_format_;
+ GLsizei samples_;
+ GLsizei width_;
+ GLsizei height_;
+
+  // Since we will be hashing this signature structure, the padding must be
+  // zero initialized. Although the C++11 specification requires this, we use
+  // a constructor with a memset to enforce it rather than relying on
+  // compilers honoring this obscure corner of the specification.
+ RenderbufferSignature(GLenum internal_format,
+ GLsizei samples,
+ GLsizei width,
+ GLsizei height) {
+ memset(this, 0, sizeof(RenderbufferSignature));
+ internal_format_ = internal_format;
+ samples_ = samples;
+ width_ = width;
+ height_ = height;
+ }
+};
+
+RenderbufferManager::RenderbufferManager(
+ MemoryTracker* memory_tracker,
+ GLint max_renderbuffer_size,
+ GLint max_samples,
+ bool depth24_supported)
+ : memory_tracker_(
+ new MemoryTypeTracker(memory_tracker, MemoryTracker::kUnmanaged)),
+ max_renderbuffer_size_(max_renderbuffer_size),
+ max_samples_(max_samples),
+ depth24_supported_(depth24_supported),
+ num_uncleared_renderbuffers_(0),
+ renderbuffer_count_(0),
+ have_context_(true) {
+}
+
+RenderbufferManager::~RenderbufferManager() {
+ DCHECK(renderbuffers_.empty());
+ // If this triggers, that means something is keeping a reference to
+ // a Renderbuffer belonging to this.
+ CHECK_EQ(renderbuffer_count_, 0u);
+
+ DCHECK_EQ(0, num_uncleared_renderbuffers_);
+}
+
+size_t Renderbuffer::EstimatedSize() {
+ uint32 size = 0;
+ manager_->ComputeEstimatedRenderbufferSize(
+ width_, height_, samples_, internal_format_, &size);
+ return size;
+}
+
+
+size_t Renderbuffer::GetSignatureSize() const {
+ return sizeof(RenderbufferTag) + sizeof(RenderbufferSignature);
+}
+
+void Renderbuffer::AddToSignature(std::string* signature) const {
+ DCHECK(signature);
+ RenderbufferSignature signature_data(internal_format_,
+ samples_,
+ width_,
+ height_);
+
+ signature->append(RenderbufferTag, sizeof(RenderbufferTag));
+ signature->append(reinterpret_cast<const char*>(&signature_data),
+ sizeof(signature_data));
+}
+
+Renderbuffer::Renderbuffer(RenderbufferManager* manager,
+ GLuint client_id,
+ GLuint service_id)
+ : manager_(manager),
+ client_id_(client_id),
+ service_id_(service_id),
+ cleared_(true),
+ has_been_bound_(false),
+ samples_(0),
+ internal_format_(GL_RGBA4),
+ width_(0),
+ height_(0) {
+ manager_->StartTracking(this);
+}
+
+Renderbuffer::~Renderbuffer() {
+ if (manager_) {
+ if (manager_->have_context_) {
+ GLuint id = service_id();
+ glDeleteRenderbuffersEXT(1, &id);
+ }
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+void RenderbufferManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ renderbuffers_.clear();
+ DCHECK_EQ(0u, memory_tracker_->GetMemRepresented());
+}
+
+void RenderbufferManager::StartTracking(Renderbuffer* /* renderbuffer */) {
+ ++renderbuffer_count_;
+}
+
+void RenderbufferManager::StopTracking(Renderbuffer* renderbuffer) {
+ --renderbuffer_count_;
+ if (!renderbuffer->cleared()) {
+ --num_uncleared_renderbuffers_;
+ }
+ memory_tracker_->TrackMemFree(renderbuffer->EstimatedSize());
+}
+
+void RenderbufferManager::SetInfo(
+ Renderbuffer* renderbuffer,
+ GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height) {
+ DCHECK(renderbuffer);
+ if (!renderbuffer->cleared()) {
+ --num_uncleared_renderbuffers_;
+ }
+ memory_tracker_->TrackMemFree(renderbuffer->EstimatedSize());
+ renderbuffer->SetInfo(samples, internalformat, width, height);
+ memory_tracker_->TrackMemAlloc(renderbuffer->EstimatedSize());
+ if (!renderbuffer->cleared()) {
+ ++num_uncleared_renderbuffers_;
+ }
+}
+
+void RenderbufferManager::SetCleared(Renderbuffer* renderbuffer,
+ bool cleared) {
+ DCHECK(renderbuffer);
+ if (!renderbuffer->cleared()) {
+ --num_uncleared_renderbuffers_;
+ }
+ renderbuffer->set_cleared(cleared);
+ if (!renderbuffer->cleared()) {
+ ++num_uncleared_renderbuffers_;
+ }
+}
+
+void RenderbufferManager::CreateRenderbuffer(
+ GLuint client_id, GLuint service_id) {
+ scoped_refptr<Renderbuffer> renderbuffer(
+ new Renderbuffer(this, client_id, service_id));
+ std::pair<RenderbufferMap::iterator, bool> result =
+ renderbuffers_.insert(std::make_pair(client_id, renderbuffer));
+ DCHECK(result.second);
+ if (!renderbuffer->cleared()) {
+ ++num_uncleared_renderbuffers_;
+ }
+}
+
+Renderbuffer* RenderbufferManager::GetRenderbuffer(
+ GLuint client_id) {
+ RenderbufferMap::iterator it = renderbuffers_.find(client_id);
+ return it != renderbuffers_.end() ? it->second.get() : NULL;
+}
+
+void RenderbufferManager::RemoveRenderbuffer(GLuint client_id) {
+ RenderbufferMap::iterator it = renderbuffers_.find(client_id);
+ if (it != renderbuffers_.end()) {
+ Renderbuffer* renderbuffer = it->second.get();
+ renderbuffer->MarkAsDeleted();
+ renderbuffers_.erase(it);
+ }
+}
+
+bool RenderbufferManager::ComputeEstimatedRenderbufferSize(int width,
+ int height,
+ int samples,
+ int internal_format,
+ uint32* size) const {
+ DCHECK(size);
+
+ uint32 temp = 0;
+ if (!SafeMultiplyUint32(width, height, &temp)) {
+ return false;
+ }
+ if (!SafeMultiplyUint32(temp, samples, &temp)) {
+ return false;
+ }
+ GLenum impl_format = InternalRenderbufferFormatToImplFormat(internal_format);
+ if (!SafeMultiplyUint32(
+ temp, GLES2Util::RenderbufferBytesPerPixel(impl_format), &temp)) {
+ return false;
+ }
+ *size = temp;
+ return true;
+}
+
+GLenum RenderbufferManager::InternalRenderbufferFormatToImplFormat(
+ GLenum impl_format) const {
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
+ switch (impl_format) {
+ case GL_DEPTH_COMPONENT16:
+ return GL_DEPTH_COMPONENT;
+ case GL_RGBA4:
+ case GL_RGB5_A1:
+ return GL_RGBA;
+ case GL_RGB565:
+ return GL_RGB;
+ }
+ } else {
+ // Upgrade 16-bit depth to 24-bit if possible.
+ if (impl_format == GL_DEPTH_COMPONENT16 && depth24_supported_)
+ return GL_DEPTH_COMPONENT24;
+ }
+ return impl_format;
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
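
ComputeEstimatedRenderbufferSize() above is just width x height x samples x
bytes-per-pixel, but each multiply is overflow-checked so a buggy or hostile client
cannot wrap a 32-bit size. A self-contained sketch of the same arithmetic, with a
local checked multiply standing in for SafeMultiplyUint32:

#include <stdint.h>

// Local stand-in for SafeMultiplyUint32: widen to 64 bits, reject wrap-around.
static bool CheckedMulU32(uint32_t a, uint32_t b, uint32_t* out) {
  uint64_t wide = static_cast<uint64_t>(a) * static_cast<uint64_t>(b);
  if (wide > 0xFFFFFFFFull)
    return false;
  *out = static_cast<uint32_t>(wide);
  return true;
}

bool EstimateRenderbufferBytes(uint32_t width, uint32_t height,
                               uint32_t samples, uint32_t bytes_per_pixel,
                               uint32_t* size) {
  uint32_t temp = 0;
  if (!CheckedMulU32(width, height, &temp))
    return false;
  if (!CheckedMulU32(temp, samples, &temp))
    return false;
  if (!CheckedMulU32(temp, bytes_per_pixel, &temp))
    return false;
  *size = temp;
  return true;
}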
diff --git a/gpu/command_buffer/service/renderbuffer_manager.h b/gpu/command_buffer/service/renderbuffer_manager.h
new file mode 100644
index 0000000..71f830a
--- /dev/null
+++ b/gpu/command_buffer/service/renderbuffer_manager.h
@@ -0,0 +1,205 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_RENDERBUFFER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_RENDERBUFFER_MANAGER_H_
+
+#include <string>
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class RenderbufferManager;
+
+// Info about a Renderbuffer.
+class GPU_EXPORT Renderbuffer
+ : public base::RefCounted<Renderbuffer> {
+ public:
+ Renderbuffer(RenderbufferManager* manager,
+ GLuint client_id,
+ GLuint service_id);
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ GLuint client_id() const {
+ return client_id_;
+ }
+
+ bool cleared() const {
+ return cleared_;
+ }
+
+ GLenum internal_format() const {
+ return internal_format_;
+ }
+
+ GLsizei samples() const {
+ return samples_;
+ }
+
+ GLsizei width() const {
+ return width_;
+ }
+
+ GLsizei height() const {
+ return height_;
+ }
+
+ bool IsDeleted() const {
+ return client_id_ == 0;
+ }
+
+ void MarkAsValid() {
+ has_been_bound_ = true;
+ }
+
+ bool IsValid() const {
+ return has_been_bound_ && !IsDeleted();
+ }
+
+ size_t EstimatedSize();
+
+ size_t GetSignatureSize() const;
+ void AddToSignature(std::string* signature) const;
+
+ private:
+ friend class RenderbufferManager;
+ friend class base::RefCounted<Renderbuffer>;
+
+ ~Renderbuffer();
+
+ void set_cleared(bool cleared) {
+ cleared_ = cleared;
+ }
+
+ void SetInfo(
+ GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height) {
+ samples_ = samples;
+ internal_format_ = internalformat;
+ width_ = width;
+ height_ = height;
+ cleared_ = false;
+ }
+
+ void MarkAsDeleted() {
+ client_id_ = 0;
+ }
+
+ // RenderbufferManager that owns this Renderbuffer.
+ RenderbufferManager* manager_;
+
+ // Client side renderbuffer id.
+ GLuint client_id_;
+
+ // Service side renderbuffer id.
+ GLuint service_id_;
+
+ // Whether this renderbuffer has been cleared.
+ bool cleared_;
+
+ // Whether this renderbuffer has ever been bound.
+ bool has_been_bound_;
+
+ // Number of samples (for multi-sampled renderbuffers)
+ GLsizei samples_;
+
+ // Renderbuffer internalformat set through RenderbufferStorage().
+ GLenum internal_format_;
+
+ // Dimensions of renderbuffer.
+ GLsizei width_;
+ GLsizei height_;
+};
+
+// This class keeps track of the renderbuffers and whether or not they have
+// been cleared.
+class GPU_EXPORT RenderbufferManager {
+ public:
+ RenderbufferManager(MemoryTracker* memory_tracker,
+ GLint max_renderbuffer_size,
+ GLint max_samples,
+ bool depth24_supported);
+ ~RenderbufferManager();
+
+ GLint max_renderbuffer_size() const {
+ return max_renderbuffer_size_;
+ }
+
+ GLint max_samples() const {
+ return max_samples_;
+ }
+
+ bool HaveUnclearedRenderbuffers() const {
+ return num_uncleared_renderbuffers_ != 0;
+ }
+
+ void SetInfo(
+ Renderbuffer* renderbuffer,
+ GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+
+ void SetCleared(Renderbuffer* renderbuffer, bool cleared);
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a Renderbuffer for the given renderbuffer ids.
+ void CreateRenderbuffer(GLuint client_id, GLuint service_id);
+
+ // Gets the renderbuffer for the given renderbuffer id.
+ Renderbuffer* GetRenderbuffer(GLuint client_id);
+
+ // Removes a renderbuffer for the given renderbuffer id.
+ void RemoveRenderbuffer(GLuint client_id);
+
+ size_t mem_represented() const {
+ return memory_tracker_->GetMemRepresented();
+ }
+
+ bool ComputeEstimatedRenderbufferSize(int width,
+ int height,
+ int samples,
+ int internal_format,
+ uint32* size) const;
+ GLenum InternalRenderbufferFormatToImplFormat(GLenum impl_format) const;
+
+ private:
+ friend class Renderbuffer;
+
+ void StartTracking(Renderbuffer* renderbuffer);
+ void StopTracking(Renderbuffer* renderbuffer);
+
+ scoped_ptr<MemoryTypeTracker> memory_tracker_;
+
+ GLint max_renderbuffer_size_;
+ GLint max_samples_;
+ bool depth24_supported_;
+
+ int num_uncleared_renderbuffers_;
+
+ // Counts the number of Renderbuffer objects allocated with 'this' as their
+ // manager. Allows checking that no Renderbuffer outlives this manager.
+ unsigned renderbuffer_count_;
+
+ bool have_context_;
+
+ // Info for each renderbuffer in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<Renderbuffer> > RenderbufferMap;
+ RenderbufferMap renderbuffers_;
+
+ DISALLOW_COPY_AND_ASSIGN(RenderbufferManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_RENDERBUFFER_MANAGER_H_
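A rough usage sketch for the RenderbufferManager declared above. It is illustrative only: the client and service ids are made up, and a live GL context plus the rest of the GPU service are assumed.

#include "gpu/command_buffer/service/renderbuffer_manager.h"

// Illustrative lifetime of one renderbuffer entry; ids are hypothetical.
void ExampleRenderbufferLifetime(gpu::gles2::RenderbufferManager* manager) {
  const GLuint kClientId = 1;    // id chosen on the client side
  const GLuint kServiceId = 11;  // id from e.g. glGenRenderbuffersEXT
  manager->CreateRenderbuffer(kClientId, kServiceId);

  gpu::gles2::Renderbuffer* rb = manager->GetRenderbuffer(kClientId);
  // Defining storage marks the renderbuffer uncleared until it is cleared.
  manager->SetInfo(rb, 0 /* samples */, GL_RGBA4, 64, 64);
  // ... the decoder clears the attachment before it can be read ...
  manager->SetCleared(rb, true /* cleared */);

  manager->RemoveRenderbuffer(kClientId);  // drops the manager's reference
}

Because Renderbuffer is ref-counted, it can outlive RemoveRenderbuffer while other code still holds a scoped_refptr; the UseDeletedRenderbufferInfo test below exercises exactly that.
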
diff --git a/gpu/command_buffer/service/renderbuffer_manager_unittest.cc b/gpu/command_buffer/service/renderbuffer_manager_unittest.cc
new file mode 100644
index 0000000..ba0ebea
--- /dev/null
+++ b/gpu/command_buffer/service/renderbuffer_manager_unittest.cc
@@ -0,0 +1,323 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+
+#include <set>
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+class RenderbufferManagerTestBase : public GpuServiceTest {
+ public:
+ static const GLint kMaxSize = 128;
+ static const GLint kMaxSamples = 4;
+
+ protected:
+ void SetUpBase(MemoryTracker* memory_tracker, bool depth24_supported) {
+ GpuServiceTest::SetUp();
+ manager_.reset(new RenderbufferManager(
+ memory_tracker, kMaxSize, kMaxSamples, depth24_supported));
+ }
+
+ virtual void TearDown() {
+ manager_->Destroy(true);
+ manager_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ scoped_ptr<RenderbufferManager> manager_;
+};
+
+class RenderbufferManagerTest : public RenderbufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ bool depth24_supported = false;
+ SetUpBase(NULL, depth24_supported);
+ }
+};
+
+class RenderbufferManagerMemoryTrackerTest
+ : public RenderbufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ mock_memory_tracker_ = new StrictMock<MockMemoryTracker>();
+ bool depth24_supported = false;
+ SetUpBase(mock_memory_tracker_.get(), depth24_supported);
+ }
+
+ scoped_refptr<MockMemoryTracker> mock_memory_tracker_;
+};
+
+#define EXPECT_MEMORY_ALLOCATION_CHANGE(old_size, new_size, pool) \
+ EXPECT_CALL(*mock_memory_tracker_.get(), \
+ TrackMemoryAllocatedChange(old_size, new_size, pool)) \
+ .Times(1).RetiresOnSaturation()
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLint RenderbufferManagerTestBase::kMaxSize;
+const GLint RenderbufferManagerTestBase::kMaxSamples;
+#endif
+
+TEST_F(RenderbufferManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+ EXPECT_EQ(kMaxSize, manager_->max_renderbuffer_size());
+ EXPECT_EQ(kMaxSamples, manager_->max_samples());
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+ // Check we can create renderbuffer.
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ // Check renderbuffer got created.
+ scoped_refptr<Renderbuffer> renderbuffer1 =
+ manager_->GetRenderbuffer(kClient1Id);
+ ASSERT_TRUE(renderbuffer1.get() != NULL);
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+ EXPECT_EQ(kClient1Id, renderbuffer1->client_id());
+ // Check we get nothing for a non-existent renderbuffer.
+ EXPECT_TRUE(manager_->GetRenderbuffer(kClient2Id) == NULL);
+ // Check that trying to remove a non-existent renderbuffer does not crash.
+ manager_->RemoveRenderbuffer(kClient2Id);
+ // Check that the renderbuffer is deleted when the last ref is released.
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Check we can't get the renderbuffer after we remove it.
+ manager_->RemoveRenderbuffer(kClient1Id);
+ EXPECT_TRUE(manager_->GetRenderbuffer(kClient1Id) == NULL);
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+ EXPECT_EQ(0u, renderbuffer1->client_id());
+}
+
+TEST_F(RenderbufferManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create renderbuffer.
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ // Check renderbuffer got created.
+ Renderbuffer* renderbuffer1 =
+ manager_->GetRenderbuffer(kClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_->Destroy(true);
+ renderbuffer1 = manager_->GetRenderbuffer(kClient1Id);
+ ASSERT_TRUE(renderbuffer1 == NULL);
+}
+
+TEST_F(RenderbufferManagerTest, Renderbuffer) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create renderbuffer.
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ // Check renderbuffer got created.
+ Renderbuffer* renderbuffer1 =
+ manager_->GetRenderbuffer(kClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+ EXPECT_EQ(kService1Id, renderbuffer1->service_id());
+ EXPECT_EQ(0, renderbuffer1->samples());
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA4), renderbuffer1->internal_format());
+ EXPECT_EQ(0, renderbuffer1->width());
+ EXPECT_EQ(0, renderbuffer1->height());
+ EXPECT_TRUE(renderbuffer1->cleared());
+ EXPECT_EQ(0u, renderbuffer1->EstimatedSize());
+
+ // Check that setting the info marks the renderbuffer as not cleared.
+ const GLsizei kSamples = 4;
+ const GLenum kFormat = GL_RGBA4;
+ const GLsizei kWidth = 128;
+ const GLsizei kHeight = 64;
+ manager_->SetInfo(renderbuffer1, kSamples, kFormat, kWidth, kHeight);
+ EXPECT_EQ(kSamples, renderbuffer1->samples());
+ EXPECT_EQ(kFormat, renderbuffer1->internal_format());
+ EXPECT_EQ(kWidth, renderbuffer1->width());
+ EXPECT_EQ(kHeight, renderbuffer1->height());
+ EXPECT_FALSE(renderbuffer1->cleared());
+ EXPECT_FALSE(renderbuffer1->IsDeleted());
+ EXPECT_TRUE(manager_->HaveUnclearedRenderbuffers());
+ EXPECT_EQ(kWidth * kHeight * 4u * 4u, renderbuffer1->EstimatedSize());
+
+ manager_->SetCleared(renderbuffer1, true);
+ EXPECT_TRUE(renderbuffer1->cleared());
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+
+ manager_->SetInfo(renderbuffer1, kSamples, kFormat, kWidth, kHeight);
+ EXPECT_TRUE(manager_->HaveUnclearedRenderbuffers());
+
+ // Check that the renderbuffer is deleted when the last ref is released.
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_->RemoveRenderbuffer(kClient1Id);
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+}
+
+TEST_F(RenderbufferManagerMemoryTrackerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kUnmanaged);
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ Renderbuffer* renderbuffer1 =
+ manager_->GetRenderbuffer(kClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+
+ const GLsizei kSamples = 4;
+ const GLenum kFormat = GL_RGBA4;
+ const GLsizei kWidth = 128;
+ const GLsizei kHeight1 = 64;
+ const GLsizei kHeight2 = 32;
+ uint32 expected_size_1 = 0;
+ uint32 expected_size_2 = 0;
+ manager_->ComputeEstimatedRenderbufferSize(
+ kWidth, kHeight1, kSamples, kFormat, &expected_size_1);
+ manager_->ComputeEstimatedRenderbufferSize(
+ kWidth, kHeight2, kSamples, kFormat, &expected_size_2);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(
+ 0, expected_size_1, MemoryTracker::kUnmanaged);
+ manager_->SetInfo(renderbuffer1, kSamples, kFormat, kWidth, kHeight1);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(
+ expected_size_1, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(
+ 0, expected_size_2, MemoryTracker::kUnmanaged);
+ manager_->SetInfo(renderbuffer1, kSamples, kFormat, kWidth, kHeight2);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(
+ expected_size_2, 0, MemoryTracker::kUnmanaged);
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+TEST_F(RenderbufferManagerTest, UseDeletedRenderbufferInfo) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ scoped_refptr<Renderbuffer> renderbuffer1(
+ manager_->GetRenderbuffer(kClient1Id));
+ ASSERT_TRUE(renderbuffer1.get() != NULL);
+ // Remove it.
+ manager_->RemoveRenderbuffer(kClient1Id);
+ // Use after removing.
+ const GLsizei kSamples = 4;
+ const GLenum kFormat = GL_RGBA4;
+ const GLsizei kWidth = 128;
+ const GLsizei kHeight = 64;
+ manager_->SetInfo(renderbuffer1.get(), kSamples, kFormat, kWidth, kHeight);
+ // See that it still affects manager.
+ EXPECT_TRUE(manager_->HaveUnclearedRenderbuffers());
+ manager_->SetCleared(renderbuffer1.get(), true);
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+ // Check that the renderbuffer is deleted when the last ref is released.
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ renderbuffer1 = NULL;
+}
+
+namespace {
+
+bool InSet(std::set<std::string>* string_set, const std::string& str) {
+ std::pair<std::set<std::string>::iterator, bool> result =
+ string_set->insert(str);
+ return !result.second;
+}
+
+} // anonymous namespace
+
+TEST_F(RenderbufferManagerTest, AddToSignature) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ scoped_refptr<Renderbuffer> renderbuffer1(
+ manager_->GetRenderbuffer(kClient1Id));
+ ASSERT_TRUE(renderbuffer1.get() != NULL);
+ const GLsizei kSamples = 4;
+ const GLenum kFormat = GL_RGBA4;
+ const GLsizei kWidth = 128;
+ const GLsizei kHeight = 64;
+ manager_->SetInfo(renderbuffer1.get(), kSamples, kFormat, kWidth, kHeight);
+ std::string signature1;
+ std::string signature2;
+ renderbuffer1->AddToSignature(&signature1);
+
+ std::set<std::string> string_set;
+ EXPECT_FALSE(InSet(&string_set, signature1));
+
+ // Change each field in turn and check that the signature changes.
+ manager_->SetInfo(
+ renderbuffer1.get(), kSamples + 1, kFormat, kWidth, kHeight);
+ renderbuffer1->AddToSignature(&signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetInfo(
+ renderbuffer1.get(), kSamples, kFormat + 1, kWidth, kHeight);
+ signature2.clear();
+ renderbuffer1->AddToSignature(&signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetInfo(
+ renderbuffer1.get(), kSamples, kFormat, kWidth + 1, kHeight);
+ signature2.clear();
+ renderbuffer1->AddToSignature(&signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetInfo(
+ renderbuffer1.get(), kSamples, kFormat, kWidth, kHeight + 1);
+ signature2.clear();
+ renderbuffer1->AddToSignature(&signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ // Put the info back to the original values; the signature should match.
+ manager_->SetInfo(renderbuffer1.get(), kSamples, kFormat, kWidth, kHeight);
+ signature2.clear();
+ renderbuffer1->AddToSignature(&signature2);
+ EXPECT_EQ(signature1, signature2);
+
+ // Check the set was actually getting different signatures.
+ EXPECT_EQ(5u, string_set.size());
+
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+class RenderbufferManagerFormatTest : public RenderbufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ bool depth24_supported = true;
+ SetUpBase(NULL, depth24_supported);
+ }
+};
+
+TEST_F(RenderbufferManagerFormatTest, UpgradeDepthFormatOnGLES) {
+ gfx::GLImplementation prev_impl = gfx::GetGLImplementation();
+ gfx::SetGLImplementation(gfx::kGLImplementationEGLGLES2);
+ GLenum impl_format =
+ manager_->InternalRenderbufferFormatToImplFormat(GL_DEPTH_COMPONENT16);
+ gfx::SetGLImplementation(prev_impl);
+ EXPECT_EQ(static_cast<GLenum>(GL_DEPTH_COMPONENT24), impl_format);
+}
+
+TEST_F(RenderbufferManagerFormatTest, UseUnsizedDepthFormatOnNonGLES) {
+ gfx::GLImplementation prev_impl = gfx::GetGLImplementation();
+ gfx::SetGLImplementation(gfx::kGLImplementationDesktopGL);
+ GLenum impl_format =
+ manager_->InternalRenderbufferFormatToImplFormat(GL_DEPTH_COMPONENT16);
+ gfx::SetGLImplementation(prev_impl);
+ EXPECT_EQ(static_cast<GLenum>(GL_DEPTH_COMPONENT), impl_format);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/shader_manager.cc b/gpu/command_buffer/service/shader_manager.cc
new file mode 100644
index 0000000..189d78b
--- /dev/null
+++ b/gpu/command_buffer/service/shader_manager.cc
@@ -0,0 +1,231 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shader_manager.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+
+namespace gpu {
+namespace gles2 {
+
+Shader::Shader(GLuint service_id, GLenum shader_type)
+ : use_count_(0),
+ service_id_(service_id),
+ shader_type_(shader_type),
+ valid_(false) {
+}
+
+Shader::~Shader() {
+}
+
+void Shader::DoCompile(ShaderTranslatorInterface* translator,
+ TranslatedShaderSourceType type) {
+ // Translate the GL ES 2.0 shader to a desktop GL shader and pass that to
+ // glShaderSource and then glCompileShader.
+ const char* source_for_driver = source_.c_str();
+ if (translator) {
+ valid_ = translator->Translate(source_,
+ &log_info_,
+ &translated_source_,
+ &attrib_map_,
+ &uniform_map_,
+ &varying_map_,
+ &name_map_);
+ if (!valid_) {
+ return;
+ }
+ signature_source_ = source_;
+ source_for_driver = translated_source_.c_str();
+ }
+
+ glShaderSource(service_id_, 1, &source_for_driver, NULL);
+ glCompileShader(service_id_);
+ if (type == kANGLE) {
+ GLint max_len = 0;
+ glGetShaderiv(service_id_,
+ GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE,
+ &max_len);
+ scoped_ptr<char[]> buffer(new char[max_len]);
+ GLint len = 0;
+ glGetTranslatedShaderSourceANGLE(
+ service_id_, max_len, &len, buffer.get());
+ DCHECK(max_len == 0 || len < max_len);
+ DCHECK(len == 0 || buffer[len] == '\0');
+ translated_source_ = std::string(buffer.get(), len);
+ }
+
+ GLint status = GL_FALSE;
+ glGetShaderiv(service_id_, GL_COMPILE_STATUS, &status);
+ if (status != GL_TRUE) {
+ // We cannot reach here if we are using the shader translator.
+ // All invalid shaders must be rejected by the translator.
+ // All translated shaders must compile.
+ GLint max_len = 0;
+ glGetShaderiv(service_id_, GL_INFO_LOG_LENGTH, &max_len);
+ scoped_ptr<char[]> buffer(new char[max_len]);
+ GLint len = 0;
+ glGetShaderInfoLog(service_id_, max_len, &len, buffer.get());
+ DCHECK(max_len == 0 || len < max_len);
+ DCHECK(len == 0 || buffer[len] == '\0');
+ valid_ = false;
+ log_info_ = std::string(buffer.get(), len);
+ LOG_IF(ERROR, translator)
+ << "Shader translator allowed/produced an invalid shader "
+ << "unless the driver is buggy:"
+ << "\n--original-shader--\n" << source_
+ << "\n--translated-shader--\n" << source_for_driver
+ << "\n--info-log--\n" << log_info_;
+ }
+}
+
+void Shader::IncUseCount() {
+ ++use_count_;
+}
+
+void Shader::DecUseCount() {
+ --use_count_;
+ DCHECK_GE(use_count_, 0);
+}
+
+void Shader::MarkAsDeleted() {
+ DCHECK_NE(service_id_, 0u);
+ service_id_ = 0;
+}
+
+const Shader::VariableInfo* Shader::GetAttribInfo(
+ const std::string& name) const {
+ VariableMap::const_iterator it = attrib_map_.find(name);
+ return it != attrib_map_.end() ? &it->second : NULL;
+}
+
+const std::string* Shader::GetAttribMappedName(
+ const std::string& original_name) const {
+ for (VariableMap::const_iterator it = attrib_map_.begin();
+ it != attrib_map_.end(); ++it) {
+ if (it->second.name == original_name)
+ return &(it->first);
+ }
+ return NULL;
+}
+
+const std::string* Shader::GetOriginalNameFromHashedName(
+ const std::string& hashed_name) const {
+ NameMap::const_iterator it = name_map_.find(hashed_name);
+ if (it != name_map_.end())
+ return &(it->second);
+ return NULL;
+}
+
+const Shader::VariableInfo* Shader::GetUniformInfo(
+ const std::string& name) const {
+ VariableMap::const_iterator it = uniform_map_.find(name);
+ return it != uniform_map_.end() ? &it->second : NULL;
+}
+
+const Shader::VariableInfo* Shader::GetVaryingInfo(
+ const std::string& name) const {
+ VariableMap::const_iterator it = varying_map_.find(name);
+ return it != varying_map_.end() ? &it->second : NULL;
+}
+
+ShaderManager::ShaderManager() {}
+
+ShaderManager::~ShaderManager() {
+ DCHECK(shaders_.empty());
+}
+
+void ShaderManager::Destroy(bool have_context) {
+ while (!shaders_.empty()) {
+ if (have_context) {
+ Shader* shader = shaders_.begin()->second.get();
+ if (!shader->IsDeleted()) {
+ glDeleteShader(shader->service_id());
+ shader->MarkAsDeleted();
+ }
+ }
+ shaders_.erase(shaders_.begin());
+ }
+}
+
+Shader* ShaderManager::CreateShader(
+ GLuint client_id,
+ GLuint service_id,
+ GLenum shader_type) {
+ std::pair<ShaderMap::iterator, bool> result =
+ shaders_.insert(std::make_pair(
+ client_id, scoped_refptr<Shader>(
+ new Shader(service_id, shader_type))));
+ DCHECK(result.second);
+ return result.first->second.get();
+}
+
+Shader* ShaderManager::GetShader(GLuint client_id) {
+ ShaderMap::iterator it = shaders_.find(client_id);
+ return it != shaders_.end() ? it->second.get() : NULL;
+}
+
+bool ShaderManager::GetClientId(GLuint service_id, GLuint* client_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (ShaderMap::const_iterator it = shaders_.begin();
+ it != shaders_.end(); ++it) {
+ if (it->second->service_id() == service_id) {
+ *client_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ShaderManager::IsOwned(Shader* shader) {
+ for (ShaderMap::iterator it = shaders_.begin();
+ it != shaders_.end(); ++it) {
+ if (it->second.get() == shader) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void ShaderManager::RemoveShader(Shader* shader) {
+ DCHECK(shader);
+ DCHECK(IsOwned(shader));
+ if (shader->IsDeleted() && !shader->InUse()) {
+ for (ShaderMap::iterator it = shaders_.begin();
+ it != shaders_.end(); ++it) {
+ if (it->second.get() == shader) {
+ shaders_.erase(it);
+ return;
+ }
+ }
+ NOTREACHED();
+ }
+}
+
+void ShaderManager::MarkAsDeleted(Shader* shader) {
+ DCHECK(shader);
+ DCHECK(IsOwned(shader));
+ shader->MarkAsDeleted();
+ RemoveShader(shader);
+}
+
+void ShaderManager::UseShader(Shader* shader) {
+ DCHECK(shader);
+ DCHECK(IsOwned(shader));
+ shader->IncUseCount();
+}
+
+void ShaderManager::UnuseShader(Shader* shader) {
+ DCHECK(shader);
+ DCHECK(IsOwned(shader));
+ shader->DecUseCount();
+ RemoveShader(shader);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
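MarkAsDeleted() above only erases a shader from the map once it is no longer in use; a condensed sketch of that sequence (the ids are hypothetical and the ShaderManager is assumed to come from the decoder):

#include "gpu/command_buffer/service/shader_manager.h"

// Deferred deletion: a deleted shader stays alive while a program uses it.
void ExampleShaderDeferredDeletion(gpu::gles2::ShaderManager* manager) {
  gpu::gles2::Shader* shader = manager->CreateShader(
      1 /* client id */, 11 /* service id */, GL_VERTEX_SHADER);
  manager->UseShader(shader);      // e.g. attached to a program
  manager->MarkAsDeleted(shader);  // client called glDeleteShader
  // GetShader(1) still returns the shader here because it is in use.
  manager->UnuseShader(shader);    // last use gone: the entry is freed now
}
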
diff --git a/gpu/command_buffer/service/shader_manager.h b/gpu/command_buffer/service/shader_manager.h
new file mode 100644
index 0000000..359e574
--- /dev/null
+++ b/gpu/command_buffer/service/shader_manager.h
@@ -0,0 +1,221 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHADER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHADER_MANAGER_H_
+
+#include <string>
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This is used to keep the source code for a shader. In order to emulate
+// GLES2 the shaders have to be re-written before being passed to the
+// underlying OpenGL implementation. But when the user calls glGetShaderSource
+// they should get the source they passed in, not the re-written source.
+class GPU_EXPORT Shader : public base::RefCounted<Shader> {
+ public:
+ enum TranslatedShaderSourceType {
+ kANGLE,
+ kGL, // GL or GLES
+ };
+
+ typedef ShaderTranslator::VariableInfo VariableInfo;
+
+ void DoCompile(ShaderTranslatorInterface* translator,
+ TranslatedShaderSourceType type);
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ GLenum shader_type() const {
+ return shader_type_;
+ }
+
+ const std::string& source() const {
+ return source_;
+ }
+
+ void set_source(const std::string& source) {
+ source_ = source;
+ }
+
+ const std::string& translated_source() const {
+ return translated_source_;
+ }
+
+ const std::string& signature_source() const {
+ return signature_source_;
+ }
+
+ const VariableInfo* GetAttribInfo(const std::string& name) const;
+ const VariableInfo* GetUniformInfo(const std::string& name) const;
+ const VariableInfo* GetVaryingInfo(const std::string& name) const;
+
+ // If the original_name is not found, return NULL.
+ const std::string* GetAttribMappedName(
+ const std::string& original_name) const;
+
+ // If the hashed_name is not found, return NULL.
+ const std::string* GetOriginalNameFromHashedName(
+ const std::string& hashed_name) const;
+
+ const std::string& log_info() const {
+ return log_info_;
+ }
+
+ bool valid() const {
+ return valid_;
+ }
+
+ bool IsDeleted() const {
+ return service_id_ == 0;
+ }
+
+ bool InUse() const {
+ DCHECK_GE(use_count_, 0);
+ return use_count_ != 0;
+ }
+
+ // Used by program cache.
+ const ShaderTranslator::VariableMap& attrib_map() const {
+ return attrib_map_;
+ }
+
+ // Used by program cache.
+ const ShaderTranslator::VariableMap& uniform_map() const {
+ return uniform_map_;
+ }
+
+ // Used by program cache.
+ const ShaderTranslator::VariableMap& varying_map() const {
+ return varying_map_;
+ }
+
+ // Used by program cache.
+ void set_attrib_map(const ShaderTranslator::VariableMap& attrib_map) {
+ // copied because cache might be cleared
+ attrib_map_ = ShaderTranslator::VariableMap(attrib_map);
+ }
+
+ // Used by program cache.
+ void set_uniform_map(const ShaderTranslator::VariableMap& uniform_map) {
+ // copied because cache might be cleared
+ uniform_map_ = ShaderTranslator::VariableMap(uniform_map);
+ }
+
+ // Used by program cache.
+ void set_varying_map(const ShaderTranslator::VariableMap& varying_map) {
+ // copied because cache might be cleared
+ varying_map_ = ShaderTranslator::VariableMap(varying_map);
+ }
+
+ private:
+ typedef ShaderTranslator::VariableMap VariableMap;
+ typedef ShaderTranslator::NameMap NameMap;
+
+ friend class base::RefCounted<Shader>;
+ friend class ShaderManager;
+
+ Shader(GLuint service_id, GLenum shader_type);
+ ~Shader();
+
+ void IncUseCount();
+ void DecUseCount();
+ void MarkAsDeleted();
+
+ int use_count_;
+
+ // The shader this Shader is tracking.
+ GLuint service_id_;
+ // Type of shader - GL_VERTEX_SHADER or GL_FRAGMENT_SHADER.
+ GLenum shader_type_;
+
+ // True if compilation succeeded.
+ bool valid_;
+
+ // The shader source as passed to glShaderSource.
+ std::string source_;
+
+ // The source the last compile used.
+ std::string signature_source_;
+
+ // The translated shader source.
+ std::string translated_source_;
+
+ // The shader translation log.
+ std::string log_info_;
+
+ // The type info when the shader was last compiled.
+ VariableMap attrib_map_;
+ VariableMap uniform_map_;
+ VariableMap varying_map_;
+
+ // The name hashing info when the shader was last compiled.
+ NameMap name_map_;
+};
+
+// Tracks the Shaders.
+//
+// NOTE: To support shared resources an instance of this class will
+// need to be shared by multiple GLES2Decoders.
+class GPU_EXPORT ShaderManager {
+ public:
+ ShaderManager();
+ ~ShaderManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a shader for the given shader ID.
+ Shader* CreateShader(
+ GLuint client_id,
+ GLuint service_id,
+ GLenum shader_type);
+
+ // Gets the existing Shader for the given client shader ID. Returns NULL if
+ // none exists.
+ Shader* GetShader(GLuint client_id);
+
+ // Gets a client id for a given service id.
+ bool GetClientId(GLuint service_id, GLuint* client_id) const;
+
+ void MarkAsDeleted(Shader* shader);
+
+ // Mark a shader as used
+ void UseShader(Shader* shader);
+
+ // Unmark a shader as used. If it has been deleted and is not used
+ // then we free the shader.
+ void UnuseShader(Shader* shader);
+
+ // Check if a Shader is owned by this ShaderManager.
+ bool IsOwned(Shader* shader);
+
+ private:
+ friend class Shader;
+
+ // Info for each shader, keyed by client side shader Id.
+ typedef base::hash_map<GLuint, scoped_refptr<Shader> > ShaderMap;
+ ShaderMap shaders_;
+
+ void RemoveShader(Shader* shader);
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHADER_MANAGER_H_
+
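A condensed sketch of the compile path this header exposes; translator setup, the GL context, and real error handling are assumed to live elsewhere in the decoder.

#include "gpu/command_buffer/service/shader_manager.h"

// set_source() keeps the client's text; DoCompile() feeds the (possibly
// translated) text to the driver and records validity plus the info log.
void ExampleCompile(gpu::gles2::Shader* shader,
                    gpu::gles2::ShaderTranslatorInterface* translator) {
  shader->set_source("void main() { gl_Position = vec4(1.0); }");
  shader->DoCompile(translator, gpu::gles2::Shader::kGL);
  if (!shader->valid()) {
    const std::string& log = shader->log_info();  // translator or driver log
    (void)log;
  }
  // glGetShaderSource must still report what the client passed in:
  const std::string& original = shader->source();
  (void)original;
}
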
diff --git a/gpu/command_buffer/service/shader_manager_unittest.cc b/gpu/command_buffer/service/shader_manager_unittest.cc
new file mode 100644
index 0000000..d6236f7
--- /dev/null
+++ b/gpu/command_buffer/service/shader_manager_unittest.cc
@@ -0,0 +1,272 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shader_manager.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::Return;
+using ::testing::ReturnRef;
+
+namespace gpu {
+namespace gles2 {
+
+class ShaderManagerTest : public GpuServiceTest {
+ public:
+ ShaderManagerTest() {
+ }
+
+ virtual ~ShaderManagerTest() {
+ manager_.Destroy(false);
+ }
+
+ protected:
+ ShaderManager manager_;
+};
+
+TEST_F(ShaderManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kShader1Type = GL_VERTEX_SHADER;
+ const GLuint kClient2Id = 2;
+ // Check we can create shader.
+ Shader* info0 = manager_.CreateShader(
+ kClient1Id, kService1Id, kShader1Type);
+ // Check shader got created.
+ ASSERT_TRUE(info0 != NULL);
+ Shader* shader1 = manager_.GetShader(kClient1Id);
+ ASSERT_EQ(info0, shader1);
+ // Check we get nothing for a non-existent shader.
+ EXPECT_TRUE(manager_.GetShader(kClient2Id) == NULL);
+ // Check we can't get the shader after we remove it.
+ manager_.MarkAsDeleted(shader1);
+ EXPECT_TRUE(manager_.GetShader(kClient1Id) == NULL);
+}
+
+TEST_F(ShaderManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kShader1Type = GL_VERTEX_SHADER;
+ // Check we can create shader.
+ Shader* shader1 = manager_.CreateShader(
+ kClient1Id, kService1Id, kShader1Type);
+ // Check shader got created.
+ ASSERT_TRUE(shader1 != NULL);
+ EXPECT_CALL(*gl_, DeleteShader(kService1Id))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.Destroy(true);
+ // Check that resources got freed.
+ shader1 = manager_.GetShader(kClient1Id);
+ ASSERT_TRUE(shader1 == NULL);
+}
+
+TEST_F(ShaderManagerTest, DeleteBug) {
+ const GLuint kClient1Id = 1;
+ const GLuint kClient2Id = 2;
+ const GLuint kService1Id = 11;
+ const GLuint kService2Id = 12;
+ const GLenum kShaderType = GL_VERTEX_SHADER;
+ // Check we can create shader.
+ scoped_refptr<Shader> shader1(
+ manager_.CreateShader(kClient1Id, kService1Id, kShaderType));
+ scoped_refptr<Shader> shader2(
+ manager_.CreateShader(kClient2Id, kService2Id, kShaderType));
+ ASSERT_TRUE(shader1.get());
+ ASSERT_TRUE(shader2.get());
+ manager_.UseShader(shader1.get());
+ manager_.MarkAsDeleted(shader1.get());
+ manager_.MarkAsDeleted(shader2.get());
+ EXPECT_TRUE(manager_.IsOwned(shader1.get()));
+ EXPECT_FALSE(manager_.IsOwned(shader2.get()));
+}
+
+TEST_F(ShaderManagerTest, DoCompile) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kShader1Type = GL_VERTEX_SHADER;
+ const char* kClient1Source = "hello world";
+ const GLenum kAttrib1Type = GL_FLOAT_VEC2;
+ const GLsizei kAttrib1Size = 2;
+ const int kAttrib1Precision = SH_PRECISION_MEDIUMP;
+ const char* kAttrib1Name = "attr1";
+ const GLenum kAttrib2Type = GL_FLOAT_VEC3;
+ const GLsizei kAttrib2Size = 4;
+ const int kAttrib2Precision = SH_PRECISION_HIGHP;
+ const char* kAttrib2Name = "attr2";
+ const int kAttribStaticUse = 0;
+ const GLenum kUniform1Type = GL_FLOAT_MAT2;
+ const GLsizei kUniform1Size = 3;
+ const int kUniform1Precision = SH_PRECISION_LOWP;
+ const int kUniform1StaticUse = 1;
+ const char* kUniform1Name = "uni1";
+ const GLenum kUniform2Type = GL_FLOAT_MAT3;
+ const GLsizei kUniform2Size = 5;
+ const int kUniform2Precision = SH_PRECISION_MEDIUMP;
+ const int kUniform2StaticUse = 0;
+ const char* kUniform2Name = "uni2";
+ const GLenum kVarying1Type = GL_FLOAT_VEC4;
+ const GLsizei kVarying1Size = 1;
+ const int kVarying1Precision = SH_PRECISION_HIGHP;
+ const int kVarying1StaticUse = 0;
+ const char* kVarying1Name = "varying1";
+
+ // Check we can create shader.
+ Shader* shader1 = manager_.CreateShader(
+ kClient1Id, kService1Id, kShader1Type);
+ // Check shader got created.
+ ASSERT_TRUE(shader1 != NULL);
+ EXPECT_EQ(kService1Id, shader1->service_id());
+ // Check if the shader has correct type.
+ EXPECT_EQ(kShader1Type, shader1->shader_type());
+ EXPECT_FALSE(shader1->valid());
+ EXPECT_FALSE(shader1->InUse());
+ EXPECT_TRUE(shader1->source().empty());
+ EXPECT_TRUE(shader1->log_info().empty());
+ EXPECT_TRUE(shader1->signature_source().empty());
+ EXPECT_TRUE(shader1->translated_source().empty());
+ EXPECT_EQ(0u, shader1->attrib_map().size());
+ EXPECT_EQ(0u, shader1->uniform_map().size());
+ EXPECT_EQ(0u, shader1->varying_map().size());
+
+ // Check we can set its source.
+ shader1->set_source(kClient1Source);
+ EXPECT_STREQ(kClient1Source, shader1->source().c_str());
+ EXPECT_TRUE(shader1->signature_source().empty());
+
+ // Check DoCompile() will set compilation states, log, translated source,
+ // shader variables, and name mapping.
+ const std::string kLog = "foo";
+ const std::string kTranslatedSource = "poo";
+
+ ShaderTranslator::VariableMap attrib_map;
+ attrib_map[kAttrib1Name] = ShaderTranslatorInterface::VariableInfo(
+ kAttrib1Type, kAttrib1Size, kAttrib1Precision,
+ kAttribStaticUse, kAttrib1Name);
+ attrib_map[kAttrib2Name] = ShaderTranslatorInterface::VariableInfo(
+ kAttrib2Type, kAttrib2Size, kAttrib2Precision,
+ kAttribStaticUse, kAttrib2Name);
+ ShaderTranslator::VariableMap uniform_map;
+ uniform_map[kUniform1Name] = ShaderTranslatorInterface::VariableInfo(
+ kUniform1Type, kUniform1Size, kUniform1Precision,
+ kUniform1StaticUse, kUniform1Name);
+ uniform_map[kUniform2Name] = ShaderTranslatorInterface::VariableInfo(
+ kUniform2Type, kUniform2Size, kUniform2Precision,
+ kUniform2StaticUse, kUniform2Name);
+ ShaderTranslator::VariableMap varying_map;
+ varying_map[kVarying1Name] = ShaderTranslatorInterface::VariableInfo(
+ kVarying1Type, kVarying1Size, kVarying1Precision,
+ kVarying1StaticUse, kVarying1Name);
+
+ TestHelper::SetShaderStates(
+ gl_.get(), shader1, true, &kLog, &kTranslatedSource,
+ &attrib_map, &uniform_map, &varying_map, NULL);
+ EXPECT_TRUE(shader1->valid());
+ // When compilation succeeds, no log is recorded.
+ EXPECT_STREQ("", shader1->log_info().c_str());
+ EXPECT_STREQ(kClient1Source, shader1->signature_source().c_str());
+ EXPECT_STREQ(kTranslatedSource.c_str(), shader1->translated_source().c_str());
+
+ // Check varying infos got copied.
+ EXPECT_EQ(attrib_map.size(), shader1->attrib_map().size());
+ for (ShaderTranslator::VariableMap::const_iterator it = attrib_map.begin();
+ it != attrib_map.end(); ++it) {
+ const Shader::VariableInfo* variable_info =
+ shader1->GetAttribInfo(it->first);
+ ASSERT_TRUE(variable_info != NULL);
+ EXPECT_EQ(it->second.type, variable_info->type);
+ EXPECT_EQ(it->second.size, variable_info->size);
+ EXPECT_EQ(it->second.precision, variable_info->precision);
+ EXPECT_EQ(it->second.static_use, variable_info->static_use);
+ EXPECT_STREQ(it->second.name.c_str(), variable_info->name.c_str());
+ }
+ // Check uniform infos got copied.
+ EXPECT_EQ(uniform_map.size(), shader1->uniform_map().size());
+ for (ShaderTranslator::VariableMap::const_iterator it = uniform_map.begin();
+ it != uniform_map.end(); ++it) {
+ const Shader::VariableInfo* variable_info =
+ shader1->GetUniformInfo(it->first);
+ ASSERT_TRUE(variable_info != NULL);
+ EXPECT_EQ(it->second.type, variable_info->type);
+ EXPECT_EQ(it->second.size, variable_info->size);
+ EXPECT_EQ(it->second.precision, variable_info->precision);
+ EXPECT_EQ(it->second.static_use, variable_info->static_use);
+ EXPECT_STREQ(it->second.name.c_str(), variable_info->name.c_str());
+ }
+ // Check varying infos got copied.
+ EXPECT_EQ(varying_map.size(), shader1->varying_map().size());
+ for (ShaderTranslator::VariableMap::const_iterator it = varying_map.begin();
+ it != varying_map.end(); ++it) {
+ const Shader::VariableInfo* variable_info =
+ shader1->GetVaryingInfo(it->first);
+ ASSERT_TRUE(variable_info != NULL);
+ EXPECT_EQ(it->second.type, variable_info->type);
+ EXPECT_EQ(it->second.size, variable_info->size);
+ EXPECT_EQ(it->second.precision, variable_info->precision);
+ EXPECT_EQ(it->second.static_use, variable_info->static_use);
+ EXPECT_STREQ(it->second.name.c_str(), variable_info->name.c_str());
+ }
+
+ // Compile failure case.
+ TestHelper::SetShaderStates(
+ gl_.get(), shader1, false, &kLog, &kTranslatedSource,
+ &attrib_map, &uniform_map, &varying_map, NULL);
+ EXPECT_FALSE(shader1->valid());
+ EXPECT_STREQ(kLog.c_str(), shader1->log_info().c_str());
+ EXPECT_STREQ("", shader1->translated_source().c_str());
+ EXPECT_TRUE(shader1->attrib_map().empty());
+ EXPECT_TRUE(shader1->uniform_map().empty());
+ EXPECT_TRUE(shader1->varying_map().empty());
+}
+
+TEST_F(ShaderManagerTest, ShaderInfoUseCount) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kShader1Type = GL_VERTEX_SHADER;
+ // Check we can create shader.
+ Shader* shader1 = manager_.CreateShader(
+ kClient1Id, kService1Id, kShader1Type);
+ // Check shader got created.
+ ASSERT_TRUE(shader1 != NULL);
+ EXPECT_FALSE(shader1->InUse());
+ EXPECT_FALSE(shader1->IsDeleted());
+ manager_.UseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.UseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.MarkAsDeleted(shader1);
+ EXPECT_TRUE(shader1->IsDeleted());
+ Shader* shader2 = manager_.GetShader(kClient1Id);
+ EXPECT_EQ(shader1, shader2);
+ manager_.UnuseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.UnuseShader(shader1); // this should delete the shader.
+ shader2 = manager_.GetShader(kClient1Id);
+ EXPECT_TRUE(shader2 == NULL);
+
+ shader1 = manager_.CreateShader(kClient1Id, kService1Id, kShader1Type);
+ ASSERT_TRUE(shader1 != NULL);
+ EXPECT_FALSE(shader1->InUse());
+ manager_.UseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.UseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.UnuseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.UnuseShader(shader1);
+ EXPECT_FALSE(shader1->InUse());
+ shader2 = manager_.GetShader(kClient1Id);
+ EXPECT_EQ(shader1, shader2);
+ manager_.MarkAsDeleted(shader1); // this should delete the shader.
+ shader2 = manager_.GetShader(kClient1Id);
+ EXPECT_TRUE(shader2 == NULL);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/shader_translator.cc b/gpu/command_buffer/service/shader_translator.cc
new file mode 100644
index 0000000..bc06ab3
--- /dev/null
+++ b/gpu/command_buffer/service/shader_translator.cc
@@ -0,0 +1,256 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shader_translator.h"
+
+#include <string.h>
+#include <GLES2/gl2.h>
+#include <algorithm>
+
+#include "base/at_exit.h"
+#include "base/debug/trace_event.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+
+namespace {
+
+using gpu::gles2::ShaderTranslator;
+
+class ShaderTranslatorInitializer {
+ public:
+ ShaderTranslatorInitializer() {
+ TRACE_EVENT0("gpu", "ShInitialize");
+ CHECK(ShInitialize());
+ }
+
+ ~ShaderTranslatorInitializer() {
+ TRACE_EVENT0("gpu", "ShFinalize");
+ ShFinalize();
+ }
+};
+
+base::LazyInstance<ShaderTranslatorInitializer> g_translator_initializer =
+ LAZY_INSTANCE_INITIALIZER;
+
+void GetVariableInfo(ShHandle compiler, ShShaderInfo var_type,
+ ShaderTranslator::VariableMap* var_map) {
+ if (!var_map)
+ return;
+ var_map->clear();
+
+ size_t name_len = 0, mapped_name_len = 0;
+ switch (var_type) {
+ case SH_ACTIVE_ATTRIBUTES:
+ ShGetInfo(compiler, SH_ACTIVE_ATTRIBUTE_MAX_LENGTH, &name_len);
+ break;
+ case SH_ACTIVE_UNIFORMS:
+ ShGetInfo(compiler, SH_ACTIVE_UNIFORM_MAX_LENGTH, &name_len);
+ break;
+ case SH_VARYINGS:
+ ShGetInfo(compiler, SH_VARYING_MAX_LENGTH, &name_len);
+ break;
+ default: NOTREACHED();
+ }
+ ShGetInfo(compiler, SH_MAPPED_NAME_MAX_LENGTH, &mapped_name_len);
+ if (name_len <= 1 || mapped_name_len <= 1) return;
+ scoped_ptr<char[]> name(new char[name_len]);
+ scoped_ptr<char[]> mapped_name(new char[mapped_name_len]);
+
+ size_t num_vars = 0;
+ ShGetInfo(compiler, var_type, &num_vars);
+ for (size_t i = 0; i < num_vars; ++i) {
+ size_t len = 0;
+ int size = 0;
+ sh::GLenum type = GL_NONE;
+ ShPrecisionType precision = SH_PRECISION_UNDEFINED;
+ int static_use = 0;
+
+ ShGetVariableInfo(compiler, var_type, i,
+ &len, &size, &type, &precision, &static_use,
+ name.get(), mapped_name.get());
+
+ // In theory we should CHECK(len <= name_len - 1) here, but ANGLE needs
+ // to handle long struct field name mapping before we can do this.
+ // Also, we should modify the ANGLE interface to also return a length
+ // for mapped_name.
+ std::string name_string(name.get(), std::min(len, name_len - 1));
+ mapped_name.get()[mapped_name_len - 1] = '\0';
+
+ ShaderTranslator::VariableInfo info(
+ type, size, precision, static_use, name_string);
+ (*var_map)[mapped_name.get()] = info;
+ }
+}
+
+void GetNameHashingInfo(
+ ShHandle compiler, ShaderTranslator::NameMap* name_map) {
+ if (!name_map)
+ return;
+ name_map->clear();
+
+ size_t hashed_names_count = 0;
+ ShGetInfo(compiler, SH_HASHED_NAMES_COUNT, &hashed_names_count);
+ if (hashed_names_count == 0)
+ return;
+
+ size_t name_max_len = 0, hashed_name_max_len = 0;
+ ShGetInfo(compiler, SH_NAME_MAX_LENGTH, &name_max_len);
+ ShGetInfo(compiler, SH_HASHED_NAME_MAX_LENGTH, &hashed_name_max_len);
+
+ scoped_ptr<char[]> name(new char[name_max_len]);
+ scoped_ptr<char[]> hashed_name(new char[hashed_name_max_len]);
+
+ for (size_t i = 0; i < hashed_names_count; ++i) {
+ ShGetNameHashingEntry(compiler, i, name.get(), hashed_name.get());
+ (*name_map)[hashed_name.get()] = name.get();
+ }
+}
+
+} // namespace
+
+namespace gpu {
+namespace gles2 {
+
+ShaderTranslator::DestructionObserver::DestructionObserver() {
+}
+
+ShaderTranslator::DestructionObserver::~DestructionObserver() {
+}
+
+ShaderTranslator::ShaderTranslator()
+ : compiler_(NULL),
+ implementation_is_glsl_es_(false),
+ driver_bug_workarounds_(static_cast<ShCompileOptions>(0)) {
+}
+
+bool ShaderTranslator::Init(
+ GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ ShaderTranslatorInterface::GlslImplementationType glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds) {
+ // Make sure Init is called only once.
+ DCHECK(compiler_ == NULL);
+ DCHECK(shader_type == GL_FRAGMENT_SHADER || shader_type == GL_VERTEX_SHADER);
+ DCHECK(shader_spec == SH_GLES2_SPEC || shader_spec == SH_WEBGL_SPEC);
+ DCHECK(resources != NULL);
+
+ g_translator_initializer.Get();
+
+ ShShaderOutput shader_output =
+ (glsl_implementation_type == kGlslES ? SH_ESSL_OUTPUT : SH_GLSL_OUTPUT);
+
+ {
+ TRACE_EVENT0("gpu", "ShConstructCompiler");
+ compiler_ = ShConstructCompiler(
+ shader_type, shader_spec, shader_output, resources);
+ }
+ compiler_options_ = *resources;
+ implementation_is_glsl_es_ = (glsl_implementation_type == kGlslES);
+ driver_bug_workarounds_ = driver_bug_workarounds;
+ return compiler_ != NULL;
+}
+
+int ShaderTranslator::GetCompileOptions() const {
+ int compile_options =
+ SH_OBJECT_CODE | SH_VARIABLES | SH_ENFORCE_PACKING_RESTRICTIONS |
+ SH_LIMIT_EXPRESSION_COMPLEXITY | SH_LIMIT_CALL_STACK_DEPTH |
+ SH_CLAMP_INDIRECT_ARRAY_BOUNDS;
+
+ compile_options |= driver_bug_workarounds_;
+
+ return compile_options;
+}
+
+bool ShaderTranslator::Translate(const std::string& shader_source,
+ std::string* info_log,
+ std::string* translated_source,
+ VariableMap* attrib_map,
+ VariableMap* uniform_map,
+ VariableMap* varying_map,
+ NameMap* name_map) const {
+ // Make sure this instance is initialized.
+ DCHECK(compiler_ != NULL);
+
+ bool success = false;
+ {
+ TRACE_EVENT0("gpu", "ShCompile");
+ const char* const shader_strings[] = { shader_source.c_str() };
+ success = !!ShCompile(
+ compiler_, shader_strings, 1, GetCompileOptions());
+ }
+ if (success) {
+ if (translated_source) {
+ translated_source->clear();
+ // Get translated shader.
+ size_t obj_code_len = 0;
+ ShGetInfo(compiler_, SH_OBJECT_CODE_LENGTH, &obj_code_len);
+ if (obj_code_len > 1) {
+ scoped_ptr<char[]> buffer(new char[obj_code_len]);
+ ShGetObjectCode(compiler_, buffer.get());
+ *translated_source = std::string(buffer.get(), obj_code_len - 1);
+ }
+ }
+ // Get info for attribs, uniforms, and varyings.
+ GetVariableInfo(compiler_, SH_ACTIVE_ATTRIBUTES, attrib_map);
+ GetVariableInfo(compiler_, SH_ACTIVE_UNIFORMS, uniform_map);
+ GetVariableInfo(compiler_, SH_VARYINGS, varying_map);
+ // Get info for name hashing.
+ GetNameHashingInfo(compiler_, name_map);
+ }
+
+ // Get info log.
+ if (info_log) {
+ info_log->clear();
+ size_t info_log_len = 0;
+ ShGetInfo(compiler_, SH_INFO_LOG_LENGTH, &info_log_len);
+ if (info_log_len > 1) {
+ scoped_ptr<char[]> buffer(new char[info_log_len]);
+ ShGetInfoLog(compiler_, buffer.get());
+ *info_log = std::string(buffer.get(), info_log_len - 1);
+ }
+ }
+
+ return success;
+}
+
+std::string ShaderTranslator::GetStringForOptionsThatWouldAffectCompilation()
+ const {
+ DCHECK(compiler_ != NULL);
+
+ size_t resource_len = 0;
+ ShGetInfo(compiler_, SH_RESOURCES_STRING_LENGTH, &resource_len);
+ DCHECK(resource_len > 1);
+ scoped_ptr<char[]> resource_str(new char[resource_len]);
+
+ ShGetBuiltInResourcesString(compiler_, resource_len, resource_str.get());
+
+ return std::string(":CompileOptions:" +
+ base::IntToString(GetCompileOptions())) +
+ std::string(resource_str.get());
+}
+
+void ShaderTranslator::AddDestructionObserver(
+ DestructionObserver* observer) {
+ destruction_observers_.AddObserver(observer);
+}
+
+void ShaderTranslator::RemoveDestructionObserver(
+ DestructionObserver* observer) {
+ destruction_observers_.RemoveObserver(observer);
+}
+
+ShaderTranslator::~ShaderTranslator() {
+ FOR_EACH_OBSERVER(DestructionObserver,
+ destruction_observers_,
+ OnDestruct(this));
+
+ if (compiler_ != NULL)
+ ShDestruct(compiler_);
+}
+
+} // namespace gles2
+} // namespace gpu
+
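A condensed call sequence for the translator implemented above; shader_translator_unittest.cc, added later in this patch, exercises the same path with full expectations. Resource limits and cleanup are trimmed for brevity.

#include <GLES2/gl2.h>
#include "gpu/command_buffer/service/shader_translator.h"

// Translate one GLES2 vertex shader to desktop GLSL and collect its variables.
bool ExampleTranslate(const std::string& gles2_source,
                      std::string* glsl_out,
                      std::string* log_out) {
  ShBuiltInResources resources;
  ShInitBuiltInResources(&resources);

  scoped_refptr<gpu::gles2::ShaderTranslator> translator(
      new gpu::gles2::ShaderTranslator());
  if (!translator->Init(GL_VERTEX_SHADER, SH_GLES2_SPEC, &resources,
                        gpu::gles2::ShaderTranslatorInterface::kGlsl,
                        static_cast<ShCompileOptions>(0)))
    return false;

  gpu::gles2::ShaderTranslatorInterface::VariableMap attribs, uniforms, varyings;
  gpu::gles2::ShaderTranslatorInterface::NameMap names;
  return translator->Translate(gles2_source, log_out, glsl_out,
                               &attribs, &uniforms, &varyings, &names);
}
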
diff --git a/gpu/command_buffer/service/shader_translator.h b/gpu/command_buffer/service/shader_translator.h
new file mode 100644
index 0000000..77e04ab
--- /dev/null
+++ b/gpu/command_buffer/service/shader_translator.h
@@ -0,0 +1,156 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/observer_list.h"
+#include "gpu/gpu_export.h"
+#include "third_party/angle/include/GLSLANG/ShaderLang.h"
+
+namespace gpu {
+namespace gles2 {
+
+// Translates a GLSL ES 2.0 shader to a desktop GLSL shader, or just
+// validates GLSL ES 2.0 shaders on a true GLSL ES implementation.
+class ShaderTranslatorInterface {
+ public:
+ enum GlslImplementationType {
+ kGlsl,
+ kGlslES
+ };
+
+ struct VariableInfo {
+ VariableInfo()
+ : type(0),
+ size(0),
+ precision(SH_PRECISION_UNDEFINED),
+ static_use(0) {
+ }
+
+ VariableInfo(int _type, int _size, int _precision,
+ int _static_use, std::string _name)
+ : type(_type),
+ size(_size),
+ precision(_precision),
+ static_use(_static_use),
+ name(_name) {
+ }
+ bool operator==(
+ const ShaderTranslatorInterface::VariableInfo& other) const {
+ return type == other.type &&
+ size == other.size &&
+ precision == other.precision &&
+ strcmp(name.c_str(), other.name.c_str()) == 0;
+ }
+
+ int type;
+ int size;
+ int precision;
+ int static_use;
+ std::string name; // name in the original shader source.
+ };
+
+ // Mapping between variable name and info.
+ typedef base::hash_map<std::string, VariableInfo> VariableMap;
+ // Mapping between hashed name and original name.
+ typedef base::hash_map<std::string, std::string> NameMap;
+
+ // Initializes the translator.
+ // Must be called once before using the translator object.
+ virtual bool Init(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ GlslImplementationType glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds) = 0;
+
+ // Translates the given shader source.
+ // Returns true if translation is successful, false otherwise.
+ // Always fill |info_log| if it's non-null.
+ // Upon success, fill |translated_shader|, |attrib_map|, |uniform_map|,
+ // |varying_map|, and |name_map| if they are non-null.
+ virtual bool Translate(const std::string& shader_source,
+ std::string* info_log,
+ std::string* translated_shader,
+ VariableMap* attrib_map,
+ VariableMap* uniform_map,
+ VariableMap* varying_map,
+ NameMap* name_map) const = 0;
+
+ // Returns a string that is unique for a specific set of options that could
+ // affect compilation.
+ virtual std::string GetStringForOptionsThatWouldAffectCompilation() const = 0;
+
+ protected:
+ virtual ~ShaderTranslatorInterface() {}
+};
+
+// Implementation of ShaderTranslatorInterface
+class GPU_EXPORT ShaderTranslator
+ : public base::RefCounted<ShaderTranslator>,
+ NON_EXPORTED_BASE(public ShaderTranslatorInterface) {
+ public:
+ class DestructionObserver {
+ public:
+ DestructionObserver();
+ virtual ~DestructionObserver();
+
+ virtual void OnDestruct(ShaderTranslator* translator) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DestructionObserver);
+ };
+
+ ShaderTranslator();
+
+ // Overridden from ShaderTranslatorInterface.
+ virtual bool Init(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ GlslImplementationType glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds) OVERRIDE;
+
+ // Overridden from ShaderTranslatorInterface.
+ virtual bool Translate(const std::string& shader_source,
+ std::string* info_log,
+ std::string* translated_source,
+ VariableMap* attrib_map,
+ VariableMap* uniform_map,
+ VariableMap* varying_map,
+ NameMap* name_map) const OVERRIDE;
+
+ virtual std::string GetStringForOptionsThatWouldAffectCompilation() const
+ OVERRIDE;
+
+ void AddDestructionObserver(DestructionObserver* observer);
+ void RemoveDestructionObserver(DestructionObserver* observer);
+
+ private:
+ friend class base::RefCounted<ShaderTranslator>;
+
+ virtual ~ShaderTranslator();
+ int GetCompileOptions() const;
+
+ ShHandle compiler_;
+ ShBuiltInResources compiler_options_;
+ bool implementation_is_glsl_es_;
+ ShCompileOptions driver_bug_workarounds_;
+ ObserverList<DestructionObserver> destruction_observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderTranslator);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_H_
+
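The DestructionObserver hook above is what lets long-lived caches hold raw ShaderTranslator pointers safely. A minimal observer sketch; the real consumer is the ShaderTranslatorCache added next in this patch.

#include "gpu/command_buffer/service/shader_translator.h"

// Observers are notified from ~ShaderTranslator(), before the compiler handle
// is destroyed, so they can drop any raw pointers to the translator.
class ExampleDestructionObserver
    : public gpu::gles2::ShaderTranslator::DestructionObserver {
 public:
  virtual void OnDestruct(gpu::gles2::ShaderTranslator* translator) OVERRIDE {
    // Hypothetical bookkeeping; a real observer would unregister |translator|.
  }
};
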
diff --git a/gpu/command_buffer/service/shader_translator_cache.cc b/gpu/command_buffer/service/shader_translator_cache.cc
new file mode 100644
index 0000000..631a88e
--- /dev/null
+++ b/gpu/command_buffer/service/shader_translator_cache.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+
+#include "gpu/command_buffer/service/shader_translator_cache.h"
+
+namespace gpu {
+namespace gles2 {
+
+ShaderTranslatorCache::ShaderTranslatorCache() {
+}
+
+ShaderTranslatorCache::~ShaderTranslatorCache() {
+ DCHECK(cache_.empty());
+}
+
+void ShaderTranslatorCache::OnDestruct(ShaderTranslator* translator) {
+ Cache::iterator it = cache_.begin();
+ while (it != cache_.end()) {
+ if (it->second == translator) {
+ cache_.erase(it);
+ return;
+ }
+ it++;
+ }
+}
+
+scoped_refptr<ShaderTranslator> ShaderTranslatorCache::GetTranslator(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ ShaderTranslatorInterface::GlslImplementationType
+ glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds) {
+ ShaderTranslatorInitParams params(shader_type,
+ shader_spec,
+ *resources,
+ glsl_implementation_type,
+ driver_bug_workarounds);
+
+ Cache::iterator it = cache_.find(params);
+ if (it != cache_.end())
+ return it->second;
+
+ ShaderTranslator* translator = new ShaderTranslator();
+ if (translator->Init(shader_type, shader_spec, resources,
+ glsl_implementation_type,
+ driver_bug_workarounds)) {
+ cache_[params] = translator;
+ translator->AddDestructionObserver(this);
+ return translator;
+ } else {
+ return NULL;
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/shader_translator_cache.h b/gpu/command_buffer/service/shader_translator_cache.h
new file mode 100644
index 0000000..2a272d1
--- /dev/null
+++ b/gpu/command_buffer/service/shader_translator_cache.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_CACHE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_CACHE_H_
+
+#include <string.h>
+
+#include <map>
+
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "third_party/angle/include/GLSLANG/ShaderLang.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This class is not thread safe and can only be created and destroyed
+// on a single thread. But it is safe to use two independent instances on two
+// threads without synchronization.
+//
+// TODO(backer): Investigate using glReleaseShaderCompiler as an alternative
+// to this cache.
+class GPU_EXPORT ShaderTranslatorCache
+ : public base::RefCounted<ShaderTranslatorCache>,
+ public NON_EXPORTED_BASE(ShaderTranslator::DestructionObserver) {
+ public:
+ ShaderTranslatorCache();
+
+ // ShaderTranslator::DestructionObserver implementation
+ virtual void OnDestruct(ShaderTranslator* translator) OVERRIDE;
+
+ scoped_refptr<ShaderTranslator> GetTranslator(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ ShaderTranslatorInterface::GlslImplementationType
+ glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds);
+
+ private:
+ friend class base::RefCounted<ShaderTranslatorCache>;
+ virtual ~ShaderTranslatorCache();
+
+ // Parameters passed into ShaderTranslator::Init
+ struct ShaderTranslatorInitParams {
+ sh::GLenum shader_type;
+ ShShaderSpec shader_spec;
+ ShBuiltInResources resources;
+ ShaderTranslatorInterface::GlslImplementationType
+ glsl_implementation_type;
+ ShCompileOptions driver_bug_workarounds;
+
+ ShaderTranslatorInitParams(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources& resources,
+ ShaderTranslatorInterface::GlslImplementationType
+ glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds)
+ : shader_type(shader_type),
+ shader_spec(shader_spec),
+ resources(resources),
+ glsl_implementation_type(glsl_implementation_type),
+ driver_bug_workarounds(driver_bug_workarounds) {
+ }
+
+ ShaderTranslatorInitParams(const ShaderTranslatorInitParams& params) {
+ memcpy(this, &params, sizeof(*this));
+ }
+
+ bool operator== (const ShaderTranslatorInitParams& params) const {
+ return memcmp(&params, this, sizeof(*this)) == 0;
+ }
+
+ bool operator< (const ShaderTranslatorInitParams& params) const {
+ return memcmp(&params, this, sizeof(*this)) < 0;
+ }
+ };
+
+ typedef std::map<ShaderTranslatorInitParams, ShaderTranslator* > Cache;
+ Cache cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderTranslatorCache);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_CACHE_H_
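A rough sketch of how a decoder might fetch a translator through the cache declared above; setting up ShBuiltInResources and keeping the cache alive are assumed to happen elsewhere.

#include <GLES2/gl2.h>
#include "gpu/command_buffer/service/shader_translator_cache.h"

// Two calls with identical (type, spec, resources, implementation,
// workarounds) tuples return the same ref-counted ShaderTranslator.
scoped_refptr<gpu::gles2::ShaderTranslator> ExampleGetTranslator(
    gpu::gles2::ShaderTranslatorCache* cache,
    const ShBuiltInResources* resources) {
  return cache->GetTranslator(
      GL_FRAGMENT_SHADER, SH_GLES2_SPEC, resources,
      gpu::gles2::ShaderTranslatorInterface::kGlsl,
      static_cast<ShCompileOptions>(0));
}

The memcmp-based comparison treats the whole ShaderTranslatorInitParams struct, padding included, as the key, which is why its copy constructor uses memcpy rather than a memberwise copy.
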
diff --git a/gpu/command_buffer/service/shader_translator_unittest.cc b/gpu/command_buffer/service/shader_translator_unittest.cc
new file mode 100644
index 0000000..f489626
--- /dev/null
+++ b/gpu/command_buffer/service/shader_translator_unittest.cc
@@ -0,0 +1,323 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace gles2 {
+
+class ShaderTranslatorTest : public testing::Test {
+ public:
+ ShaderTranslatorTest() {
+ }
+
+ virtual ~ShaderTranslatorTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ ShBuiltInResources resources;
+ ShInitBuiltInResources(&resources);
+ resources.MaxExpressionComplexity = 32;
+ resources.MaxCallStackDepth = 32;
+
+ vertex_translator_ = new ShaderTranslator();
+ fragment_translator_ = new ShaderTranslator();
+
+ ASSERT_TRUE(vertex_translator_->Init(
+ GL_VERTEX_SHADER, SH_GLES2_SPEC, &resources,
+ ShaderTranslatorInterface::kGlsl,
+ SH_EMULATE_BUILT_IN_FUNCTIONS));
+ ASSERT_TRUE(fragment_translator_->Init(
+ GL_FRAGMENT_SHADER, SH_GLES2_SPEC, &resources,
+ ShaderTranslatorInterface::kGlsl,
+ static_cast<ShCompileOptions>(0)));
+ }
+ virtual void TearDown() {
+ vertex_translator_ = NULL;
+ fragment_translator_ = NULL;
+ }
+
+ scoped_refptr<ShaderTranslator> vertex_translator_;
+ scoped_refptr<ShaderTranslator> fragment_translator_;
+};
+
+TEST_F(ShaderTranslatorTest, ValidVertexShader) {
+ const char* shader =
+ "void main() {\n"
+ " gl_Position = vec4(1.0);\n"
+ "}";
+
+ // A valid shader should be successfully translated.
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_TRUE(vertex_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+  // Info log must be empty.
+ EXPECT_TRUE(info_log.empty());
+ // Translated shader must be valid and non-empty.
+ ASSERT_FALSE(translated_source.empty());
+ // There should be no attributes, uniforms, varyings.
+ EXPECT_TRUE(attrib_map.empty());
+ EXPECT_TRUE(uniform_map.empty());
+ EXPECT_TRUE(varying_map.empty());
+ // There should be no name mapping.
+ EXPECT_TRUE(name_map.empty());
+}
+
+TEST_F(ShaderTranslatorTest, InvalidVertexShader) {
+ const char* bad_shader = "foo-bar";
+ const char* good_shader =
+ "void main() {\n"
+ " gl_Position = vec4(1.0);\n"
+ "}";
+
+ // An invalid shader should fail.
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_FALSE(vertex_translator_->Translate(bad_shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+ // Info log must be valid and non-empty.
+ ASSERT_FALSE(info_log.empty());
+  // Translated source must be empty.
+ EXPECT_TRUE(translated_source.empty());
+ // There should be no attributes, uniforms, varyings, or name mapping.
+ EXPECT_TRUE(attrib_map.empty());
+ EXPECT_TRUE(uniform_map.empty());
+ EXPECT_TRUE(varying_map.empty());
+ EXPECT_TRUE(name_map.empty());
+
+ // Try a good shader after bad.
+ info_log.clear();
+ EXPECT_TRUE(vertex_translator_->Translate(good_shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+ EXPECT_TRUE(info_log.empty());
+ EXPECT_FALSE(translated_source.empty());
+}
+
+TEST_F(ShaderTranslatorTest, ValidFragmentShader) {
+ const char* shader =
+ "void main() {\n"
+ " gl_FragColor = vec4(1.0);\n"
+ "}";
+
+ // A valid shader should be successfully translated.
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_TRUE(fragment_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+  // Info log must be empty.
+ EXPECT_TRUE(info_log.empty());
+ // Translated shader must be valid and non-empty.
+ ASSERT_FALSE(translated_source.empty());
+ // There should be no attributes, uniforms, varyings, or name mapping.
+ EXPECT_TRUE(attrib_map.empty());
+ EXPECT_TRUE(uniform_map.empty());
+ EXPECT_TRUE(varying_map.empty());
+ EXPECT_TRUE(name_map.empty());
+}
+
+TEST_F(ShaderTranslatorTest, InvalidFragmentShader) {
+ const char* shader = "foo-bar";
+
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ // An invalid shader should fail.
+ EXPECT_FALSE(fragment_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+ // Info log must be valid and non-empty.
+ EXPECT_FALSE(info_log.empty());
+  // Translated source must be empty.
+ EXPECT_TRUE(translated_source.empty());
+ // There should be no attributes or uniforms.
+ EXPECT_TRUE(attrib_map.empty());
+ EXPECT_TRUE(uniform_map.empty());
+ EXPECT_TRUE(varying_map.empty());
+ EXPECT_TRUE(name_map.empty());
+}
+
+TEST_F(ShaderTranslatorTest, GetAttributes) {
+ const char* shader =
+ "attribute vec4 vPosition;\n"
+ "void main() {\n"
+ " gl_Position = vPosition;\n"
+ "}";
+
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_TRUE(vertex_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+  // Info log must be empty.
+ EXPECT_TRUE(info_log.empty());
+ // Translated shader must be valid and non-empty.
+ EXPECT_FALSE(translated_source.empty());
+ // There should be no uniforms.
+ EXPECT_TRUE(uniform_map.empty());
+  // There should be one attribute with the following characteristics:
+ // name:vPosition type:GL_FLOAT_VEC4 size:1.
+ EXPECT_EQ(1u, attrib_map.size());
+ ShaderTranslator::VariableMap::const_iterator iter =
+ attrib_map.find("vPosition");
+ EXPECT_TRUE(iter != attrib_map.end());
+ EXPECT_EQ(GL_FLOAT_VEC4, iter->second.type);
+ EXPECT_EQ(1, iter->second.size);
+ EXPECT_EQ("vPosition", iter->second.name);
+}
+
+TEST_F(ShaderTranslatorTest, GetUniforms) {
+ const char* shader =
+ "precision mediump float;\n"
+ "struct Foo {\n"
+ " vec4 color[1];\n"
+ "};\n"
+ "struct Bar {\n"
+ " Foo foo;\n"
+ "};\n"
+ "uniform Bar bar[2];\n"
+ "void main() {\n"
+ " gl_FragColor = bar[0].foo.color[0] + bar[1].foo.color[0];\n"
+ "}";
+
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_TRUE(fragment_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+  // Info log must be empty.
+ EXPECT_TRUE(info_log.empty());
+ // Translated shader must be valid and non-empty.
+ EXPECT_FALSE(translated_source.empty());
+ // There should be no attributes.
+ EXPECT_TRUE(attrib_map.empty());
+  // There should be two uniforms with the following characteristics:
+ // 1. name:bar[0].foo.color[0] type:GL_FLOAT_VEC4 size:1
+ // 2. name:bar[1].foo.color[0] type:GL_FLOAT_VEC4 size:1
+ EXPECT_EQ(2u, uniform_map.size());
+ // First uniform.
+ ShaderTranslator::VariableMap::const_iterator iter =
+ uniform_map.find("bar[0].foo.color[0]");
+ EXPECT_TRUE(iter != uniform_map.end());
+ EXPECT_EQ(GL_FLOAT_VEC4, iter->second.type);
+ EXPECT_EQ(1, iter->second.size);
+ EXPECT_EQ("bar[0].foo.color[0]", iter->second.name);
+ // Second uniform.
+ iter = uniform_map.find("bar[1].foo.color[0]");
+ EXPECT_TRUE(iter != uniform_map.end());
+ EXPECT_EQ(GL_FLOAT_VEC4, iter->second.type);
+ EXPECT_EQ(1, iter->second.size);
+ EXPECT_EQ("bar[1].foo.color[0]", iter->second.name);
+}
+
+#if defined(OS_MACOSX)
+TEST_F(ShaderTranslatorTest, BuiltInFunctionEmulation) {
+  // This test might become invalid in the future if the ANGLE translator no
+  // longer emulates dot(float, float) on Mac, or the emulated function name
+  // is no longer webgl_dot_emu.
+ const char* shader =
+ "void main() {\n"
+ " gl_Position = vec4(dot(1.0, 1.0), 1.0, 1.0, 1.0);\n"
+ "}";
+
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_TRUE(vertex_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+  // Info log must be empty.
+ EXPECT_TRUE(info_log.empty());
+ // Translated shader must be valid and non-empty.
+ ASSERT_FALSE(translated_source.empty());
+ EXPECT_TRUE(strstr(translated_source.c_str(),
+ "webgl_dot_emu") != NULL);
+}
+#endif
+
+TEST_F(ShaderTranslatorTest, OptionsString) {
+ scoped_refptr<ShaderTranslator> translator_1 = new ShaderTranslator();
+ scoped_refptr<ShaderTranslator> translator_2 = new ShaderTranslator();
+ scoped_refptr<ShaderTranslator> translator_3 = new ShaderTranslator();
+
+ ShBuiltInResources resources;
+ ShInitBuiltInResources(&resources);
+
+ ASSERT_TRUE(translator_1->Init(
+ GL_VERTEX_SHADER, SH_GLES2_SPEC, &resources,
+ ShaderTranslatorInterface::kGlsl,
+ SH_EMULATE_BUILT_IN_FUNCTIONS));
+ ASSERT_TRUE(translator_2->Init(
+ GL_FRAGMENT_SHADER, SH_GLES2_SPEC, &resources,
+ ShaderTranslatorInterface::kGlsl,
+ static_cast<ShCompileOptions>(0)));
+ resources.EXT_draw_buffers = 1;
+ ASSERT_TRUE(translator_3->Init(
+ GL_VERTEX_SHADER, SH_GLES2_SPEC, &resources,
+ ShaderTranslatorInterface::kGlsl,
+ SH_EMULATE_BUILT_IN_FUNCTIONS));
+
+ std::string options_1(
+ translator_1->GetStringForOptionsThatWouldAffectCompilation());
+ std::string options_2(
+ translator_1->GetStringForOptionsThatWouldAffectCompilation());
+ std::string options_3(
+ translator_2->GetStringForOptionsThatWouldAffectCompilation());
+ std::string options_4(
+ translator_3->GetStringForOptionsThatWouldAffectCompilation());
+
+ EXPECT_EQ(options_1, options_2);
+ EXPECT_NE(options_1, options_3);
+ EXPECT_NE(options_1, options_4);
+ EXPECT_NE(options_3, options_4);
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/stream_texture_manager_in_process_android.cc b/gpu/command_buffer/service/stream_texture_manager_in_process_android.cc
new file mode 100644
index 0000000..b59cf5c
--- /dev/null
+++ b/gpu/command_buffer/service/stream_texture_manager_in_process_android.cc
@@ -0,0 +1,170 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gfx/size.h"
+#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_image.h"
+
+namespace gpu {
+
+namespace {
+
+// Simply wraps a SurfaceTexture reference as a GLImage.
+class GLImageImpl : public gfx::GLImage {
+ public:
+ GLImageImpl(const scoped_refptr<gfx::SurfaceTexture>& surface_texture,
+ const base::Closure& release_callback);
+
+  // Implement gfx::GLImage.
+ virtual void Destroy(bool have_context) OVERRIDE;
+ virtual gfx::Size GetSize() OVERRIDE;
+ virtual bool BindTexImage(unsigned target) OVERRIDE;
+ virtual void ReleaseTexImage(unsigned target) OVERRIDE;
+ virtual bool CopyTexImage(unsigned target) OVERRIDE;
+ virtual void WillUseTexImage() OVERRIDE;
+ virtual void DidUseTexImage() OVERRIDE {}
+ virtual void WillModifyTexImage() OVERRIDE {}
+ virtual void DidModifyTexImage() OVERRIDE {}
+ virtual bool ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
+ int z_order,
+ gfx::OverlayTransform transform,
+ const gfx::Rect& bounds_rect,
+ const gfx::RectF& crop_rect) OVERRIDE;
+
+ private:
+ virtual ~GLImageImpl();
+
+ scoped_refptr<gfx::SurfaceTexture> surface_texture_;
+ base::Closure release_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLImageImpl);
+};
+
+GLImageImpl::GLImageImpl(
+ const scoped_refptr<gfx::SurfaceTexture>& surface_texture,
+ const base::Closure& release_callback)
+ : surface_texture_(surface_texture), release_callback_(release_callback) {}
+
+GLImageImpl::~GLImageImpl() {
+ release_callback_.Run();
+}
+
+void GLImageImpl::Destroy(bool have_context) {
+ NOTREACHED();
+}
+
+gfx::Size GLImageImpl::GetSize() {
+ return gfx::Size();
+}
+
+bool GLImageImpl::BindTexImage(unsigned target) {
+ NOTREACHED();
+ return false;
+}
+
+void GLImageImpl::ReleaseTexImage(unsigned target) {
+ NOTREACHED();
+}
+
+bool GLImageImpl::CopyTexImage(unsigned target) {
+ return false;
+}
+
+void GLImageImpl::WillUseTexImage() {
+ surface_texture_->UpdateTexImage();
+}
+
+bool GLImageImpl::ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
+ int z_order,
+ gfx::OverlayTransform transform,
+ const gfx::Rect& bounds_rect,
+ const gfx::RectF& crop_rect) {
+ NOTREACHED();
+ return false;
+}
+
+} // anonymous namespace
+
+StreamTextureManagerInProcess::StreamTextureManagerInProcess()
+ : next_id_(1), weak_factory_(this) {}
+
+StreamTextureManagerInProcess::~StreamTextureManagerInProcess() {
+ if (!textures_.empty()) {
+    LOG(WARNING) << "Undestroyed surface textures remain while tearing down "
+ "StreamTextureManager.";
+ }
+}
+
+GLuint StreamTextureManagerInProcess::CreateStreamTexture(
+ uint32 client_texture_id,
+ gles2::TextureManager* texture_manager) {
+ CalledOnValidThread();
+
+ gles2::TextureRef* texture = texture_manager->GetTexture(client_texture_id);
+
+ if (!texture || (texture->texture()->target() &&
+ texture->texture()->target() != GL_TEXTURE_EXTERNAL_OES)) {
+ return 0;
+ }
+
+ scoped_refptr<gfx::SurfaceTexture> surface_texture(
+ gfx::SurfaceTexture::Create(texture->service_id()));
+
+ uint32 stream_id = next_id_++;
+ base::Closure release_callback =
+ base::Bind(&StreamTextureManagerInProcess::OnReleaseStreamTexture,
+ weak_factory_.GetWeakPtr(), stream_id);
+ scoped_refptr<gfx::GLImage> gl_image(new GLImageImpl(surface_texture,
+ release_callback));
+
+ gfx::Size size = gl_image->GetSize();
+ texture_manager->SetTarget(texture, GL_TEXTURE_EXTERNAL_OES);
+ texture_manager->SetLevelInfo(texture,
+ GL_TEXTURE_EXTERNAL_OES,
+ 0,
+ GL_RGBA,
+ size.width(),
+ size.height(),
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ texture_manager->SetLevelImage(texture, GL_TEXTURE_EXTERNAL_OES, 0, gl_image);
+
+ {
+ base::AutoLock lock(map_lock_);
+ textures_[stream_id] = surface_texture;
+ }
+
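+  // A stream id of 0 means failure, so skip it if next_id_ ever wraps.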
+ if (next_id_ == 0)
+ next_id_++;
+
+ return stream_id;
+}
+
+void StreamTextureManagerInProcess::OnReleaseStreamTexture(uint32 stream_id) {
+ CalledOnValidThread();
+ base::AutoLock lock(map_lock_);
+ textures_.erase(stream_id);
+}
+
+// This can get called from any thread.
+scoped_refptr<gfx::SurfaceTexture>
+StreamTextureManagerInProcess::GetSurfaceTexture(uint32 stream_id) {
+ base::AutoLock lock(map_lock_);
+ TextureMap::const_iterator it = textures_.find(stream_id);
+ if (it != textures_.end())
+ return it->second;
+
+ return NULL;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/stream_texture_manager_in_process_android.h b/gpu/command_buffer/service/stream_texture_manager_in_process_android.h
new file mode 100644
index 0000000..8b507b0
--- /dev/null
+++ b/gpu/command_buffer/service/stream_texture_manager_in_process_android.h
@@ -0,0 +1,50 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_STREAM_TEXTURE_MANAGER_IN_PROCESS_ANDROID_H_
+#define GPU_STREAM_TEXTURE_MANAGER_IN_PROCESS_ANDROID_H_
+
+#include <map>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/non_thread_safe.h"
+
+namespace gfx {
+class SurfaceTexture;
+}
+
+namespace gpu {
+
+namespace gles2 {
+class TextureManager;
+}
+
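+// Maps stream texture ids to SurfaceTextures for the in-process command
+// buffer. CreateStreamTexture must be called on the thread this object was
+// created on; GetSurfaceTexture may be called from any thread (the texture
+// map is guarded by map_lock_).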
+class StreamTextureManagerInProcess : public base::NonThreadSafe {
+ public:
+ StreamTextureManagerInProcess();
+ ~StreamTextureManagerInProcess();
+
+ uint32 CreateStreamTexture(uint32 client_texture_id,
+ gles2::TextureManager* texture_manager);
+
+ // This method can be called from any thread.
+ scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture(uint32 stream_id);
+
+ private:
+ void OnReleaseStreamTexture(uint32 stream_id);
+
+ typedef std::map<uint32, scoped_refptr<gfx::SurfaceTexture> > TextureMap;
+ TextureMap textures_;
+ base::Lock map_lock_;
+ uint32 next_id_;
+
+ base::WeakPtrFactory<StreamTextureManagerInProcess> weak_factory_;
+ DISALLOW_COPY_AND_ASSIGN(StreamTextureManagerInProcess);
+};
+
+}  // namespace gpu
+
+#endif // GPU_STREAM_TEXTURE_MANAGER_IN_PROCESS_ANDROID_H_
diff --git a/gpu/command_buffer/service/test_helper.cc b/gpu/command_buffer/service/test_helper.cc
new file mode 100644
index 0000000..2f0e9c9
--- /dev/null
+++ b/gpu/command_buffer/service/test_helper.cc
@@ -0,0 +1,729 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/test_helper.h"
+
+#include <algorithm>
+#include <string>
+
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_tokenizer.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::NotNull;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLuint TestHelper::kServiceBlackTexture2dId;
+const GLuint TestHelper::kServiceDefaultTexture2dId;
+const GLuint TestHelper::kServiceBlackTextureCubemapId;
+const GLuint TestHelper::kServiceDefaultTextureCubemapId;
+const GLuint TestHelper::kServiceBlackExternalTextureId;
+const GLuint TestHelper::kServiceDefaultExternalTextureId;
+const GLuint TestHelper::kServiceBlackRectangleTextureId;
+const GLuint TestHelper::kServiceDefaultRectangleTextureId;
+
+const GLint TestHelper::kMaxSamples;
+const GLint TestHelper::kMaxRenderbufferSize;
+const GLint TestHelper::kMaxTextureSize;
+const GLint TestHelper::kMaxCubeMapTextureSize;
+const GLint TestHelper::kNumVertexAttribs;
+const GLint TestHelper::kNumTextureUnits;
+const GLint TestHelper::kMaxTextureImageUnits;
+const GLint TestHelper::kMaxVertexTextureImageUnits;
+const GLint TestHelper::kMaxFragmentUniformVectors;
+const GLint TestHelper::kMaxFragmentUniformComponents;
+const GLint TestHelper::kMaxVaryingVectors;
+const GLint TestHelper::kMaxVaryingFloats;
+const GLint TestHelper::kMaxVertexUniformVectors;
+const GLint TestHelper::kMaxVertexUniformComponents;
+#endif
+
+void TestHelper::SetupTextureInitializationExpectations(
+ ::gfx::MockGLInterface* gl,
+ GLenum target,
+ bool use_default_textures) {
+ InSequence sequence;
+
+ bool needs_initialization = (target != GL_TEXTURE_EXTERNAL_OES);
+ bool needs_faces = (target == GL_TEXTURE_CUBE_MAP);
+
+ static GLuint texture_2d_ids[] = {
+ kServiceBlackTexture2dId,
+ kServiceDefaultTexture2dId };
+ static GLuint texture_cube_map_ids[] = {
+ kServiceBlackTextureCubemapId,
+ kServiceDefaultTextureCubemapId };
+ static GLuint texture_external_oes_ids[] = {
+ kServiceBlackExternalTextureId,
+ kServiceDefaultExternalTextureId };
+ static GLuint texture_rectangle_arb_ids[] = {
+ kServiceBlackRectangleTextureId,
+ kServiceDefaultRectangleTextureId };
+
+ const GLuint* texture_ids = NULL;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ texture_ids = &texture_2d_ids[0];
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ texture_ids = &texture_cube_map_ids[0];
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ texture_ids = &texture_external_oes_ids[0];
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ texture_ids = &texture_rectangle_arb_ids[0];
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ int array_size = use_default_textures ? 2 : 1;
+
+ EXPECT_CALL(*gl, GenTextures(array_size, _))
+ .WillOnce(SetArrayArgument<1>(texture_ids,
+ texture_ids + array_size))
+ .RetiresOnSaturation();
+ for (int ii = 0; ii < array_size; ++ii) {
+ EXPECT_CALL(*gl, BindTexture(target, texture_ids[ii]))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (needs_initialization) {
+ if (needs_faces) {
+ static GLenum faces[] = {
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ };
+ for (size_t ii = 0; ii < arraysize(faces); ++ii) {
+ EXPECT_CALL(*gl, TexImage2D(faces[ii], 0, GL_RGBA, 1, 1, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ } else {
+ EXPECT_CALL(*gl, TexImage2D(target, 0, GL_RGBA, 1, 1, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ }
+ }
+ EXPECT_CALL(*gl, BindTexture(target, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+void TestHelper::SetupTextureManagerInitExpectations(
+ ::gfx::MockGLInterface* gl,
+ const char* extensions,
+ bool use_default_textures) {
+ InSequence sequence;
+
+ SetupTextureInitializationExpectations(
+ gl, GL_TEXTURE_2D, use_default_textures);
+ SetupTextureInitializationExpectations(
+ gl, GL_TEXTURE_CUBE_MAP, use_default_textures);
+
+ bool ext_image_external = false;
+ bool arb_texture_rectangle = false;
+ base::CStringTokenizer t(extensions, extensions + strlen(extensions), " ");
+ while (t.GetNext()) {
+ if (t.token() == "GL_OES_EGL_image_external") {
+ ext_image_external = true;
+ break;
+ }
+ if (t.token() == "GL_ARB_texture_rectangle") {
+ arb_texture_rectangle = true;
+ break;
+ }
+ }
+
+ if (ext_image_external) {
+ SetupTextureInitializationExpectations(
+ gl, GL_TEXTURE_EXTERNAL_OES, use_default_textures);
+ }
+ if (arb_texture_rectangle) {
+ SetupTextureInitializationExpectations(
+ gl, GL_TEXTURE_RECTANGLE_ARB, use_default_textures);
+ }
+}
+
+void TestHelper::SetupTextureDestructionExpectations(
+ ::gfx::MockGLInterface* gl,
+ GLenum target,
+ bool use_default_textures) {
+ if (!use_default_textures)
+ return;
+
+ GLuint texture_id = 0;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ texture_id = kServiceDefaultTexture2dId;
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ texture_id = kServiceDefaultTextureCubemapId;
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ texture_id = kServiceDefaultExternalTextureId;
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ texture_id = kServiceDefaultRectangleTextureId;
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ EXPECT_CALL(*gl, DeleteTextures(1, Pointee(texture_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+void TestHelper::SetupTextureManagerDestructionExpectations(
+ ::gfx::MockGLInterface* gl,
+ const char* extensions,
+ bool use_default_textures) {
+ SetupTextureDestructionExpectations(gl, GL_TEXTURE_2D, use_default_textures);
+ SetupTextureDestructionExpectations(
+ gl, GL_TEXTURE_CUBE_MAP, use_default_textures);
+
+ bool ext_image_external = false;
+ bool arb_texture_rectangle = false;
+ base::CStringTokenizer t(extensions, extensions + strlen(extensions), " ");
+ while (t.GetNext()) {
+ if (t.token() == "GL_OES_EGL_image_external") {
+ ext_image_external = true;
+ break;
+ }
+ if (t.token() == "GL_ARB_texture_rectangle") {
+ arb_texture_rectangle = true;
+ break;
+ }
+ }
+
+ if (ext_image_external) {
+ SetupTextureDestructionExpectations(
+ gl, GL_TEXTURE_EXTERNAL_OES, use_default_textures);
+ }
+ if (arb_texture_rectangle) {
+ SetupTextureDestructionExpectations(
+ gl, GL_TEXTURE_RECTANGLE_ARB, use_default_textures);
+ }
+
+ EXPECT_CALL(*gl, DeleteTextures(4, _))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+void TestHelper::SetupContextGroupInitExpectations(
+ ::gfx::MockGLInterface* gl,
+ const DisallowedFeatures& disallowed_features,
+ const char* extensions,
+ const char* gl_version,
+ bool bind_generates_resource) {
+ InSequence sequence;
+
+ SetupFeatureInfoInitExpectationsWithGLVersion(gl, extensions, "", gl_version);
+
+ std::string l_version(base::StringToLowerASCII(std::string(gl_version)));
+ bool is_es3 = (l_version.substr(0, 12) == "opengl es 3.");
+
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_RENDERBUFFER_SIZE, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxRenderbufferSize))
+ .RetiresOnSaturation();
+ if (strstr(extensions, "GL_EXT_framebuffer_multisample") ||
+ strstr(extensions, "GL_EXT_multisampled_render_to_texture") || is_es3) {
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_SAMPLES, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxSamples))
+ .RetiresOnSaturation();
+ } else if (strstr(extensions, "GL_IMG_multisampled_render_to_texture")) {
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_SAMPLES_IMG, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxSamples))
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_VERTEX_ATTRIBS, _))
+ .WillOnce(SetArgumentPointee<1>(kNumVertexAttribs))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, _))
+ .WillOnce(SetArgumentPointee<1>(kNumTextureUnits))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_TEXTURE_SIZE, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxTextureSize))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxCubeMapTextureSize))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxTextureImageUnits))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxVertexTextureImageUnits))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxFragmentUniformComponents))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_VARYING_FLOATS, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxVaryingFloats))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_VERTEX_UNIFORM_COMPONENTS, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxVertexUniformComponents))
+ .RetiresOnSaturation();
+
+ bool use_default_textures = bind_generates_resource;
+ SetupTextureManagerInitExpectations(gl, extensions, use_default_textures);
+}
+
+void TestHelper::SetupFeatureInfoInitExpectations(
+ ::gfx::MockGLInterface* gl, const char* extensions) {
+ SetupFeatureInfoInitExpectationsWithGLVersion(gl, extensions, "", "");
+}
+
+void TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
+ ::gfx::MockGLInterface* gl,
+ const char* extensions,
+ const char* gl_renderer,
+ const char* gl_version) {
+ InSequence sequence;
+
+ EXPECT_CALL(*gl, GetString(GL_EXTENSIONS))
+ .WillOnce(Return(reinterpret_cast<const uint8*>(extensions)))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetString(GL_RENDERER))
+ .WillOnce(Return(reinterpret_cast<const uint8*>(gl_renderer)))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetString(GL_VERSION))
+ .WillOnce(Return(reinterpret_cast<const uint8*>(gl_version)))
+ .RetiresOnSaturation();
+
+ std::string l_version(base::StringToLowerASCII(std::string(gl_version)));
+ bool is_es3 = (l_version.substr(0, 12) == "opengl es 3.");
+
+ if (strstr(extensions, "GL_ARB_texture_float") ||
+ (is_es3 && strstr(extensions, "GL_EXT_color_buffer_float"))) {
+ static const GLuint gl_ids[] = {101, 102};
+ const GLsizei width = 16;
+ EXPECT_CALL(*gl, GetIntegerv(GL_FRAMEBUFFER_BINDING, _))
+ .WillOnce(SetArgumentPointee<1>(gl_ids[0]))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_TEXTURE_BINDING_2D, _))
+ .WillOnce(SetArgumentPointee<1>(gl_ids[0]))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GenTextures(1, _))
+ .WillOnce(SetArrayArgument<1>(gl_ids + 1, gl_ids + 2))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GenFramebuffersEXT(1, _))
+ .WillOnce(SetArrayArgument<1>(gl_ids + 1, gl_ids + 2))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BindTexture(GL_TEXTURE_2D, gl_ids[1]))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, width, 0,
+ GL_RGBA, GL_FLOAT, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BindFramebufferEXT(GL_FRAMEBUFFER, gl_ids[1]))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, FramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, gl_ids[1], 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, TexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, width, width, 0,
+ GL_RGB, GL_FLOAT, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (is_es3) {
+ EXPECT_CALL(*gl, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT))
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl, DeleteFramebuffersEXT(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BindFramebufferEXT(GL_FRAMEBUFFER, gl_ids[0]))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BindTexture(GL_TEXTURE_2D, gl_ids[0]))
+ .Times(1)
+ .RetiresOnSaturation();
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+#endif
+ }
+}
+
+void TestHelper::SetupExpectationsForClearingUniforms(
+ ::gfx::MockGLInterface* gl, UniformInfo* uniforms, size_t num_uniforms) {
+ for (size_t ii = 0; ii < num_uniforms; ++ii) {
+ const UniformInfo& info = uniforms[ii];
+ switch (info.type) {
+ case GL_FLOAT:
+ EXPECT_CALL(*gl, Uniform1fv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_VEC2:
+ EXPECT_CALL(*gl, Uniform2fv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_VEC3:
+ EXPECT_CALL(*gl, Uniform3fv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_VEC4:
+ EXPECT_CALL(*gl, Uniform4fv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_INT:
+ case GL_BOOL:
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_3D_OES:
+ case GL_SAMPLER_2D_RECT_ARB:
+ EXPECT_CALL(*gl, Uniform1iv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_INT_VEC2:
+ case GL_BOOL_VEC2:
+ EXPECT_CALL(*gl, Uniform2iv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_INT_VEC3:
+ case GL_BOOL_VEC3:
+ EXPECT_CALL(*gl, Uniform3iv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_INT_VEC4:
+ case GL_BOOL_VEC4:
+ EXPECT_CALL(*gl, Uniform4iv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_MAT2:
+ EXPECT_CALL(*gl, UniformMatrix2fv(
+ info.real_location, info.size, false, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_MAT3:
+ EXPECT_CALL(*gl, UniformMatrix3fv(
+ info.real_location, info.size, false, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_MAT4:
+ EXPECT_CALL(*gl, UniformMatrix4fv(
+ info.real_location, info.size, false, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ }
+}
+
+void TestHelper::SetupProgramSuccessExpectations(
+ ::gfx::MockGLInterface* gl,
+ AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint service_id) {
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_LINK_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_ACTIVE_ATTRIBUTES, _))
+ .WillOnce(SetArgumentPointee<2>(num_attribs))
+ .RetiresOnSaturation();
+ size_t max_attrib_len = 0;
+ for (size_t ii = 0; ii < num_attribs; ++ii) {
+ size_t len = strlen(attribs[ii].name) + 1;
+ max_attrib_len = std::max(max_attrib_len, len);
+ }
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(max_attrib_len))
+ .RetiresOnSaturation();
+
+ for (size_t ii = 0; ii < num_attribs; ++ii) {
+ const AttribInfo& info = attribs[ii];
+ EXPECT_CALL(*gl,
+ GetActiveAttrib(service_id, ii,
+ max_attrib_len, _, _, _, _))
+ .WillOnce(DoAll(
+ SetArgumentPointee<3>(strlen(info.name)),
+ SetArgumentPointee<4>(info.size),
+ SetArgumentPointee<5>(info.type),
+ SetArrayArgument<6>(info.name,
+ info.name + strlen(info.name) + 1)))
+ .RetiresOnSaturation();
+ if (!ProgramManager::IsInvalidPrefix(info.name, strlen(info.name))) {
+ EXPECT_CALL(*gl, GetAttribLocation(service_id, StrEq(info.name)))
+ .WillOnce(Return(info.location))
+ .RetiresOnSaturation();
+ }
+ }
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_ACTIVE_UNIFORMS, _))
+ .WillOnce(SetArgumentPointee<2>(num_uniforms))
+ .RetiresOnSaturation();
+
+ size_t max_uniform_len = 0;
+ for (size_t ii = 0; ii < num_uniforms; ++ii) {
+ size_t len = strlen(uniforms[ii].name) + 1;
+ max_uniform_len = std::max(max_uniform_len, len);
+ }
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_ACTIVE_UNIFORM_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(max_uniform_len))
+ .RetiresOnSaturation();
+ for (size_t ii = 0; ii < num_uniforms; ++ii) {
+ const UniformInfo& info = uniforms[ii];
+ EXPECT_CALL(*gl,
+ GetActiveUniform(service_id, ii,
+ max_uniform_len, _, _, _, _))
+ .WillOnce(DoAll(
+ SetArgumentPointee<3>(strlen(info.name)),
+ SetArgumentPointee<4>(info.size),
+ SetArgumentPointee<5>(info.type),
+ SetArrayArgument<6>(info.name,
+ info.name + strlen(info.name) + 1)))
+ .RetiresOnSaturation();
+ }
+
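+  // Two passes over the uniforms: GetUniformLocation is expected for every
+  // valid uniform in pass 0, and the extra location queries for array
+  // elements beyond [0] are expected in pass 0 for uniforms with a desired
+  // (bound) location and in pass 1 for the rest, presumably matching the
+  // order in which ProgramManager assigns locations.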
+ for (int pass = 0; pass < 2; ++pass) {
+ for (size_t ii = 0; ii < num_uniforms; ++ii) {
+ const UniformInfo& info = uniforms[ii];
+ if (ProgramManager::IsInvalidPrefix(info.name, strlen(info.name))) {
+ continue;
+ }
+ if (pass == 0) {
+ EXPECT_CALL(*gl, GetUniformLocation(service_id, StrEq(info.name)))
+ .WillOnce(Return(info.real_location))
+ .RetiresOnSaturation();
+ }
+ if ((pass == 0 && info.desired_location >= 0) ||
+ (pass == 1 && info.desired_location < 0)) {
+ if (info.size > 1) {
+ std::string base_name = info.name;
+ size_t array_pos = base_name.rfind("[0]");
+ if (base_name.size() > 3 && array_pos == base_name.size() - 3) {
+ base_name = base_name.substr(0, base_name.size() - 3);
+ }
+ for (GLsizei jj = 1; jj < info.size; ++jj) {
+ std::string element_name(
+ std::string(base_name) + "[" + base::IntToString(jj) + "]");
+ EXPECT_CALL(*gl, GetUniformLocation(
+ service_id, StrEq(element_name)))
+ .WillOnce(Return(info.real_location + jj * 2))
+ .RetiresOnSaturation();
+ }
+ }
+ }
+ }
+ }
+}
+
+void TestHelper::SetupShader(
+ ::gfx::MockGLInterface* gl,
+ AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint service_id) {
+ InSequence s;
+
+ EXPECT_CALL(*gl,
+ LinkProgram(service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ SetupProgramSuccessExpectations(
+ gl, attribs, num_attribs, uniforms, num_uniforms, service_id);
+}
+
+void TestHelper::DoBufferData(
+ ::gfx::MockGLInterface* gl, MockErrorState* error_state,
+ BufferManager* manager, Buffer* buffer, GLsizeiptr size, GLenum usage,
+ const GLvoid* data, GLenum error) {
+ EXPECT_CALL(*error_state, CopyRealGLErrorsToWrapper(_, _, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (manager->IsUsageClientSideArray(usage)) {
+ EXPECT_CALL(*gl, BufferData(
+ buffer->target(), 0, _, usage))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl, BufferData(
+ buffer->target(), size, _, usage))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*error_state, PeekGLError(_, _, _))
+ .WillOnce(Return(error))
+ .RetiresOnSaturation();
+ manager->DoBufferData(error_state, buffer, size, usage, data);
+}
+
+void TestHelper::SetTexParameteriWithExpectations(
+ ::gfx::MockGLInterface* gl, MockErrorState* error_state,
+ TextureManager* manager, TextureRef* texture_ref,
+ GLenum pname, GLint value, GLenum error) {
+ if (error == GL_NO_ERROR) {
+ if (pname != GL_TEXTURE_POOL_CHROMIUM) {
+ EXPECT_CALL(*gl, TexParameteri(texture_ref->texture()->target(),
+ pname, value))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ } else if (error == GL_INVALID_ENUM) {
+ EXPECT_CALL(*error_state, SetGLErrorInvalidEnum(_, _, _, value, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*error_state, SetGLErrorInvalidParami(_, _, error, _, _, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ manager->SetParameteri("", error_state, texture_ref, pname, value);
+}
+
+// static
+void TestHelper::SetShaderStates(
+ ::gfx::MockGLInterface* gl, Shader* shader,
+ bool expected_valid,
+ const std::string* const expected_log_info,
+ const std::string* const expected_translated_source,
+ const ShaderTranslatorInterface::VariableMap* const expected_attrib_map,
+ const ShaderTranslatorInterface::VariableMap* const expected_uniform_map,
+ const ShaderTranslatorInterface::VariableMap* const expected_varying_map,
+ const ShaderTranslatorInterface::NameMap* const expected_name_map) {
+ const std::string empty_log_info;
+ const std::string* log_info = (expected_log_info && !expected_valid) ?
+ expected_log_info : &empty_log_info;
+ const std::string empty_translated_source;
+ const std::string* translated_source =
+ (expected_translated_source && expected_valid) ?
+ expected_translated_source : &empty_translated_source;
+ const ShaderTranslatorInterface::VariableMap empty_attrib_map;
+ const ShaderTranslatorInterface::VariableMap* attrib_map =
+ (expected_attrib_map && expected_valid) ?
+ expected_attrib_map : &empty_attrib_map;
+ const ShaderTranslatorInterface::VariableMap empty_uniform_map;
+ const ShaderTranslatorInterface::VariableMap* uniform_map =
+ (expected_uniform_map && expected_valid) ?
+ expected_uniform_map : &empty_uniform_map;
+ const ShaderTranslatorInterface::VariableMap empty_varying_map;
+ const ShaderTranslatorInterface::VariableMap* varying_map =
+ (expected_varying_map && expected_valid) ?
+ expected_varying_map : &empty_varying_map;
+ const ShaderTranslatorInterface::NameMap empty_name_map;
+ const ShaderTranslatorInterface::NameMap* name_map =
+ (expected_name_map && expected_valid) ?
+ expected_name_map : &empty_name_map;
+
+ MockShaderTranslator translator;
+ EXPECT_CALL(translator, Translate(_,
+ NotNull(), // log_info
+ NotNull(), // translated_source
+ NotNull(), // attrib_map
+ NotNull(), // uniform_map
+ NotNull(), // varying_map
+ NotNull())) // name_map
+ .WillOnce(DoAll(SetArgumentPointee<1>(*log_info),
+ SetArgumentPointee<2>(*translated_source),
+ SetArgumentPointee<3>(*attrib_map),
+ SetArgumentPointee<4>(*uniform_map),
+ SetArgumentPointee<5>(*varying_map),
+ SetArgumentPointee<6>(*name_map),
+ Return(expected_valid)))
+ .RetiresOnSaturation();
+ if (expected_valid) {
+ EXPECT_CALL(*gl, ShaderSource(shader->service_id(), 1, _, NULL))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, CompileShader(shader->service_id()))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetShaderiv(shader->service_id(),
+ GL_COMPILE_STATUS,
+ NotNull())) // status
+ .WillOnce(SetArgumentPointee<2>(GL_TRUE))
+ .RetiresOnSaturation();
+ }
+ shader->DoCompile(&translator, Shader::kGL);
+}
+
+// static
+void TestHelper::SetShaderStates(
+ ::gfx::MockGLInterface* gl, Shader* shader, bool valid) {
+ SetShaderStates(gl, shader, valid, NULL, NULL, NULL, NULL, NULL, NULL);
+}
+
+ScopedGLImplementationSetter::ScopedGLImplementationSetter(
+ gfx::GLImplementation implementation)
+ : old_implementation_(gfx::GetGLImplementation()) {
+ gfx::SetGLImplementation(implementation);
+}
+
+ScopedGLImplementationSetter::~ScopedGLImplementationSetter() {
+ gfx::SetGLImplementation(old_implementation_);
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/test_helper.h b/gpu/command_buffer/service/test_helper.h
new file mode 100644
index 0000000..92e929e
--- /dev/null
+++ b/gpu/command_buffer/service/test_helper.h
@@ -0,0 +1,150 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_TEST_HELPER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_TEST_HELPER_H_
+
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+
+namespace gpu {
+namespace gles2 {
+
+struct DisallowedFeatures;
+class Buffer;
+class BufferManager;
+class MockErrorState;
+class Shader;
+class TextureRef;
+class TextureManager;
+
+class TestHelper {
+ public:
+ static const GLuint kServiceBlackTexture2dId = 701;
+ static const GLuint kServiceDefaultTexture2dId = 702;
+ static const GLuint kServiceBlackTextureCubemapId = 703;
+ static const GLuint kServiceDefaultTextureCubemapId = 704;
+ static const GLuint kServiceBlackExternalTextureId = 705;
+ static const GLuint kServiceDefaultExternalTextureId = 706;
+ static const GLuint kServiceBlackRectangleTextureId = 707;
+ static const GLuint kServiceDefaultRectangleTextureId = 708;
+
+ static const GLint kMaxSamples = 4;
+ static const GLint kMaxRenderbufferSize = 1024;
+ static const GLint kMaxTextureSize = 2048;
+ static const GLint kMaxCubeMapTextureSize = 256;
+ static const GLint kNumVertexAttribs = 16;
+ static const GLint kNumTextureUnits = 8;
+ static const GLint kMaxTextureImageUnits = 8;
+ static const GLint kMaxVertexTextureImageUnits = 2;
+ static const GLint kMaxFragmentUniformVectors = 16;
+ static const GLint kMaxFragmentUniformComponents =
+ kMaxFragmentUniformVectors * 4;
+ static const GLint kMaxVaryingVectors = 8;
+ static const GLint kMaxVaryingFloats = kMaxVaryingVectors * 4;
+ static const GLint kMaxVertexUniformVectors = 128;
+ static const GLint kMaxVertexUniformComponents = kMaxVertexUniformVectors * 4;
+
+ struct AttribInfo {
+ const char* name;
+ GLint size;
+ GLenum type;
+ GLint location;
+ };
+
+ struct UniformInfo {
+ const char* name;
+ GLint size;
+ GLenum type;
+ GLint fake_location;
+ GLint real_location;
+ GLint desired_location;
+ const char* good_name;
+ };
+
+ static void SetupContextGroupInitExpectations(
+ ::gfx::MockGLInterface* gl,
+ const DisallowedFeatures& disallowed_features,
+ const char* extensions,
+ const char* gl_version,
+ bool bind_generates_resource);
+ static void SetupFeatureInfoInitExpectations(
+ ::gfx::MockGLInterface* gl, const char* extensions);
+ static void SetupFeatureInfoInitExpectationsWithGLVersion(
+ ::gfx::MockGLInterface* gl,
+ const char* extensions,
+ const char* gl_renderer,
+ const char* gl_version);
+ static void SetupTextureManagerInitExpectations(::gfx::MockGLInterface* gl,
+ const char* extensions,
+ bool use_default_textures);
+ static void SetupTextureManagerDestructionExpectations(
+ ::gfx::MockGLInterface* gl,
+ const char* extensions,
+ bool use_default_textures);
+
+ static void SetupExpectationsForClearingUniforms(
+ ::gfx::MockGLInterface* gl, UniformInfo* uniforms, size_t num_uniforms);
+
+ static void SetupShader(
+ ::gfx::MockGLInterface* gl,
+ AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint service_id);
+
+ static void SetupProgramSuccessExpectations(::gfx::MockGLInterface* gl,
+ AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint service_id);
+
+ static void DoBufferData(
+ ::gfx::MockGLInterface* gl, MockErrorState* error_state,
+ BufferManager* manager, Buffer* buffer, GLsizeiptr size, GLenum usage,
+ const GLvoid* data, GLenum error);
+
+ static void SetTexParameteriWithExpectations(
+ ::gfx::MockGLInterface* gl, MockErrorState* error_state,
+ TextureManager* manager, TextureRef* texture_ref,
+ GLenum pname, GLint value, GLenum error);
+
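+  // Routes |shader| through a MockShaderTranslator and compiles it. Any of
+  // the expected_* pointers may be NULL, in which case an empty value is
+  // substituted; the non-log expectations are applied only when
+  // expected_valid is true, and the log only when it is false.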
+ static void SetShaderStates(
+ ::gfx::MockGLInterface* gl, Shader* shader,
+ bool expected_valid,
+ const std::string* const expected_log_info,
+ const std::string* const expected_translated_source,
+ const ShaderTranslatorInterface::VariableMap* const expected_attrib_map,
+ const ShaderTranslatorInterface::VariableMap* const expected_uniform_map,
+ const ShaderTranslatorInterface::VariableMap* const expected_varying_map,
+ const ShaderTranslatorInterface::NameMap* const expected_name_map);
+
+ static void SetShaderStates(
+ ::gfx::MockGLInterface* gl, Shader* shader, bool valid);
+
+ private:
+ static void SetupTextureInitializationExpectations(::gfx::MockGLInterface* gl,
+ GLenum target,
+ bool use_default_textures);
+ static void SetupTextureDestructionExpectations(::gfx::MockGLInterface* gl,
+ GLenum target,
+ bool use_default_textures);
+};
+
+// This object temporarily sets what gfx::GetGLImplementation returns. During
+// testing the GLImplementation is set to kGLImplementationMockGL, but lots of
+// code branches based on what gfx::GetGLImplementation returns.
+class ScopedGLImplementationSetter {
+ public:
+ explicit ScopedGLImplementationSetter(gfx::GLImplementation implementation);
+ ~ScopedGLImplementationSetter();
+
+ private:
+ gfx::GLImplementation old_implementation_;
+};
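+
+// A minimal usage sketch (kGLImplementationDesktopGL is just an example
+// value):
+//
+//   {
+//     ScopedGLImplementationSetter gl_impl(gfx::kGLImplementationDesktopGL);
+//     // Code under test that branches on gfx::GetGLImplementation().
+//   }  // The previous implementation is restored here.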
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_TEST_HELPER_H_
+
diff --git a/gpu/command_buffer/service/texture_definition.cc b/gpu/command_buffer/service/texture_definition.cc
new file mode 100644
index 0000000..393dda0
--- /dev/null
+++ b/gpu/command_buffer/service/texture_definition.cc
@@ -0,0 +1,496 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/texture_definition.h"
+
+#include <list>
+
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/gl_image.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/scoped_binders.h"
+
+#if !defined(OS_MACOSX)
+#include "ui/gl/gl_fence_egl.h"
+#include "ui/gl/gl_surface_egl.h"
+#endif
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+class GLImageSync : public gfx::GLImage {
+ public:
+ explicit GLImageSync(const scoped_refptr<NativeImageBuffer>& buffer,
+ const gfx::Size& size);
+
+ // Implement GLImage.
+ virtual void Destroy(bool have_context) OVERRIDE;
+ virtual gfx::Size GetSize() OVERRIDE;
+ virtual bool BindTexImage(unsigned target) OVERRIDE;
+ virtual void ReleaseTexImage(unsigned target) OVERRIDE;
+ virtual bool CopyTexImage(unsigned target) OVERRIDE;
+ virtual void WillUseTexImage() OVERRIDE;
+ virtual void WillModifyTexImage() OVERRIDE;
+ virtual void DidModifyTexImage() OVERRIDE;
+ virtual void DidUseTexImage() OVERRIDE;
+ virtual bool ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
+ int z_order,
+ gfx::OverlayTransform transform,
+ const gfx::Rect& bounds_rect,
+ const gfx::RectF& crop_rect) OVERRIDE;
+
+ protected:
+ virtual ~GLImageSync();
+
+ private:
+ scoped_refptr<NativeImageBuffer> buffer_;
+ gfx::Size size_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLImageSync);
+};
+
+GLImageSync::GLImageSync(const scoped_refptr<NativeImageBuffer>& buffer,
+ const gfx::Size& size)
+ : buffer_(buffer), size_(size) {
+ if (buffer.get())
+ buffer->AddClient(this);
+}
+
+GLImageSync::~GLImageSync() {
+ if (buffer_.get())
+ buffer_->RemoveClient(this);
+}
+
+void GLImageSync::Destroy(bool have_context) {
+}
+
+gfx::Size GLImageSync::GetSize() {
+ return size_;
+}
+
+bool GLImageSync::BindTexImage(unsigned target) {
+ NOTREACHED();
+ return false;
+}
+
+void GLImageSync::ReleaseTexImage(unsigned target) {
+ NOTREACHED();
+}
+
+bool GLImageSync::CopyTexImage(unsigned target) {
+ return false;
+}
+
+void GLImageSync::WillUseTexImage() {
+ if (buffer_.get())
+ buffer_->WillRead(this);
+}
+
+void GLImageSync::DidUseTexImage() {
+ if (buffer_.get())
+ buffer_->DidRead(this);
+}
+
+void GLImageSync::WillModifyTexImage() {
+ if (buffer_.get())
+ buffer_->WillWrite(this);
+}
+
+void GLImageSync::DidModifyTexImage() {
+ if (buffer_.get())
+ buffer_->DidWrite(this);
+}
+
+bool GLImageSync::ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
+ int z_order,
+ gfx::OverlayTransform transform,
+ const gfx::Rect& bounds_rect,
+ const gfx::RectF& crop_rect) {
+ NOTREACHED();
+ return false;
+}
+
+#if !defined(OS_MACOSX)
+class NativeImageBufferEGL : public NativeImageBuffer {
+ public:
+ static scoped_refptr<NativeImageBufferEGL> Create(GLuint texture_id);
+
+ private:
+ NativeImageBufferEGL(EGLDisplay display, EGLImageKHR image);
+ virtual ~NativeImageBufferEGL();
+ virtual void AddClient(gfx::GLImage* client) OVERRIDE;
+ virtual void RemoveClient(gfx::GLImage* client) OVERRIDE;
+ virtual bool IsClient(gfx::GLImage* client) OVERRIDE;
+ virtual void BindToTexture(GLenum target) OVERRIDE;
+ virtual void WillRead(gfx::GLImage* client) OVERRIDE;
+ virtual void WillWrite(gfx::GLImage* client) OVERRIDE;
+ virtual void DidRead(gfx::GLImage* client) OVERRIDE;
+ virtual void DidWrite(gfx::GLImage* client) OVERRIDE;
+
+ EGLDisplay egl_display_;
+ EGLImageKHR egl_image_;
+
+ base::Lock lock_;
+
+ struct ClientInfo {
+ ClientInfo(gfx::GLImage* client);
+ ~ClientInfo();
+
+ gfx::GLImage* client;
+ bool needs_wait_before_read;
+ linked_ptr<gfx::GLFence> read_fence;
+ };
+ std::list<ClientInfo> client_infos_;
+ scoped_ptr<gfx::GLFence> write_fence_;
+ gfx::GLImage* write_client_;
+
+ DISALLOW_COPY_AND_ASSIGN(NativeImageBufferEGL);
+};
+
+scoped_refptr<NativeImageBufferEGL> NativeImageBufferEGL::Create(
+ GLuint texture_id) {
+ EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
+ EGLContext egl_context = eglGetCurrentContext();
+
+ DCHECK_NE(EGL_NO_CONTEXT, egl_context);
+ DCHECK_NE(EGL_NO_DISPLAY, egl_display);
+ DCHECK(glIsTexture(texture_id));
+
+ DCHECK(gfx::g_driver_egl.ext.b_EGL_KHR_image_base &&
+ gfx::g_driver_egl.ext.b_EGL_KHR_gl_texture_2D_image &&
+ gfx::g_driver_gl.ext.b_GL_OES_EGL_image &&
+ gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync);
+
+ const EGLint egl_attrib_list[] = {
+ EGL_GL_TEXTURE_LEVEL_KHR, 0, EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE};
+ EGLClientBuffer egl_buffer = reinterpret_cast<EGLClientBuffer>(texture_id);
+ EGLenum egl_target = EGL_GL_TEXTURE_2D_KHR; // TODO
+
+ EGLImageKHR egl_image = eglCreateImageKHR(
+ egl_display, egl_context, egl_target, egl_buffer, egl_attrib_list);
+
+ if (egl_image == EGL_NO_IMAGE_KHR)
+ return NULL;
+
+ return new NativeImageBufferEGL(egl_display, egl_image);
+}
+
+NativeImageBufferEGL::ClientInfo::ClientInfo(gfx::GLImage* client)
+ : client(client), needs_wait_before_read(true) {}
+
+NativeImageBufferEGL::ClientInfo::~ClientInfo() {}
+
+NativeImageBufferEGL::NativeImageBufferEGL(EGLDisplay display,
+ EGLImageKHR image)
+ : NativeImageBuffer(),
+ egl_display_(display),
+ egl_image_(image),
+ write_fence_(new gfx::GLFenceEGL(true)),
+ write_client_(NULL) {
+ DCHECK(egl_display_ != EGL_NO_DISPLAY);
+ DCHECK(egl_image_ != EGL_NO_IMAGE_KHR);
+}
+
+NativeImageBufferEGL::~NativeImageBufferEGL() {
+ DCHECK(client_infos_.empty());
+ if (egl_image_ != EGL_NO_IMAGE_KHR)
+ eglDestroyImageKHR(egl_display_, egl_image_);
+}
+
+void NativeImageBufferEGL::AddClient(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ client_infos_.push_back(ClientInfo(client));
+}
+
+void NativeImageBufferEGL::RemoveClient(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ if (write_client_ == client)
+ write_client_ = NULL;
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ if (it->client == client) {
+ client_infos_.erase(it);
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+bool NativeImageBufferEGL::IsClient(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ if (it->client == client)
+ return true;
+ }
+ return false;
+}
+
+void NativeImageBufferEGL::BindToTexture(GLenum target) {
+ DCHECK(egl_image_ != EGL_NO_IMAGE_KHR);
+ glEGLImageTargetTexture2DOES(target, egl_image_);
+ DCHECK_EQ(static_cast<EGLint>(EGL_SUCCESS), eglGetError());
+ DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+}
+
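+// The fence protocol implemented below: DidWrite() resets write_fence_ and
+// marks every client as needing a wait; WillRead() makes a reader
+// server-wait on write_fence_ once per write, and DidRead() records a
+// per-client read fence; WillWrite() server-waits on the pending write fence
+// and on every other client's read fence before the buffer is modified.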
+void NativeImageBufferEGL::WillRead(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ if (!write_fence_.get() || write_client_ == client)
+ return;
+
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ if (it->client == client) {
+ if (it->needs_wait_before_read) {
+ it->needs_wait_before_read = false;
+ write_fence_->ServerWait();
+ }
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+void NativeImageBufferEGL::WillWrite(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ if (write_client_ != client)
+ write_fence_->ServerWait();
+
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ if (it->read_fence.get() && it->client != client)
+ it->read_fence->ServerWait();
+ }
+}
+
+void NativeImageBufferEGL::DidRead(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ if (it->client == client) {
+ it->read_fence = make_linked_ptr(new gfx::GLFenceEGL(true));
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+void NativeImageBufferEGL::DidWrite(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ // Sharing semantics require the client to flush in order to make changes
+ // visible to other clients.
+ write_fence_.reset(new gfx::GLFenceEGL(false));
+ write_client_ = client;
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ it->needs_wait_before_read = true;
+ }
+}
+
+#endif
+
+class NativeImageBufferStub : public NativeImageBuffer {
+ public:
+ NativeImageBufferStub() : NativeImageBuffer() {}
+
+ private:
+ virtual ~NativeImageBufferStub() {}
+ virtual void AddClient(gfx::GLImage* client) OVERRIDE {}
+ virtual void RemoveClient(gfx::GLImage* client) OVERRIDE {}
+ virtual bool IsClient(gfx::GLImage* client) OVERRIDE { return true; }
+ virtual void BindToTexture(GLenum target) OVERRIDE {}
+ virtual void WillRead(gfx::GLImage* client) OVERRIDE {}
+ virtual void WillWrite(gfx::GLImage* client) OVERRIDE {}
+ virtual void DidRead(gfx::GLImage* client) OVERRIDE {}
+ virtual void DidWrite(gfx::GLImage* client) OVERRIDE {}
+
+ DISALLOW_COPY_AND_ASSIGN(NativeImageBufferStub);
+};
+
+} // anonymous namespace
+
+// static
+scoped_refptr<NativeImageBuffer> NativeImageBuffer::Create(GLuint texture_id) {
+ switch (gfx::GetGLImplementation()) {
+#if !defined(OS_MACOSX)
+ case gfx::kGLImplementationEGLGLES2:
+ return NativeImageBufferEGL::Create(texture_id);
+#endif
+ case gfx::kGLImplementationMockGL:
+ return new NativeImageBufferStub;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+}
+
+TextureDefinition::LevelInfo::LevelInfo(GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared)
+ : target(target),
+ internal_format(internal_format),
+ width(width),
+ height(height),
+ depth(depth),
+ border(border),
+ format(format),
+ type(type),
+ cleared(cleared) {}
+
+TextureDefinition::LevelInfo::~LevelInfo() {}
+
+TextureDefinition::TextureDefinition(
+ GLenum target,
+ Texture* texture,
+ unsigned int version,
+ const scoped_refptr<NativeImageBuffer>& image_buffer)
+ : version_(version),
+ target_(target),
+ image_buffer_(image_buffer.get()
+ ? image_buffer
+ : NativeImageBuffer::Create(texture->service_id())),
+ min_filter_(texture->min_filter()),
+ mag_filter_(texture->mag_filter()),
+ wrap_s_(texture->wrap_s()),
+ wrap_t_(texture->wrap_t()),
+ usage_(texture->usage()),
+ immutable_(texture->IsImmutable()) {
+ // TODO
+ DCHECK(!texture->level_infos_.empty());
+ DCHECK(!texture->level_infos_[0].empty());
+ DCHECK(!texture->NeedsMips());
+ DCHECK(texture->level_infos_[0][0].width);
+ DCHECK(texture->level_infos_[0][0].height);
+
+ scoped_refptr<gfx::GLImage> gl_image(
+ new GLImageSync(image_buffer_,
+ gfx::Size(texture->level_infos_[0][0].width,
+ texture->level_infos_[0][0].height)));
+ texture->SetLevelImage(NULL, target, 0, gl_image.get());
+
+ // TODO: all levels
+ level_infos_.clear();
+ const Texture::LevelInfo& level = texture->level_infos_[0][0];
+ LevelInfo info(level.target,
+ level.internal_format,
+ level.width,
+ level.height,
+ level.depth,
+ level.border,
+ level.format,
+ level.type,
+ level.cleared);
+ std::vector<LevelInfo> infos;
+ infos.push_back(info);
+ level_infos_.push_back(infos);
+}
+
+TextureDefinition::~TextureDefinition() {
+}
+
+Texture* TextureDefinition::CreateTexture() const {
+ if (!image_buffer_.get())
+ return NULL;
+
+ GLuint texture_id;
+ glGenTextures(1, &texture_id);
+
+ Texture* texture(new Texture(texture_id));
+ UpdateTexture(texture);
+
+ return texture;
+}
+
+void TextureDefinition::UpdateTexture(Texture* texture) const {
+ gfx::ScopedTextureBinder texture_binder(target_, texture->service_id());
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, min_filter_);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, mag_filter_);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrap_s_);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrap_t_);
+ if (image_buffer_.get())
+ image_buffer_->BindToTexture(target_);
+ // We have to make sure the changes are visible to other clients in this
+ // share group. As far as the clients are concerned, the mailbox semantics
+ // only demand a single flush from the client after changes are first made,
+ // and they cannot tell when another share group boundary is crossed, so the
+ // service has to flush here on their behalf. We could probably track this
+ // and be a bit smarter about when to flush, though.
+ glFlush();
+
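+ // Note: only a single face and level are propagated for now; the
+ // constructor above captures just level_infos_[0][0] (see its TODOs).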
+ texture->level_infos_.resize(1);
+ for (size_t i = 0; i < level_infos_.size(); i++) {
+ const LevelInfo& base_info = level_infos_[i][0];
+ const size_t levels_needed = TextureManager::ComputeMipMapCount(
+ base_info.target, base_info.width, base_info.height, base_info.depth);
+ DCHECK(level_infos_.size() <= levels_needed);
+ texture->level_infos_[0].resize(levels_needed);
+ for (size_t n = 0; n < level_infos_.size(); n++) {
+ const LevelInfo& info = level_infos_[i][n];
+ texture->SetLevelInfo(NULL,
+ info.target,
+ i,
+ info.internal_format,
+ info.width,
+ info.height,
+ info.depth,
+ info.border,
+ info.format,
+ info.type,
+ info.cleared);
+ }
+ }
+ if (image_buffer_.get()) {
+ texture->SetLevelImage(
+ NULL,
+ target_,
+ 0,
+ new GLImageSync(
+ image_buffer_,
+ gfx::Size(level_infos_[0][0].width, level_infos_[0][0].height)));
+ }
+
+ texture->target_ = target_;
+ texture->SetImmutable(immutable_);
+ texture->min_filter_ = min_filter_;
+ texture->mag_filter_ = mag_filter_;
+ texture->wrap_s_ = wrap_s_;
+ texture->wrap_t_ = wrap_t_;
+ texture->usage_ = usage_;
+}
+
+bool TextureDefinition::Matches(const Texture* texture) const {
+ DCHECK(target_ == texture->target());
+ if (texture->min_filter_ != min_filter_ ||
+ texture->mag_filter_ != mag_filter_ ||
+ texture->wrap_s_ != wrap_s_ ||
+ texture->wrap_t_ != wrap_t_) {
+ return false;
+ }
+
+ // All structural changes should have orphaned the texture.
+ if (image_buffer_.get() && !texture->GetLevelImage(texture->target(), 0))
+ return false;
+
+ return true;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/texture_definition.h b/gpu/command_buffer/service/texture_definition.h
new file mode 100644
index 0000000..6df4b86
--- /dev/null
+++ b/gpu/command_buffer/service/texture_definition.h
@@ -0,0 +1,105 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_TEXTURE_DEFINITION_H_
+#define GPU_COMMAND_BUFFER_SERVICE_TEXTURE_DEFINITION_H_
+
+#include <vector>
+
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+
+namespace gfx {
+class GLImage;
+}
+
+namespace gpu {
+namespace gles2 {
+
+class Texture;
+
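+// Abstracts a native, shareable image backing (for example an EGLImage on
+// EGL/GLES2 platforms). The Will/Did Read/Write hooks let implementations
+// insert fences so that clients in different contexts or share groups see a
+// consistent view of the underlying buffer.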
+class NativeImageBuffer : public base::RefCountedThreadSafe<NativeImageBuffer> {
+ public:
+ static scoped_refptr<NativeImageBuffer> Create(GLuint texture_id);
+
+ virtual void AddClient(gfx::GLImage* client) = 0;
+ virtual void RemoveClient(gfx::GLImage* client) = 0;
+ virtual bool IsClient(gfx::GLImage* client) = 0;
+ virtual void BindToTexture(GLenum target) = 0;
+ virtual void WillRead(gfx::GLImage* client) = 0;
+ virtual void WillWrite(gfx::GLImage* client) = 0;
+ virtual void DidRead(gfx::GLImage* client) = 0;
+ virtual void DidWrite(gfx::GLImage* client) = 0;
+
+ protected:
+ friend class base::RefCountedThreadSafe<NativeImageBuffer>;
+ NativeImageBuffer() {}
+ virtual ~NativeImageBuffer() {}
+
+ DISALLOW_COPY_AND_ASSIGN(NativeImageBuffer);
+};
+
+// An immutable description that can be used to create a texture that shares
+// the underlying image buffer(s).
+class TextureDefinition {
+ public:
+ TextureDefinition(GLenum target,
+ Texture* texture,
+ unsigned int version,
+ const scoped_refptr<NativeImageBuffer>& image);
+ virtual ~TextureDefinition();
+
+ Texture* CreateTexture() const;
+ void UpdateTexture(Texture* texture) const;
+
+ unsigned int version() const { return version_; }
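+ // True if this definition's version predates |version|, using serial-number
+ // arithmetic so the comparison stays correct when the 32-bit version counter
+ // wraps around (e.g. a definition at version 0xFFFFFFFF is older than
+ // version 1).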
+ bool IsOlderThan(unsigned int version) const {
+ return (version - version_) < 0x80000000;
+ }
+ bool Matches(const Texture* texture) const;
+
+ scoped_refptr<NativeImageBuffer> image() { return image_buffer_; }
+
+ private:
+ struct LevelInfo {
+ LevelInfo(GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared);
+ ~LevelInfo();
+
+ GLenum target;
+ GLenum internal_format;
+ GLsizei width;
+ GLsizei height;
+ GLsizei depth;
+ GLint border;
+ GLenum format;
+ GLenum type;
+ bool cleared;
+ };
+
+ typedef std::vector<std::vector<LevelInfo> > LevelInfos;
+
+ unsigned int version_;
+ GLenum target_;
+ scoped_refptr<NativeImageBuffer> image_buffer_;
+ GLenum min_filter_;
+ GLenum mag_filter_;
+ GLenum wrap_s_;
+ GLenum wrap_t_;
+ GLenum usage_;
+ bool immutable_;
+ LevelInfos level_infos_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_TEXTURE_DEFINITION_H_
diff --git a/gpu/command_buffer/service/texture_manager.cc b/gpu/command_buffer/service/texture_manager.cc
new file mode 100644
index 0000000..bfbdcb1
--- /dev/null
+++ b/gpu/command_buffer/service/texture_manager.cc
@@ -0,0 +1,1634 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/texture_manager.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "base/bits.h"
+#include "base/strings/stringprintf.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This should contain everything to uniquely identify a Texture.
+static const char TextureTag[] = "|Texture|";
+struct TextureSignature {
+ GLenum target_;
+ GLint level_;
+ GLenum min_filter_;
+ GLenum mag_filter_;
+ GLenum wrap_s_;
+ GLenum wrap_t_;
+ GLenum usage_;
+ GLenum internal_format_;
+ GLsizei width_;
+ GLsizei height_;
+ GLsizei depth_;
+ GLint border_;
+ GLenum format_;
+ GLenum type_;
+ bool has_image_;
+ bool can_render_;
+ bool can_render_to_;
+ bool npot_;
+
+ // Since we will be hashing this signature structure, its padding must be
+ // zero-initialized. Although the C++11 specification says this is the case,
+ // we use a constructor with a memset to enforce it rather than relying on
+ // compilers adhering to this deep, dark corner of the specification.
+ TextureSignature(GLenum target,
+ GLint level,
+ GLenum min_filter,
+ GLenum mag_filter,
+ GLenum wrap_s,
+ GLenum wrap_t,
+ GLenum usage,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool has_image,
+ bool can_render,
+ bool can_render_to,
+ bool npot) {
+ memset(this, 0, sizeof(TextureSignature));
+ target_ = target;
+ level_ = level;
+ min_filter_ = min_filter;
+ mag_filter_ = mag_filter;
+ wrap_s_ = wrap_s;
+ wrap_t_ = wrap_t;
+ usage_ = usage;
+ internal_format_ = internal_format;
+ width_ = width;
+ height_ = height;
+ depth_ = depth;
+ border_ = border;
+ format_ = format;
+ type_ = type;
+ has_image_ = has_image;
+ can_render_ = can_render;
+ can_render_to_ = can_render_to;
+ npot_ = npot;
+ }
+};
+
+TextureManager::DestructionObserver::DestructionObserver() {}
+
+TextureManager::DestructionObserver::~DestructionObserver() {}
+
+TextureManager::~TextureManager() {
+ for (unsigned int i = 0; i < destruction_observers_.size(); i++)
+ destruction_observers_[i]->OnTextureManagerDestroying(this);
+
+ DCHECK(textures_.empty());
+
+ // If this triggers, that means something is keeping a reference to
+ // a Texture belonging to this manager.
+ CHECK_EQ(texture_count_, 0u);
+
+ DCHECK_EQ(0, num_unrenderable_textures_);
+ DCHECK_EQ(0, num_unsafe_textures_);
+ DCHECK_EQ(0, num_uncleared_mips_);
+ DCHECK_EQ(0, num_images_);
+}
+
+void TextureManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ textures_.clear();
+ for (int ii = 0; ii < kNumDefaultTextures; ++ii) {
+ default_textures_[ii] = NULL;
+ }
+
+ if (have_context) {
+ glDeleteTextures(arraysize(black_texture_ids_), black_texture_ids_);
+ }
+
+ DCHECK_EQ(0u, memory_tracker_managed_->GetMemRepresented());
+ DCHECK_EQ(0u, memory_tracker_unmanaged_->GetMemRepresented());
+}
+
+Texture::Texture(GLuint service_id)
+ : mailbox_manager_(NULL),
+ memory_tracking_ref_(NULL),
+ service_id_(service_id),
+ cleared_(true),
+ num_uncleared_mips_(0),
+ target_(0),
+ min_filter_(GL_NEAREST_MIPMAP_LINEAR),
+ mag_filter_(GL_LINEAR),
+ wrap_s_(GL_REPEAT),
+ wrap_t_(GL_REPEAT),
+ usage_(GL_NONE),
+ pool_(GL_TEXTURE_POOL_UNMANAGED_CHROMIUM),
+ max_level_set_(-1),
+ texture_complete_(false),
+ cube_complete_(false),
+ npot_(false),
+ has_been_bound_(false),
+ framebuffer_attachment_count_(0),
+ immutable_(false),
+ has_images_(false),
+ estimated_size_(0),
+ can_render_condition_(CAN_RENDER_ALWAYS),
+ texture_max_anisotropy_initialized_(false) {
+}
+
+Texture::~Texture() {
+ if (mailbox_manager_)
+ mailbox_manager_->TextureDeleted(this);
+}
+
+void Texture::AddTextureRef(TextureRef* ref) {
+ DCHECK(refs_.find(ref) == refs_.end());
+ refs_.insert(ref);
+ if (!memory_tracking_ref_) {
+ memory_tracking_ref_ = ref;
+ GetMemTracker()->TrackMemAlloc(estimated_size());
+ }
+}
+
+void Texture::RemoveTextureRef(TextureRef* ref, bool have_context) {
+ if (memory_tracking_ref_ == ref) {
+ GetMemTracker()->TrackMemFree(estimated_size());
+ memory_tracking_ref_ = NULL;
+ }
+ size_t result = refs_.erase(ref);
+ DCHECK_EQ(result, 1u);
+ if (refs_.empty()) {
+ if (have_context) {
+ GLuint id = service_id();
+ glDeleteTextures(1, &id);
+ }
+ delete this;
+ } else if (memory_tracking_ref_ == NULL) {
+ // TODO(piman): tune ownership semantics for cross-context group shared
+ // textures.
+ memory_tracking_ref_ = *refs_.begin();
+ GetMemTracker()->TrackMemAlloc(estimated_size());
+ }
+}
+
+MemoryTypeTracker* Texture::GetMemTracker() {
+ DCHECK(memory_tracking_ref_);
+ return memory_tracking_ref_->manager()->GetMemTracker(pool_);
+}
+
+Texture::LevelInfo::LevelInfo()
+ : cleared(true),
+ target(0),
+ level(-1),
+ internal_format(0),
+ width(0),
+ height(0),
+ depth(0),
+ border(0),
+ format(0),
+ type(0),
+ estimated_size(0) {
+}
+
+Texture::LevelInfo::LevelInfo(const LevelInfo& rhs)
+ : cleared(rhs.cleared),
+ target(rhs.target),
+ level(rhs.level),
+ internal_format(rhs.internal_format),
+ width(rhs.width),
+ height(rhs.height),
+ depth(rhs.depth),
+ border(rhs.border),
+ format(rhs.format),
+ type(rhs.type),
+ image(rhs.image),
+ estimated_size(rhs.estimated_size) {
+}
+
+Texture::LevelInfo::~LevelInfo() {
+}
+
+Texture::CanRenderCondition Texture::GetCanRenderCondition() const {
+ if (target_ == 0)
+ return CAN_RENDER_ALWAYS;
+
+ if (target_ != GL_TEXTURE_EXTERNAL_OES) {
+ if (level_infos_.empty()) {
+ return CAN_RENDER_NEVER;
+ }
+
+ const Texture::LevelInfo& first_face = level_infos_[0][0];
+ if (first_face.width == 0 ||
+ first_face.height == 0 ||
+ first_face.depth == 0) {
+ return CAN_RENDER_NEVER;
+ }
+ }
+
+ bool needs_mips = NeedsMips();
+ if (needs_mips) {
+ if (!texture_complete())
+ return CAN_RENDER_NEVER;
+ if (target_ == GL_TEXTURE_CUBE_MAP && !cube_complete())
+ return CAN_RENDER_NEVER;
+ }
+
+ bool is_npot_compatible = !needs_mips &&
+ wrap_s_ == GL_CLAMP_TO_EDGE &&
+ wrap_t_ == GL_CLAMP_TO_EDGE;
+
+ if (!is_npot_compatible) {
+ if (target_ == GL_TEXTURE_RECTANGLE_ARB)
+ return CAN_RENDER_NEVER;
+ else if (npot())
+ return CAN_RENDER_ONLY_IF_NPOT;
+ }
+
+ return CAN_RENDER_ALWAYS;
+}
+
+bool Texture::CanRender(const FeatureInfo* feature_info) const {
+ switch (can_render_condition_) {
+ case CAN_RENDER_ALWAYS:
+ return true;
+ case CAN_RENDER_NEVER:
+ return false;
+ case CAN_RENDER_ONLY_IF_NPOT:
+ break;
+ }
+ return feature_info->feature_flags().npot_ok;
+}
+
+void Texture::AddToSignature(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLint level,
+ std::string* signature) const {
+ DCHECK(feature_info);
+ DCHECK(signature);
+ DCHECK_GE(level, 0);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK_LT(static_cast<size_t>(face_index),
+ level_infos_.size());
+ DCHECK_LT(static_cast<size_t>(level),
+ level_infos_[face_index].size());
+
+ const Texture::LevelInfo& info =
+ level_infos_[face_index][level];
+
+ TextureSignature signature_data(target,
+ level,
+ min_filter_,
+ mag_filter_,
+ wrap_s_,
+ wrap_t_,
+ usage_,
+ info.internal_format,
+ info.width,
+ info.height,
+ info.depth,
+ info.border,
+ info.format,
+ info.type,
+ info.image.get() != NULL,
+ CanRender(feature_info),
+ CanRenderTo(),
+ npot_);
+
+ signature->append(TextureTag, sizeof(TextureTag));
+ signature->append(reinterpret_cast<const char*>(&signature_data),
+ sizeof(signature_data));
+}
+
+void Texture::SetMailboxManager(MailboxManager* mailbox_manager) {
+ DCHECK(!mailbox_manager_ || mailbox_manager_ == mailbox_manager);
+ mailbox_manager_ = mailbox_manager;
+}
+
+bool Texture::MarkMipmapsGenerated(
+ const FeatureInfo* feature_info) {
+ if (!CanGenerateMipmaps(feature_info)) {
+ return false;
+ }
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ const Texture::LevelInfo& info1 = level_infos_[ii][0];
+ GLsizei width = info1.width;
+ GLsizei height = info1.height;
+ GLsizei depth = info1.depth;
+ GLenum target = target_ == GL_TEXTURE_2D ? GL_TEXTURE_2D :
+ GLES2Util::IndexToGLFaceTarget(ii);
+ int num_mips =
+ TextureManager::ComputeMipMapCount(target_, width, height, depth);
+ for (int level = 1; level < num_mips; ++level) {
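+ // Each successive mip level halves every dimension, clamped to 1.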
+ width = std::max(1, width >> 1);
+ height = std::max(1, height >> 1);
+ depth = std::max(1, depth >> 1);
+ SetLevelInfo(feature_info,
+ target,
+ level,
+ info1.internal_format,
+ width,
+ height,
+ depth,
+ info1.border,
+ info1.format,
+ info1.type,
+ true);
+ }
+ }
+
+ return true;
+}
+
+void Texture::SetTarget(
+ const FeatureInfo* feature_info, GLenum target, GLint max_levels) {
+ DCHECK_EQ(0u, target_); // you can only set this once.
+ target_ = target;
+ size_t num_faces = (target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ level_infos_.resize(num_faces);
+ for (size_t ii = 0; ii < num_faces; ++ii) {
+ level_infos_[ii].resize(max_levels);
+ }
+
+ if (target == GL_TEXTURE_EXTERNAL_OES || target == GL_TEXTURE_RECTANGLE_ARB) {
+ min_filter_ = GL_LINEAR;
+ wrap_s_ = wrap_t_ = GL_CLAMP_TO_EDGE;
+ }
+
+ if (target == GL_TEXTURE_EXTERNAL_OES) {
+ immutable_ = true;
+ }
+ Update(feature_info);
+ UpdateCanRenderCondition();
+}
+
+bool Texture::CanGenerateMipmaps(
+ const FeatureInfo* feature_info) const {
+ if ((npot() && !feature_info->feature_flags().npot_ok) ||
+ level_infos_.empty() ||
+ target_ == GL_TEXTURE_EXTERNAL_OES ||
+ target_ == GL_TEXTURE_RECTANGLE_ARB) {
+ return false;
+ }
+
+ // Can't generate mips for depth or stencil textures.
+ const Texture::LevelInfo& first = level_infos_[0][0];
+ uint32 channels = GLES2Util::GetChannelsForFormat(first.format);
+ if (channels & (GLES2Util::kDepth | GLES2Util::kStencil)) {
+ return false;
+ }
+
+ // TODO(gman): Check internal_format, format and type.
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ const LevelInfo& info = level_infos_[ii][0];
+ if ((info.target == 0) || (info.width != first.width) ||
+ (info.height != first.height) || (info.depth != 1) ||
+ (info.format != first.format) ||
+ (info.internal_format != first.internal_format) ||
+ (info.type != first.type) ||
+ feature_info->validators()->compressed_texture_format.IsValid(
+ info.internal_format) ||
+ info.image.get()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void Texture::SetLevelCleared(GLenum target, GLint level, bool cleared) {
+ DCHECK_GE(level, 0);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK_LT(static_cast<size_t>(face_index),
+ level_infos_.size());
+ DCHECK_LT(static_cast<size_t>(level),
+ level_infos_[face_index].size());
+ Texture::LevelInfo& info =
+ level_infos_[face_index][level];
+ UpdateMipCleared(&info, cleared);
+ UpdateCleared();
+}
+
+void Texture::UpdateCleared() {
+ if (level_infos_.empty()) {
+ return;
+ }
+
+ const Texture::LevelInfo& first_face = level_infos_[0][0];
+ int levels_needed = TextureManager::ComputeMipMapCount(
+ target_, first_face.width, first_face.height, first_face.depth);
+ bool cleared = true;
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ for (GLint jj = 0; jj < levels_needed; ++jj) {
+ const Texture::LevelInfo& info = level_infos_[ii][jj];
+ if (info.width > 0 && info.height > 0 && info.depth > 0 &&
+ !info.cleared) {
+ cleared = false;
+ break;
+ }
+ }
+ }
+
+ // If texture is uncleared and is attached to a framebuffer,
+ // that framebuffer must be marked possibly incomplete.
+ if (!cleared && IsAttachedToFramebuffer()) {
+ IncAllFramebufferStateChangeCount();
+ }
+
+ UpdateSafeToRenderFrom(cleared);
+}
+
+void Texture::UpdateSafeToRenderFrom(bool cleared) {
+ if (cleared_ == cleared)
+ return;
+ cleared_ = cleared;
+ int delta = cleared ? -1 : +1;
+ for (RefSet::iterator it = refs_.begin(); it != refs_.end(); ++it)
+ (*it)->manager()->UpdateSafeToRenderFrom(delta);
+}
+
+void Texture::UpdateMipCleared(LevelInfo* info, bool cleared) {
+ if (info->cleared == cleared)
+ return;
+ info->cleared = cleared;
+ int delta = cleared ? -1 : +1;
+ num_uncleared_mips_ += delta;
+ for (RefSet::iterator it = refs_.begin(); it != refs_.end(); ++it)
+ (*it)->manager()->UpdateUnclearedMips(delta);
+}
+
+void Texture::UpdateCanRenderCondition() {
+ CanRenderCondition can_render_condition = GetCanRenderCondition();
+ if (can_render_condition_ == can_render_condition)
+ return;
+ for (RefSet::iterator it = refs_.begin(); it != refs_.end(); ++it)
+ (*it)->manager()->UpdateCanRenderCondition(can_render_condition_,
+ can_render_condition);
+ can_render_condition_ = can_render_condition;
+}
+
+void Texture::UpdateHasImages() {
+ if (level_infos_.empty())
+ return;
+
+ bool has_images = false;
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ for (size_t jj = 0; jj < level_infos_[ii].size(); ++jj) {
+ const Texture::LevelInfo& info = level_infos_[ii][jj];
+ if (info.image.get() != NULL) {
+ has_images = true;
+ break;
+ }
+ }
+ }
+
+ if (has_images_ == has_images)
+ return;
+ has_images_ = has_images;
+ int delta = has_images ? +1 : -1;
+ for (RefSet::iterator it = refs_.begin(); it != refs_.end(); ++it)
+ (*it)->manager()->UpdateNumImages(delta);
+}
+
+void Texture::IncAllFramebufferStateChangeCount() {
+ for (RefSet::iterator it = refs_.begin(); it != refs_.end(); ++it)
+ (*it)->manager()->IncFramebufferStateChangeCount();
+}
+
+void Texture::SetLevelInfo(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared) {
+ DCHECK_GE(level, 0);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK_LT(static_cast<size_t>(face_index),
+ level_infos_.size());
+ DCHECK_LT(static_cast<size_t>(level),
+ level_infos_[face_index].size());
+ DCHECK_GE(width, 0);
+ DCHECK_GE(height, 0);
+ DCHECK_GE(depth, 0);
+ Texture::LevelInfo& info =
+ level_infos_[face_index][level];
+ info.target = target;
+ info.level = level;
+ info.internal_format = internal_format;
+ info.width = width;
+ info.height = height;
+ info.depth = depth;
+ info.border = border;
+ info.format = format;
+ info.type = type;
+ info.image = 0;
+
+ estimated_size_ -= info.estimated_size;
+ GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, 4, &info.estimated_size, NULL, NULL);
+ estimated_size_ += info.estimated_size;
+
+ UpdateMipCleared(&info, cleared);
+ max_level_set_ = std::max(max_level_set_, level);
+ Update(feature_info);
+ UpdateCleared();
+ UpdateCanRenderCondition();
+ UpdateHasImages();
+ if (IsAttachedToFramebuffer()) {
+ // TODO(gman): If textures tracked which framebuffers they were attached to
+ // we could just mark those framebuffers as not complete.
+ IncAllFramebufferStateChangeCount();
+ }
+}
+
+bool Texture::ValidForTexture(
+ GLint target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum type) const {
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (level >= 0 && face_index < level_infos_.size() &&
+ static_cast<size_t>(level) < level_infos_[face_index].size()) {
+ const LevelInfo& info = level_infos_[face_index][level];
+ int32 right;
+ int32 top;
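+ // SafeAddInt32 rejects integer overflow when computing the right and top
+ // extents of the update rectangle.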
+ return SafeAddInt32(xoffset, width, &right) &&
+ SafeAddInt32(yoffset, height, &top) &&
+ xoffset >= 0 &&
+ yoffset >= 0 &&
+ right <= info.width &&
+ top <= info.height &&
+ type == info.type;
+ }
+ return false;
+}
+
+bool Texture::GetLevelSize(
+ GLint target, GLint level, GLsizei* width, GLsizei* height) const {
+ DCHECK(width);
+ DCHECK(height);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (level >= 0 && face_index < level_infos_.size() &&
+ static_cast<size_t>(level) < level_infos_[face_index].size()) {
+ const LevelInfo& info = level_infos_[face_index][level];
+ if (info.target != 0) {
+ *width = info.width;
+ *height = info.height;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Texture::GetLevelType(
+ GLint target, GLint level, GLenum* type, GLenum* internal_format) const {
+ DCHECK(type);
+ DCHECK(internal_format);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (level >= 0 && face_index < level_infos_.size() &&
+ static_cast<size_t>(level) < level_infos_[face_index].size()) {
+ const LevelInfo& info = level_infos_[face_index][level];
+ if (info.target != 0) {
+ *type = info.type;
+ *internal_format = info.internal_format;
+ return true;
+ }
+ }
+ return false;
+}
+
+GLenum Texture::SetParameteri(
+ const FeatureInfo* feature_info, GLenum pname, GLint param) {
+ DCHECK(feature_info);
+
+ if (target_ == GL_TEXTURE_EXTERNAL_OES ||
+ target_ == GL_TEXTURE_RECTANGLE_ARB) {
+ if (pname == GL_TEXTURE_MIN_FILTER &&
+ (param != GL_NEAREST && param != GL_LINEAR))
+ return GL_INVALID_ENUM;
+ if ((pname == GL_TEXTURE_WRAP_S || pname == GL_TEXTURE_WRAP_T) &&
+ param != GL_CLAMP_TO_EDGE)
+ return GL_INVALID_ENUM;
+ }
+
+ switch (pname) {
+ case GL_TEXTURE_MIN_FILTER:
+ if (!feature_info->validators()->texture_min_filter_mode.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ min_filter_ = param;
+ break;
+ case GL_TEXTURE_MAG_FILTER:
+ if (!feature_info->validators()->texture_mag_filter_mode.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ mag_filter_ = param;
+ break;
+ case GL_TEXTURE_POOL_CHROMIUM:
+ if (!feature_info->validators()->texture_pool.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ GetMemTracker()->TrackMemFree(estimated_size());
+ pool_ = param;
+ GetMemTracker()->TrackMemAlloc(estimated_size());
+ break;
+ case GL_TEXTURE_WRAP_S:
+ if (!feature_info->validators()->texture_wrap_mode.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ wrap_s_ = param;
+ break;
+ case GL_TEXTURE_WRAP_T:
+ if (!feature_info->validators()->texture_wrap_mode.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ wrap_t_ = param;
+ break;
+ case GL_TEXTURE_MAX_ANISOTROPY_EXT:
+ if (param < 1) {
+ return GL_INVALID_VALUE;
+ }
+ break;
+ case GL_TEXTURE_USAGE_ANGLE:
+ if (!feature_info->validators()->texture_usage.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ usage_ = param;
+ break;
+ default:
+ NOTREACHED();
+ return GL_INVALID_ENUM;
+ }
+ Update(feature_info);
+ UpdateCleared();
+ UpdateCanRenderCondition();
+ return GL_NO_ERROR;
+}
+
+GLenum Texture::SetParameterf(
+ const FeatureInfo* feature_info, GLenum pname, GLfloat param) {
+ switch (pname) {
+ case GL_TEXTURE_MIN_FILTER:
+ case GL_TEXTURE_MAG_FILTER:
+ case GL_TEXTURE_POOL_CHROMIUM:
+ case GL_TEXTURE_WRAP_S:
+ case GL_TEXTURE_WRAP_T:
+ case GL_TEXTURE_USAGE_ANGLE:
+ {
+ GLint iparam = static_cast<GLint>(param);
+ return SetParameteri(feature_info, pname, iparam);
+ }
+ case GL_TEXTURE_MAX_ANISOTROPY_EXT:
+ if (param < 1.f) {
+ return GL_INVALID_VALUE;
+ }
+ break;
+ default:
+ NOTREACHED();
+ return GL_INVALID_ENUM;
+ }
+ return GL_NO_ERROR;
+}
+
+void Texture::Update(const FeatureInfo* feature_info) {
+ // Update npot status.
+ // Assume GL_TEXTURE_EXTERNAL_OES textures are npot; all others are not
+ // until one of their levels below proves otherwise.
+ npot_ = target_ == GL_TEXTURE_EXTERNAL_OES;
+
+ if (level_infos_.empty()) {
+ texture_complete_ = false;
+ cube_complete_ = false;
+ return;
+ }
+
+ // Mark the texture NPOT if the first mip of any face is NPOT.
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ const Texture::LevelInfo& info = level_infos_[ii][0];
+ if (GLES2Util::IsNPOT(info.width) ||
+ GLES2Util::IsNPOT(info.height) ||
+ GLES2Util::IsNPOT(info.depth)) {
+ npot_ = true;
+ break;
+ }
+ }
+
+ // Update texture_complete and cube_complete status.
+ const Texture::LevelInfo& first_face = level_infos_[0][0];
+ int levels_needed = TextureManager::ComputeMipMapCount(
+ target_, first_face.width, first_face.height, first_face.depth);
+ texture_complete_ =
+ max_level_set_ >= (levels_needed - 1) && max_level_set_ >= 0;
+ cube_complete_ = (level_infos_.size() == 6) &&
+ (first_face.width == first_face.height);
+
+ if (first_face.width == 0 || first_face.height == 0) {
+ texture_complete_ = false;
+ }
+ if (first_face.type == GL_FLOAT &&
+ !feature_info->feature_flags().enable_texture_float_linear &&
+ (min_filter_ != GL_NEAREST_MIPMAP_NEAREST ||
+ mag_filter_ != GL_NEAREST)) {
+ texture_complete_ = false;
+ } else if (first_face.type == GL_HALF_FLOAT_OES &&
+ !feature_info->feature_flags().enable_texture_half_float_linear &&
+ (min_filter_ != GL_NEAREST_MIPMAP_NEAREST ||
+ mag_filter_ != GL_NEAREST)) {
+ texture_complete_ = false;
+ }
+ for (size_t ii = 0;
+ ii < level_infos_.size() && (cube_complete_ || texture_complete_);
+ ++ii) {
+ const Texture::LevelInfo& level0 = level_infos_[ii][0];
+ if (level0.target == 0 ||
+ level0.width != first_face.width ||
+ level0.height != first_face.height ||
+ level0.depth != 1 ||
+ level0.internal_format != first_face.internal_format ||
+ level0.format != first_face.format ||
+ level0.type != first_face.type) {
+ cube_complete_ = false;
+ }
+ // Get level0 dimensions
+ GLsizei width = level0.width;
+ GLsizei height = level0.height;
+ GLsizei depth = level0.depth;
+ for (GLint jj = 1; jj < levels_needed; ++jj) {
+ // compute required size for mip.
+ width = std::max(1, width >> 1);
+ height = std::max(1, height >> 1);
+ depth = std::max(1, depth >> 1);
+ const Texture::LevelInfo& info = level_infos_[ii][jj];
+ if (info.target == 0 ||
+ info.width != width ||
+ info.height != height ||
+ info.depth != depth ||
+ info.internal_format != level0.internal_format ||
+ info.format != level0.format ||
+ info.type != level0.type) {
+ texture_complete_ = false;
+ break;
+ }
+ }
+ }
+}
+
+bool Texture::ClearRenderableLevels(GLES2Decoder* decoder) {
+ DCHECK(decoder);
+ if (cleared_) {
+ return true;
+ }
+
+ const Texture::LevelInfo& first_face = level_infos_[0][0];
+ int levels_needed = TextureManager::ComputeMipMapCount(
+ target_, first_face.width, first_face.height, first_face.depth);
+
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ for (GLint jj = 0; jj < levels_needed; ++jj) {
+ Texture::LevelInfo& info = level_infos_[ii][jj];
+ if (info.target != 0) {
+ if (!ClearLevel(decoder, info.target, jj)) {
+ return false;
+ }
+ }
+ }
+ }
+ UpdateSafeToRenderFrom(true);
+ return true;
+}
+
+bool Texture::IsLevelCleared(GLenum target, GLint level) const {
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (face_index >= level_infos_.size() ||
+ level >= static_cast<GLint>(level_infos_[face_index].size())) {
+ return true;
+ }
+
+ const Texture::LevelInfo& info = level_infos_[face_index][level];
+
+ return info.cleared;
+}
+
+void Texture::InitTextureMaxAnisotropyIfNeeded(GLenum target) {
+ if (texture_max_anisotropy_initialized_)
+ return;
+ texture_max_anisotropy_initialized_ = true;
+ GLfloat params[] = { 1.0f };
+ glTexParameterfv(target, GL_TEXTURE_MAX_ANISOTROPY_EXT, params);
+}
+
+bool Texture::ClearLevel(
+ GLES2Decoder* decoder, GLenum target, GLint level) {
+ DCHECK(decoder);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (face_index >= level_infos_.size() ||
+ level >= static_cast<GLint>(level_infos_[face_index].size())) {
+ return true;
+ }
+
+ Texture::LevelInfo& info = level_infos_[face_index][level];
+
+ DCHECK(target == info.target);
+
+ if (info.target == 0 ||
+ info.cleared ||
+ info.width == 0 ||
+ info.height == 0 ||
+ info.depth == 0) {
+ return true;
+ }
+
+ // NOTE: It seems kind of gross to call back into the decoder for this
+ // but only the decoder knows all the state (like unpack_alignment_) that's
+ // needed to be able to call GL correctly.
+ bool cleared = decoder->ClearLevel(
+ service_id_, target_, info.target, info.level, info.internal_format,
+ info.format, info.type, info.width, info.height, immutable_);
+ UpdateMipCleared(&info, cleared);
+ return info.cleared;
+}
+
+void Texture::SetLevelImage(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLint level,
+ gfx::GLImage* image) {
+ DCHECK_GE(level, 0);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK_LT(static_cast<size_t>(face_index),
+ level_infos_.size());
+ DCHECK_LT(static_cast<size_t>(level),
+ level_infos_[face_index].size());
+ Texture::LevelInfo& info =
+ level_infos_[face_index][level];
+ DCHECK_EQ(info.target, target);
+ DCHECK_EQ(info.level, level);
+ info.image = image;
+ UpdateCanRenderCondition();
+ UpdateHasImages();
+}
+
+gfx::GLImage* Texture::GetLevelImage(GLint target, GLint level) const {
+ if (target != GL_TEXTURE_2D && target != GL_TEXTURE_EXTERNAL_OES &&
+ target != GL_TEXTURE_RECTANGLE_ARB) {
+ return NULL;
+ }
+
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (level >= 0 && face_index < level_infos_.size() &&
+ static_cast<size_t>(level) < level_infos_[face_index].size()) {
+ const LevelInfo& info = level_infos_[face_index][level];
+ if (info.target != 0) {
+ return info.image.get();
+ }
+ }
+ return NULL;
+}
+
+void Texture::OnWillModifyPixels() {
+ gfx::GLImage* image = GetLevelImage(target(), 0);
+ if (image)
+ image->WillModifyTexImage();
+}
+
+void Texture::OnDidModifyPixels() {
+ gfx::GLImage* image = GetLevelImage(target(), 0);
+ if (image)
+ image->DidModifyTexImage();
+}
+
+TextureRef::TextureRef(TextureManager* manager,
+ GLuint client_id,
+ Texture* texture)
+ : manager_(manager),
+ texture_(texture),
+ client_id_(client_id),
+ num_observers_(0) {
+ DCHECK(manager_);
+ DCHECK(texture_);
+ texture_->AddTextureRef(this);
+ manager_->StartTracking(this);
+}
+
+scoped_refptr<TextureRef> TextureRef::Create(TextureManager* manager,
+ GLuint client_id,
+ GLuint service_id) {
+ return new TextureRef(manager, client_id, new Texture(service_id));
+}
+
+TextureRef::~TextureRef() {
+ manager_->StopTracking(this);
+ texture_->RemoveTextureRef(this, manager_->have_context_);
+ manager_ = NULL;
+}
+
+TextureManager::TextureManager(MemoryTracker* memory_tracker,
+ FeatureInfo* feature_info,
+ GLint max_texture_size,
+ GLint max_cube_map_texture_size,
+ bool use_default_textures)
+ : memory_tracker_managed_(
+ new MemoryTypeTracker(memory_tracker, MemoryTracker::kManaged)),
+ memory_tracker_unmanaged_(
+ new MemoryTypeTracker(memory_tracker, MemoryTracker::kUnmanaged)),
+ feature_info_(feature_info),
+ framebuffer_manager_(NULL),
+ max_texture_size_(max_texture_size),
+ max_cube_map_texture_size_(max_cube_map_texture_size),
+ max_levels_(ComputeMipMapCount(GL_TEXTURE_2D,
+ max_texture_size,
+ max_texture_size,
+ max_texture_size)),
+ max_cube_map_levels_(ComputeMipMapCount(GL_TEXTURE_CUBE_MAP,
+ max_cube_map_texture_size,
+ max_cube_map_texture_size,
+ max_cube_map_texture_size)),
+ use_default_textures_(use_default_textures),
+ num_unrenderable_textures_(0),
+ num_unsafe_textures_(0),
+ num_uncleared_mips_(0),
+ num_images_(0),
+ texture_count_(0),
+ have_context_(true) {
+ for (int ii = 0; ii < kNumDefaultTextures; ++ii) {
+ black_texture_ids_[ii] = 0;
+ }
+}
+
+bool TextureManager::Initialize() {
+ // TODO(gman): The default textures have to be real textures, not the 0
+ // texture because we simulate non-shared resources on top of shared
+ // resources, and all contexts that share resources share the same default
+ // texture.
+ default_textures_[kTexture2D] = CreateDefaultAndBlackTextures(
+ GL_TEXTURE_2D, &black_texture_ids_[kTexture2D]);
+ default_textures_[kCubeMap] = CreateDefaultAndBlackTextures(
+ GL_TEXTURE_CUBE_MAP, &black_texture_ids_[kCubeMap]);
+
+ if (feature_info_->feature_flags().oes_egl_image_external) {
+ default_textures_[kExternalOES] = CreateDefaultAndBlackTextures(
+ GL_TEXTURE_EXTERNAL_OES, &black_texture_ids_[kExternalOES]);
+ }
+
+ if (feature_info_->feature_flags().arb_texture_rectangle) {
+ default_textures_[kRectangleARB] = CreateDefaultAndBlackTextures(
+ GL_TEXTURE_RECTANGLE_ARB, &black_texture_ids_[kRectangleARB]);
+ }
+
+ return true;
+}
+
+scoped_refptr<TextureRef>
+ TextureManager::CreateDefaultAndBlackTextures(
+ GLenum target,
+ GLuint* black_texture) {
+ static uint8 black[] = {0, 0, 0, 255};
+
+ // Sampling a texture not associated with any EGLImage sibling will return
+ // black values according to the spec.
+ bool needs_initialization = (target != GL_TEXTURE_EXTERNAL_OES);
+ bool needs_faces = (target == GL_TEXTURE_CUBE_MAP);
+
+ // Make the default textures and the texture used to replace
+ // non-renderable textures.
+ GLuint ids[2];
+ const int num_ids = use_default_textures_ ? 2 : 1;
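+ // ids[0] always backs the black replacement texture; ids[1], when
+ // allocated, backs the default texture object.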
+ glGenTextures(num_ids, ids);
+ for (int ii = 0; ii < num_ids; ++ii) {
+ glBindTexture(target, ids[ii]);
+ if (needs_initialization) {
+ if (needs_faces) {
+ for (int jj = 0; jj < GLES2Util::kNumFaces; ++jj) {
+ glTexImage2D(GLES2Util::IndexToGLFaceTarget(jj), 0, GL_RGBA, 1, 1, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, black);
+ }
+ } else {
+ glTexImage2D(target, 0, GL_RGBA, 1, 1, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, black);
+ }
+ }
+ }
+ glBindTexture(target, 0);
+
+ scoped_refptr<TextureRef> default_texture;
+ if (use_default_textures_) {
+ default_texture = TextureRef::Create(this, 0, ids[1]);
+ SetTarget(default_texture.get(), target);
+ if (needs_faces) {
+ for (int ii = 0; ii < GLES2Util::kNumFaces; ++ii) {
+ SetLevelInfo(default_texture.get(),
+ GLES2Util::IndexToGLFaceTarget(ii),
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ }
+ } else {
+ if (needs_initialization) {
+ SetLevelInfo(default_texture.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ } else {
+ SetLevelInfo(default_texture.get(),
+ GL_TEXTURE_EXTERNAL_OES,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ }
+ }
+ }
+
+ *black_texture = ids[0];
+ return default_texture;
+}
+
+bool TextureManager::ValidForTarget(
+ GLenum target, GLint level, GLsizei width, GLsizei height, GLsizei depth) {
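+ // The largest legal dimension is halved for each successive mip level.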
+ GLsizei max_size = MaxSizeForTarget(target) >> level;
+ return level >= 0 &&
+ width >= 0 &&
+ height >= 0 &&
+ depth >= 0 &&
+ level < MaxLevelsForTarget(target) &&
+ width <= max_size &&
+ height <= max_size &&
+ depth <= max_size &&
+ (level == 0 || feature_info_->feature_flags().npot_ok ||
+ (!GLES2Util::IsNPOT(width) &&
+ !GLES2Util::IsNPOT(height) &&
+ !GLES2Util::IsNPOT(depth))) &&
+ (target != GL_TEXTURE_CUBE_MAP || (width == height && depth == 1)) &&
+ (target != GL_TEXTURE_2D || (depth == 1));
+}
+
+void TextureManager::SetTarget(TextureRef* ref, GLenum target) {
+ DCHECK(ref);
+ ref->texture()
+ ->SetTarget(feature_info_.get(), target, MaxLevelsForTarget(target));
+}
+
+void TextureManager::SetLevelCleared(TextureRef* ref,
+ GLenum target,
+ GLint level,
+ bool cleared) {
+ DCHECK(ref);
+ ref->texture()->SetLevelCleared(target, level, cleared);
+}
+
+bool TextureManager::ClearRenderableLevels(
+ GLES2Decoder* decoder, TextureRef* ref) {
+ DCHECK(ref);
+ return ref->texture()->ClearRenderableLevels(decoder);
+}
+
+bool TextureManager::ClearTextureLevel(
+ GLES2Decoder* decoder, TextureRef* ref,
+ GLenum target, GLint level) {
+ DCHECK(ref);
+ Texture* texture = ref->texture();
+ if (texture->num_uncleared_mips() == 0) {
+ return true;
+ }
+ bool result = texture->ClearLevel(decoder, target, level);
+ texture->UpdateCleared();
+ return result;
+}
+
+void TextureManager::SetLevelInfo(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared) {
+ DCHECK(ref);
+ Texture* texture = ref->texture();
+
+ texture->GetMemTracker()->TrackMemFree(texture->estimated_size());
+ texture->SetLevelInfo(feature_info_.get(),
+ target,
+ level,
+ internal_format,
+ width,
+ height,
+ depth,
+ border,
+ format,
+ type,
+ cleared);
+ texture->GetMemTracker()->TrackMemAlloc(texture->estimated_size());
+}
+
+Texture* TextureManager::Produce(TextureRef* ref) {
+ DCHECK(ref);
+ return ref->texture();
+}
+
+TextureRef* TextureManager::Consume(
+ GLuint client_id,
+ Texture* texture) {
+ DCHECK(client_id);
+ scoped_refptr<TextureRef> ref(new TextureRef(this, client_id, texture));
+ bool result = textures_.insert(std::make_pair(client_id, ref)).second;
+ DCHECK(result);
+ return ref.get();
+}
+
+void TextureManager::SetParameteri(
+ const char* function_name, ErrorState* error_state,
+ TextureRef* ref, GLenum pname, GLint param) {
+ DCHECK(error_state);
+ DCHECK(ref);
+ Texture* texture = ref->texture();
+ GLenum result = texture->SetParameteri(feature_info_.get(), pname, param);
+ if (result != GL_NO_ERROR) {
+ if (result == GL_INVALID_ENUM) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, param, "param");
+ } else {
+ ERRORSTATE_SET_GL_ERROR_INVALID_PARAMI(
+ error_state, result, function_name, pname, param);
+ }
+ } else {
+ // Texture tracking pools exist only for the command decoder, so
+ // do not pass them on to the native GL implementation.
+ if (pname != GL_TEXTURE_POOL_CHROMIUM) {
+ glTexParameteri(texture->target(), pname, param);
+ }
+ }
+}
+
+void TextureManager::SetParameterf(
+ const char* function_name, ErrorState* error_state,
+ TextureRef* ref, GLenum pname, GLfloat param) {
+ DCHECK(error_state);
+ DCHECK(ref);
+ Texture* texture = ref->texture();
+ GLenum result = texture->SetParameterf(feature_info_.get(), pname, param);
+ if (result != GL_NO_ERROR) {
+ if (result == GL_INVALID_ENUM) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, param, "param");
+ } else {
+ ERRORSTATE_SET_GL_ERROR_INVALID_PARAMF(
+ error_state, result, function_name, pname, param);
+ }
+ } else {
+ // Texture tracking pools exist only for the command decoder, so
+ // do not pass them on to the native GL implementation.
+ if (pname != GL_TEXTURE_POOL_CHROMIUM) {
+ glTexParameterf(texture->target(), pname, param);
+ }
+ }
+}
+
+bool TextureManager::MarkMipmapsGenerated(TextureRef* ref) {
+ DCHECK(ref);
+ Texture* texture = ref->texture();
+ texture->GetMemTracker()->TrackMemFree(texture->estimated_size());
+ bool result = texture->MarkMipmapsGenerated(feature_info_.get());
+ texture->GetMemTracker()->TrackMemAlloc(texture->estimated_size());
+ return result;
+}
+
+TextureRef* TextureManager::CreateTexture(
+ GLuint client_id, GLuint service_id) {
+ DCHECK_NE(0u, service_id);
+ scoped_refptr<TextureRef> ref(TextureRef::Create(
+ this, client_id, service_id));
+ std::pair<TextureMap::iterator, bool> result =
+ textures_.insert(std::make_pair(client_id, ref));
+ DCHECK(result.second);
+ return ref.get();
+}
+
+TextureRef* TextureManager::GetTexture(
+ GLuint client_id) const {
+ TextureMap::const_iterator it = textures_.find(client_id);
+ return it != textures_.end() ? it->second.get() : NULL;
+}
+
+void TextureManager::RemoveTexture(GLuint client_id) {
+ TextureMap::iterator it = textures_.find(client_id);
+ if (it != textures_.end()) {
+ it->second->reset_client_id();
+ textures_.erase(it);
+ }
+}
+
+void TextureManager::StartTracking(TextureRef* ref) {
+ Texture* texture = ref->texture();
+ ++texture_count_;
+ num_uncleared_mips_ += texture->num_uncleared_mips();
+ if (!texture->SafeToRenderFrom())
+ ++num_unsafe_textures_;
+ if (!texture->CanRender(feature_info_.get()))
+ ++num_unrenderable_textures_;
+ if (texture->HasImages())
+ ++num_images_;
+}
+
+void TextureManager::StopTracking(TextureRef* ref) {
+ if (ref->num_observers()) {
+ for (unsigned int i = 0; i < destruction_observers_.size(); i++) {
+ destruction_observers_[i]->OnTextureRefDestroying(ref);
+ }
+ DCHECK_EQ(ref->num_observers(), 0);
+ }
+
+ Texture* texture = ref->texture();
+
+ --texture_count_;
+ if (texture->HasImages()) {
+ DCHECK_NE(0, num_images_);
+ --num_images_;
+ }
+ if (!texture->CanRender(feature_info_.get())) {
+ DCHECK_NE(0, num_unrenderable_textures_);
+ --num_unrenderable_textures_;
+ }
+ if (!texture->SafeToRenderFrom()) {
+ DCHECK_NE(0, num_unsafe_textures_);
+ --num_unsafe_textures_;
+ }
+ num_uncleared_mips_ -= texture->num_uncleared_mips();
+ DCHECK_GE(num_uncleared_mips_, 0);
+}
+
+MemoryTypeTracker* TextureManager::GetMemTracker(GLenum tracking_pool) {
+ switch (tracking_pool) {
+ case GL_TEXTURE_POOL_MANAGED_CHROMIUM:
+ return memory_tracker_managed_.get();
+ case GL_TEXTURE_POOL_UNMANAGED_CHROMIUM:
+ return memory_tracker_unmanaged_.get();
+ default:
+ break;
+ }
+ NOTREACHED();
+ return NULL;
+}
+
+Texture* TextureManager::GetTextureForServiceId(GLuint service_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (TextureMap::const_iterator it = textures_.begin();
+ it != textures_.end(); ++it) {
+ Texture* texture = it->second->texture();
+ if (texture->service_id() == service_id)
+ return texture;
+ }
+ return NULL;
+}
+
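+// Returns the number of mip levels needed for the given dimensions:
+// 1 + floor(log2(largest dimension)), e.g. a 256x256 2D texture needs
+// 1 + 8 = 9 levels. External (GL_TEXTURE_EXTERNAL_OES) textures always have
+// exactly one level.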
+GLsizei TextureManager::ComputeMipMapCount(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth) {
+ switch (target) {
+ case GL_TEXTURE_EXTERNAL_OES:
+ return 1;
+ default:
+ return 1 +
+ base::bits::Log2Floor(std::max(std::max(width, height), depth));
+ }
+}
+
+void TextureManager::SetLevelImage(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ gfx::GLImage* image) {
+ DCHECK(ref);
+ ref->texture()->SetLevelImage(feature_info_.get(), target, level, image);
+}
+
+size_t TextureManager::GetSignatureSize() const {
+ return sizeof(TextureTag) + sizeof(TextureSignature);
+}
+
+void TextureManager::AddToSignature(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ std::string* signature) const {
+ ref->texture()->AddToSignature(feature_info_.get(), target, level, signature);
+}
+
+void TextureManager::UpdateSafeToRenderFrom(int delta) {
+ num_unsafe_textures_ += delta;
+ DCHECK_GE(num_unsafe_textures_, 0);
+}
+
+void TextureManager::UpdateUnclearedMips(int delta) {
+ num_uncleared_mips_ += delta;
+ DCHECK_GE(num_uncleared_mips_, 0);
+}
+
+void TextureManager::UpdateCanRenderCondition(
+ Texture::CanRenderCondition old_condition,
+ Texture::CanRenderCondition new_condition) {
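+ // A texture counts as unrenderable if it can never render, or if it renders
+ // only with NPOT support that this context lacks; adjust the count as the
+ // condition transitions.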
+ if (old_condition == Texture::CAN_RENDER_NEVER ||
+ (old_condition == Texture::CAN_RENDER_ONLY_IF_NPOT &&
+ !feature_info_->feature_flags().npot_ok)) {
+ DCHECK_GT(num_unrenderable_textures_, 0);
+ --num_unrenderable_textures_;
+ }
+ if (new_condition == Texture::CAN_RENDER_NEVER ||
+ (new_condition == Texture::CAN_RENDER_ONLY_IF_NPOT &&
+ !feature_info_->feature_flags().npot_ok))
+ ++num_unrenderable_textures_;
+}
+
+void TextureManager::UpdateNumImages(int delta) {
+ num_images_ += delta;
+ DCHECK_GE(num_images_, 0);
+}
+
+void TextureManager::IncFramebufferStateChangeCount() {
+ if (framebuffer_manager_)
+ framebuffer_manager_->IncFramebufferStateChangeCount();
+}
+
+bool TextureManager::ValidateFormatAndTypeCombination(
+ ErrorState* error_state, const char* function_name, GLenum format,
+ GLenum type) {
+ if (!feature_info_->GetTextureFormatValidator(format).IsValid(type)) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ (std::string("invalid type ") +
+ GLES2Util::GetStringEnum(type) + " for format " +
+ GLES2Util::GetStringEnum(format)).c_str());
+ return false;
+ }
+ return true;
+}
+
+bool TextureManager::ValidateTextureParameters(
+ ErrorState* error_state, const char* function_name,
+ GLenum format, GLenum type, GLenum internal_format, GLint level) {
+ const Validators* validators = feature_info_->validators();
+ if (!validators->texture_format.IsValid(format)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, format, "format");
+ return false;
+ }
+ if (!validators->pixel_type.IsValid(type)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, type, "type");
+ return false;
+ }
+ if (format != internal_format &&
+ !((internal_format == GL_RGBA32F && format == GL_RGBA) ||
+ (internal_format == GL_RGB32F && format == GL_RGB))) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ "format != internalformat");
+ return false;
+ }
+ uint32 channels = GLES2Util::GetChannelsForFormat(format);
+ if ((channels & (GLES2Util::kDepth | GLES2Util::kStencil)) != 0 && level) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ (std::string("invalid format ") + GLES2Util::GetStringEnum(format) +
+ " for level != 0").c_str());
+ return false;
+ }
+ return ValidateFormatAndTypeCombination(error_state, function_name,
+ format, type);
+}
+
+// Gets the texture id for a given target.
+TextureRef* TextureManager::GetTextureInfoForTarget(
+ ContextState* state, GLenum target) {
+ TextureUnit& unit = state->texture_units[state->active_texture_unit];
+ TextureRef* texture = NULL;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ texture = unit.bound_texture_2d.get();
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
+ texture = unit.bound_texture_cube_map.get();
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ texture = unit.bound_texture_external_oes.get();
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ texture = unit.bound_texture_rectangle_arb.get();
+ break;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+ return texture;
+}
+
+TextureRef* TextureManager::GetTextureInfoForTargetUnlessDefault(
+ ContextState* state, GLenum target) {
+ TextureRef* texture = GetTextureInfoForTarget(state, target);
+ if (!texture)
+ return NULL;
+ if (texture == GetDefaultTextureInfo(target))
+ return NULL;
+ return texture;
+}
+
+bool TextureManager::ValidateTexImage2D(
+ ContextState* state,
+ const char* function_name,
+ const DoTextImage2DArguments& args,
+ TextureRef** texture_ref) {
+ ErrorState* error_state = state->GetErrorState();
+ const Validators* validators = feature_info_->validators();
+ if (!validators->texture_target.IsValid(args.target)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, args.target, "target");
+ return false;
+ }
+ if (!validators->texture_internal_format.IsValid(args.internal_format)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, args.internal_format,
+ "internalformat");
+ return false;
+ }
+ if (!ValidateTextureParameters(
+ error_state, function_name, args.format, args.type,
+ args.internal_format, args.level)) {
+ return false;
+ }
+ if (!ValidForTarget(args.target, args.level, args.width, args.height, 1) ||
+ args.border != 0) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_VALUE, function_name,
+ "dimensions out of range");
+ return false;
+ }
+ if ((GLES2Util::GetChannelsForFormat(args.format) &
+ (GLES2Util::kDepth | GLES2Util::kStencil)) != 0 && args.pixels) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION,
+ function_name, "can not supply data for depth or stencil textures");
+ return false;
+ }
+
+ TextureRef* local_texture_ref = GetTextureInfoForTarget(state, args.target);
+ if (!local_texture_ref) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ "unknown texture for target");
+ return false;
+ }
+ if (local_texture_ref->texture()->IsImmutable()) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ "texture is immutable");
+ return false;
+ }
+
+ if (!memory_tracker_managed_->EnsureGPUMemoryAvailable(args.pixels_size)) {
+ ERRORSTATE_SET_GL_ERROR(error_state, GL_OUT_OF_MEMORY, function_name,
+ "out of memory");
+ return false;
+ }
+
+ // Hand back the TextureRef now that validation has succeeded.
+ *texture_ref = local_texture_ref;
+ return true;
+}
+
+void TextureManager::ValidateAndDoTexImage2D(
+ DecoderTextureState* texture_state,
+ ContextState* state,
+ DecoderFramebufferState* framebuffer_state,
+ const DoTextImage2DArguments& args) {
+ TextureRef* texture_ref;
+ if (!ValidateTexImage2D(state, "glTexImage2D", args, &texture_ref)) {
+ return;
+ }
+
+ DoTexImage2D(texture_state, state->GetErrorState(), framebuffer_state,
+ texture_ref, args);
+}
+
+void TextureManager::DoTexImage2D(
+ DecoderTextureState* texture_state,
+ ErrorState* error_state,
+ DecoderFramebufferState* framebuffer_state,
+ TextureRef* texture_ref,
+ const DoTextImage2DArguments& args) {
+ Texture* texture = texture_ref->texture();
+ GLsizei tex_width = 0;
+ GLsizei tex_height = 0;
+ GLenum tex_type = 0;
+ GLenum tex_format = 0;
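+ // If this level is already defined with identical size, format and type, we
+ // can take a fast path below: skip the upload entirely when no pixels are
+ // supplied, or use glTexSubImage2D where it is known to be faster than
+ // glTexImage2D.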
+ bool level_is_same =
+ texture->GetLevelSize(args.target, args.level, &tex_width, &tex_height) &&
+ texture->GetLevelType(args.target, args.level, &tex_type, &tex_format) &&
+ args.width == tex_width && args.height == tex_height &&
+ args.type == tex_type && args.format == tex_format;
+
+ if (level_is_same && !args.pixels) {
+ // Just set the level texture but mark the texture as uncleared.
+ SetLevelInfo(
+ texture_ref,
+ args.target, args.level, args.internal_format, args.width, args.height,
+ 1, args.border, args.format, args.type, false);
+ texture_state->tex_image_2d_failed = false;
+ return;
+ }
+
+ if (texture->IsAttachedToFramebuffer()) {
+ framebuffer_state->clear_state_dirty = true;
+ }
+
+ if (texture_state->texsubimage2d_faster_than_teximage2d &&
+ level_is_same && args.pixels) {
+ {
+ ScopedTextureUploadTimer timer(texture_state);
+ glTexSubImage2D(args.target, args.level, 0, 0, args.width, args.height,
+ args.format, args.type, args.pixels);
+ }
+ SetLevelCleared(texture_ref, args.target, args.level, true);
+ texture_state->tex_image_2d_failed = false;
+ return;
+ }
+
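+ // Move any pending real GL errors into the wrapper first, so the PEEK below
+ // reflects only errors generated by this glTexImage2D call; level info is
+ // recorded only on success.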
+ ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state, "glTexImage2D");
+ {
+ ScopedTextureUploadTimer timer(texture_state);
+ glTexImage2D(
+ args.target, args.level, args.internal_format, args.width, args.height,
+ args.border, args.format, args.type, args.pixels);
+ }
+ GLenum error = ERRORSTATE_PEEK_GL_ERROR(error_state, "glTexImage2D");
+ if (error == GL_NO_ERROR) {
+ SetLevelInfo(
+ texture_ref,
+ args.target, args.level, args.internal_format, args.width, args.height,
+ 1, args.border, args.format, args.type, args.pixels != NULL);
+ texture_state->tex_image_2d_failed = false;
+ }
+}
+
+ScopedTextureUploadTimer::ScopedTextureUploadTimer(
+ DecoderTextureState* texture_state)
+ : texture_state_(texture_state),
+ begin_time_(base::TimeTicks::HighResNow()) {
+}
+
+ScopedTextureUploadTimer::~ScopedTextureUploadTimer() {
+ texture_state_->texture_upload_count++;
+ texture_state_->total_texture_upload_time +=
+ base::TimeTicks::HighResNow() - begin_time_;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/texture_manager.h b/gpu/command_buffer/service/texture_manager.h
new file mode 100644
index 0000000..df00607
--- /dev/null
+++ b/gpu/command_buffer/service/texture_manager.h
@@ -0,0 +1,833 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_TEXTURE_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_TEXTURE_MANAGER_H_
+
+#include <algorithm>
+#include <list>
+#include <set>
+#include <string>
+#include <vector>
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/gpu_export.h"
+#include "ui/gl/gl_image.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2Decoder;
+struct ContextState;
+struct DecoderFramebufferState;
+class Display;
+class ErrorState;
+class FeatureInfo;
+class FramebufferManager;
+class MailboxManager;
+class TextureManager;
+class TextureRef;
+
+// Info about Textures currently in the system.
+// This class wraps a real GL texture, keeping track of its meta-data. It is
+// jointly owned by possibly multiple TextureRef.
+class GPU_EXPORT Texture {
+ public:
+ explicit Texture(GLuint service_id);
+
+ GLenum min_filter() const {
+ return min_filter_;
+ }
+
+ GLenum mag_filter() const {
+ return mag_filter_;
+ }
+
+ GLenum wrap_s() const {
+ return wrap_s_;
+ }
+
+ GLenum wrap_t() const {
+ return wrap_t_;
+ }
+
+ GLenum usage() const {
+ return usage_;
+ }
+
+ GLenum pool() const {
+ return pool_;
+ }
+
+ int num_uncleared_mips() const {
+ return num_uncleared_mips_;
+ }
+
+ uint32 estimated_size() const {
+ return estimated_size_;
+ }
+
+ bool CanRenderTo() const {
+ return target_ != GL_TEXTURE_EXTERNAL_OES;
+ }
+
+ // The service side OpenGL id of the texture.
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ void SetServiceId(GLuint service_id) {
+ DCHECK(service_id);
+ service_id_ = service_id;
+ }
+
+ // Returns the target this texture was first bound to, or 0 if it has not
+ // been bound. Once a texture is bound to a specific target it can never be
+ // bound to a different target.
+ GLenum target() const {
+ return target_;
+ }
+
+ bool SafeToRenderFrom() const {
+ return cleared_;
+ }
+
+ // Get the width and height for a particular level. Returns false if level
+ // does not exist.
+ bool GetLevelSize(
+ GLint target, GLint level, GLsizei* width, GLsizei* height) const;
+
+ // Get the type of a level. Returns false if level does not exist.
+ bool GetLevelType(
+ GLint target, GLint level, GLenum* type, GLenum* internal_format) const;
+
+ // Get the image bound to a particular level. Returns NULL if level
+ // does not exist.
+ gfx::GLImage* GetLevelImage(GLint target, GLint level) const;
+
+ bool HasImages() const {
+ return has_images_;
+ }
+
+ // Returns true if the given dimensions are inside the dimensions of the
+ // level and the type matches the level.
+ bool ValidForTexture(
+ GLint target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum type) const;
+
+ bool IsValid() const {
+ return !!target();
+ }
+
+ bool IsAttachedToFramebuffer() const {
+ return framebuffer_attachment_count_ != 0;
+ }
+
+ void AttachToFramebuffer() {
+ ++framebuffer_attachment_count_;
+ }
+
+ void DetachFromFramebuffer() {
+ DCHECK_GT(framebuffer_attachment_count_, 0);
+ --framebuffer_attachment_count_;
+ }
+
+ void SetImmutable(bool immutable) {
+ immutable_ = immutable;
+ }
+
+ bool IsImmutable() const {
+ return immutable_;
+ }
+
+ // Whether a particular level/face is cleared.
+ bool IsLevelCleared(GLenum target, GLint level) const;
+
+ // Whether the texture has been defined
+ bool IsDefined() const {
+ return estimated_size() > 0;
+ }
+
+ // Initialize TEXTURE_MAX_ANISOTROPY to 1 if we haven't done so yet.
+ void InitTextureMaxAnisotropyIfNeeded(GLenum target);
+
+ void OnWillModifyPixels();
+ void OnDidModifyPixels();
+
+ private:
+ friend class MailboxManager;
+ friend class MailboxManagerTest;
+ friend class TextureDefinition;
+ friend class TextureManager;
+ friend class TextureRef;
+ friend class TextureTestHelper;
+
+ ~Texture();
+ void AddTextureRef(TextureRef* ref);
+ void RemoveTextureRef(TextureRef* ref, bool have_context);
+ MemoryTypeTracker* GetMemTracker();
+
+ // Condition on which this texture is renderable. Can be ONLY_IF_NPOT if it
+ // depends on context support for non-power-of-two textures (i.e. will be
+ // renderable if NPOT support is in the context, otherwise not, e.g. texture
+ // with a NPOT level). ALWAYS means it doesn't depend on context features
+ // (e.g. complete POT), NEVER means it's not renderable regardless (e.g.
+ // incomplete).
+ enum CanRenderCondition {
+ CAN_RENDER_ALWAYS,
+ CAN_RENDER_NEVER,
+ CAN_RENDER_ONLY_IF_NPOT
+ };
+
+ struct LevelInfo {
+ LevelInfo();
+ LevelInfo(const LevelInfo& rhs);
+ ~LevelInfo();
+
+ bool cleared;
+ GLenum target;
+ GLint level;
+ GLenum internal_format;
+ GLsizei width;
+ GLsizei height;
+ GLsizei depth;
+ GLint border;
+ GLenum format;
+ GLenum type;
+ scoped_refptr<gfx::GLImage> image;
+ uint32 estimated_size;
+ };
+
+ // Set the info for a particular level.
+ void SetLevelInfo(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared);
+
+ // In GLES2 "texture complete" means it has all required mips for filtering
+ // down to a 1x1 pixel texture, they are in the correct order, they are all
+ // the same format.
+ bool texture_complete() const {
+ return texture_complete_;
+ }
+
+ // In GLES2 "cube complete" means all 6 faces level 0 are defined, all the
+ // same format, all the same dimensions and all width = height.
+ bool cube_complete() const {
+ return cube_complete_;
+ }
+
+ // Whether or not this texture is a non-power-of-two texture.
+ bool npot() const {
+ return npot_;
+ }
+
+ // Marks a particular level as cleared or uncleared.
+ void SetLevelCleared(GLenum target, GLint level, bool cleared);
+
+ // Updates the cleared flag for this texture by inspecting all the mips.
+ void UpdateCleared();
+
+ // Clears any renderable uncleared levels.
+ // Returns false if a GL error was generated.
+ bool ClearRenderableLevels(GLES2Decoder* decoder);
+
+ // Clears the level.
+ // Returns false if a GL error was generated.
+ bool ClearLevel(GLES2Decoder* decoder, GLenum target, GLint level);
+
+ // Sets a texture parameter.
+ // TODO(gman): Expand to SetParameteriv,fv
+ // Returns GL_NO_ERROR on success. Otherwise the error to generate.
+ GLenum SetParameteri(
+ const FeatureInfo* feature_info, GLenum pname, GLint param);
+ GLenum SetParameterf(
+ const FeatureInfo* feature_info, GLenum pname, GLfloat param);
+
+ // Marks each of the mip levels as though it were generated.
+ bool MarkMipmapsGenerated(const FeatureInfo* feature_info);
+
+ bool NeedsMips() const {
+ return min_filter_ != GL_NEAREST && min_filter_ != GL_LINEAR;
+ }
+
+ // True if this texture meets all the GLES2 criteria for rendering.
+ // See section 3.8.2 of the GLES2 spec.
+ bool CanRender(const FeatureInfo* feature_info) const;
+
+ // Returns true if mipmaps can be generated by GL.
+ bool CanGenerateMipmaps(const FeatureInfo* feature_info) const;
+
+ // Sets the Texture's target
+ // Parameters:
+ // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP or
+ // GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB
+ // max_levels: The maximum levels this type of target can have.
+ void SetTarget(
+ const FeatureInfo* feature_info, GLenum target, GLint max_levels);
+
+ // Update info about this texture.
+ void Update(const FeatureInfo* feature_info);
+
+ // Set the image for a particular level.
+ void SetLevelImage(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLint level,
+ gfx::GLImage* image);
+
+ // Appends a signature for the given level.
+ void AddToSignature(
+ const FeatureInfo* feature_info,
+ GLenum target, GLint level, std::string* signature) const;
+
+ void SetMailboxManager(MailboxManager* mailbox_manager);
+
+ // Updates the unsafe textures count in all the managers referencing this
+ // texture.
+ void UpdateSafeToRenderFrom(bool cleared);
+
+ // Updates the uncleared mip count in all the managers referencing this
+ // texture.
+ void UpdateMipCleared(LevelInfo* info, bool cleared);
+
+ // Computes the CanRenderCondition flag.
+ CanRenderCondition GetCanRenderCondition() const;
+
+ // Updates the unrenderable texture count in all the managers referencing this
+ // texture.
+ void UpdateCanRenderCondition();
+
+ // Updates the images count in all the managers referencing this
+ // texture.
+ void UpdateHasImages();
+
+ // Increment the framebuffer state change count in all the managers
+ // referencing this texture.
+ void IncAllFramebufferStateChangeCount();
+
+ MailboxManager* mailbox_manager_;
+
+ // Info about each face and level of texture.
+ std::vector<std::vector<LevelInfo> > level_infos_;
+
+ // The texture refs that point to this Texture.
+ typedef std::set<TextureRef*> RefSet;
+ RefSet refs_;
+
+ // The single TextureRef that accounts for memory for this texture. Must be
+ // one of refs_.
+ TextureRef* memory_tracking_ref_;
+
+ // The service id of the texture.
+ GLuint service_id_;
+
+ // Whether all renderable mips of this texture have been cleared.
+ bool cleared_;
+
+ int num_uncleared_mips_;
+
+ // The target. 0 if unset, otherwise the target this texture was first bound
+ // to (e.g. GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP).
+ GLenum target_;
+
+ // Texture parameters.
+ GLenum min_filter_;
+ GLenum mag_filter_;
+ GLenum wrap_s_;
+ GLenum wrap_t_;
+ GLenum usage_;
+ GLenum pool_;
+
+ // The maximum level that has been set.
+ GLint max_level_set_;
+
+ // Whether or not this texture is "texture complete"
+ bool texture_complete_;
+
+ // Whether or not this texture is "cube complete"
+ bool cube_complete_;
+
+ // Whether or not this texture is non-power-of-two
+ bool npot_;
+
+ // Whether this texture has ever been bound.
+ bool has_been_bound_;
+
+ // The number of framebuffers this texture is attached to.
+ int framebuffer_attachment_count_;
+
+ // Whether the texture is immutable and no further changes to the format
+ // or dimensions of the texture object can be made.
+ bool immutable_;
+
+ // Whether or not this texture has images.
+ bool has_images_;
+
+ // Size in bytes this texture is assumed to take in memory.
+ uint32 estimated_size_;
+
+ // Cache of the computed CanRenderCondition flag.
+ CanRenderCondition can_render_condition_;
+
+ // Whether we have initialized TEXTURE_MAX_ANISOTROPY to 1.
+ bool texture_max_anisotropy_initialized_;
+
+ DISALLOW_COPY_AND_ASSIGN(Texture);
+};
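+
+// Example query sketch (illustrative only; |texture| is assumed to be a
+// Texture* obtained elsewhere, e.g. from TextureRef::texture()):
+//
+//   GLsizei width = 0;
+//   GLsizei height = 0;
+//   if (texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height)) {
+//     // Level 0 is defined; width and height now hold its dimensions.
+//   }
+//   bool safe = texture->SafeToRenderFrom();  // All renderable mips cleared?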
+
+// This class represents a texture in a client context group. It's mostly 1:1
+// with a client id, though it can outlive the client id if it's still bound to
+// an FBO or another context when destroyed.
+// Multiple TextureRefs can point to the same texture with cross-context
+// sharing.
+class GPU_EXPORT TextureRef : public base::RefCounted<TextureRef> {
+ public:
+ TextureRef(TextureManager* manager, GLuint client_id, Texture* texture);
+ static scoped_refptr<TextureRef> Create(TextureManager* manager,
+ GLuint client_id,
+ GLuint service_id);
+
+ void AddObserver() { num_observers_++; }
+ void RemoveObserver() { num_observers_--; }
+
+ const Texture* texture() const { return texture_; }
+ Texture* texture() { return texture_; }
+ GLuint client_id() const { return client_id_; }
+ GLuint service_id() const { return texture_->service_id(); }
+ GLint num_observers() const { return num_observers_; }
+
+ private:
+ friend class base::RefCounted<TextureRef>;
+ friend class Texture;
+ friend class TextureManager;
+
+ ~TextureRef();
+ const TextureManager* manager() const { return manager_; }
+ TextureManager* manager() { return manager_; }
+ void reset_client_id() { client_id_ = 0; }
+
+ TextureManager* manager_;
+ Texture* texture_;
+ GLuint client_id_;
+ GLint num_observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(TextureRef);
+};
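+
+// Example lifetime sketch (illustrative only; |texture_manager|, |kClientId|
+// and |kServiceId| are hypothetical values supplied by the caller):
+//
+//   scoped_refptr<TextureRef> ref =
+//       TextureRef::Create(texture_manager, kClientId, kServiceId);
+//   GLuint service_id = ref->service_id();  // The underlying GL texture name.
+//   Texture* texture = ref->texture();      // Jointly owned, shared state.
+//   ref = NULL;  // The Texture is destroyed once no TextureRef points at it.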
+
+// Holds data that is per gles2_cmd_decoder, but is related to the
+// TextureManager.
+struct DecoderTextureState {
+ // total_texture_upload_time is automatically initialized to zero by the
+ // default constructor.
+ explicit DecoderTextureState(bool texsubimage2d_faster_than_teximage2d)
+ : tex_image_2d_failed(false),
+ texture_upload_count(0),
+ texsubimage2d_faster_than_teximage2d(
+ texsubimage2d_faster_than_teximage2d) {}
+
+ // This indicates all the following texSubImage2D calls that are part of the
+ // failed texImage2D call should be ignored.
+ bool tex_image_2d_failed;
+
+ // Command buffer stats.
+ int texture_upload_count;
+ base::TimeDelta total_texture_upload_time;
+
+ bool texsubimage2d_faster_than_teximage2d;
+};
+
+// This class keeps track of the textures and their sizes so we can do NPOT and
+// texture complete checking.
+//
+// NOTE: To support shared resources an instance of this class will need to be
+// shared by multiple GLES2Decoders.
+class GPU_EXPORT TextureManager {
+ public:
+ class GPU_EXPORT DestructionObserver {
+ public:
+ DestructionObserver();
+ virtual ~DestructionObserver();
+
+ // Called in ~TextureManager.
+ virtual void OnTextureManagerDestroying(TextureManager* manager) = 0;
+
+ // Called via ~TextureRef.
+ virtual void OnTextureRefDestroying(TextureRef* texture) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DestructionObserver);
+ };
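+
+ // Example observer sketch (illustrative only; the MyObserver class name is
+ // hypothetical):
+ //
+ //   class MyObserver : public TextureManager::DestructionObserver {
+ //    public:
+ //     virtual void OnTextureManagerDestroying(TextureManager* manager) {
+ //       // Drop any raw pointers held to |manager|.
+ //     }
+ //     virtual void OnTextureRefDestroying(TextureRef* texture) {
+ //       // Release any per-TextureRef bookkeeping.
+ //     }
+ //   };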
+
+ enum DefaultAndBlackTextures {
+ kTexture2D,
+ kCubeMap,
+ kExternalOES,
+ kRectangleARB,
+ kNumDefaultTextures
+ };
+
+ TextureManager(MemoryTracker* memory_tracker,
+ FeatureInfo* feature_info,
+ GLsizei max_texture_size,
+ GLsizei max_cube_map_texture_size,
+ bool use_default_textures);
+ ~TextureManager();
+
+ void set_framebuffer_manager(FramebufferManager* manager) {
+ framebuffer_manager_ = manager;
+ }
+
+ // Init the texture manager.
+ bool Initialize();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Returns the maximum number of levels.
+ GLint MaxLevelsForTarget(GLenum target) const {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return max_levels_;
+ case GL_TEXTURE_EXTERNAL_OES:
+ return 1;
+ default:
+ return max_cube_map_levels_;
+ }
+ }
+
+ // Returns the maximum size.
+ GLsizei MaxSizeForTarget(GLenum target) const {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_EXTERNAL_OES:
+ return max_texture_size_;
+ default:
+ return max_cube_map_texture_size_;
+ }
+ }
+
+ // Returns the maximum number of levels a texture of the given size can
+ // have; see the worked example below.
+ static GLsizei ComputeMipMapCount(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth);
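+
+ // Worked example (illustrative): with a full mip chain, a 16x16x1
+ // GL_TEXTURE_2D has 5 levels (16, 8, 4, 2, 1); the accompanying unit tests
+ // pair a max texture size of 16 with kMax2dLevels == 5 for the same reason.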
+
+ // Checks if the given dimensions are valid for the given target.
+ bool ValidForTarget(
+ GLenum target, GLint level,
+ GLsizei width, GLsizei height, GLsizei depth);
+
+ // True if this texture meets all the GLES2 criteria for rendering.
+ // See section 3.8.2 of the GLES2 spec.
+ bool CanRender(const TextureRef* ref) const {
+ return ref->texture()->CanRender(feature_info_.get());
+ }
+
+ // Returns true if mipmaps can be generated by GL.
+ bool CanGenerateMipmaps(const TextureRef* ref) const {
+ return ref->texture()->CanGenerateMipmaps(feature_info_.get());
+ }
+
+ // Sets the Texture's target
+ // Parameters:
+ // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP
+ // max_levels: The maximum levels this type of target can have.
+ void SetTarget(
+ TextureRef* ref,
+ GLenum target);
+
+ // Sets the info for a particular level of a Texture.
+ void SetLevelInfo(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared);
+
+ // Adapter to call above function.
+ void SetLevelInfoFromParams(TextureRef* ref,
+ const gpu::AsyncTexImage2DParams& params) {
+ SetLevelInfo(
+ ref, params.target, params.level, params.internal_format,
+ params.width, params.height, 1 /* depth */,
+ params.border, params.format,
+ params.type, true /* cleared */);
+ }
+
+ Texture* Produce(TextureRef* ref);
+
+ // Maps an existing texture into the texture manager, at a given client ID.
+ TextureRef* Consume(GLuint client_id, Texture* texture);
+
+ // Sets a mip as cleared.
+ void SetLevelCleared(TextureRef* ref, GLenum target,
+ GLint level, bool cleared);
+
+ // Sets a texture parameter of a Texture
+ // Returns GL_NO_ERROR on success. Otherwise the error to generate.
+ // TODO(gman): Expand to SetParameteriv,fv
+ void SetParameteri(
+ const char* function_name, ErrorState* error_state,
+ TextureRef* ref, GLenum pname, GLint param);
+ void SetParameterf(
+ const char* function_name, ErrorState* error_state,
+ TextureRef* ref, GLenum pname, GLfloat param);
+
+ // Marks each of the mip levels as though it were generated.
+ // Returns false if that's not allowed for the given texture.
+ bool MarkMipmapsGenerated(TextureRef* ref);
+
+ // Clears any uncleared renderable levels.
+ bool ClearRenderableLevels(GLES2Decoder* decoder, TextureRef* ref);
+
+ // Clear a specific level.
+ bool ClearTextureLevel(
+ GLES2Decoder* decoder, TextureRef* ref, GLenum target, GLint level);
+
+ // Creates a new texture info.
+ TextureRef* CreateTexture(GLuint client_id, GLuint service_id);
+
+ // Gets the texture info for the given texture.
+ TextureRef* GetTexture(GLuint client_id) const;
+
+ // Removes a texture info.
+ void RemoveTexture(GLuint client_id);
+
+ // Gets a Texture for a given service id (note: it assumes the texture object
+ // is still mapped in this TextureManager).
+ Texture* GetTextureForServiceId(GLuint service_id) const;
+
+ TextureRef* GetDefaultTextureInfo(GLenum target) {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return default_textures_[kTexture2D].get();
+ case GL_TEXTURE_CUBE_MAP:
+ return default_textures_[kCubeMap].get();
+ case GL_TEXTURE_EXTERNAL_OES:
+ return default_textures_[kExternalOES].get();
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return default_textures_[kRectangleARB].get();
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+ }
+
+ bool HaveUnrenderableTextures() const {
+ return num_unrenderable_textures_ > 0;
+ }
+
+ bool HaveUnsafeTextures() const {
+ return num_unsafe_textures_ > 0;
+ }
+
+ bool HaveUnclearedMips() const {
+ return num_uncleared_mips_ > 0;
+ }
+
+ bool HaveImages() const {
+ return num_images_ > 0;
+ }
+
+ GLuint black_texture_id(GLenum target) const {
+ switch (target) {
+ case GL_SAMPLER_2D:
+ return black_texture_ids_[kTexture2D];
+ case GL_SAMPLER_CUBE:
+ return black_texture_ids_[kCubeMap];
+ case GL_SAMPLER_EXTERNAL_OES:
+ return black_texture_ids_[kExternalOES];
+ case GL_SAMPLER_2D_RECT_ARB:
+ return black_texture_ids_[kRectangleARB];
+ default:
+ NOTREACHED();
+ return 0;
+ }
+ }
+
+ size_t mem_represented() const {
+ return
+ memory_tracker_managed_->GetMemRepresented() +
+ memory_tracker_unmanaged_->GetMemRepresented();
+ }
+
+ void SetLevelImage(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ gfx::GLImage* image);
+
+ size_t GetSignatureSize() const;
+
+ void AddToSignature(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ std::string* signature) const;
+
+ void AddObserver(DestructionObserver* observer) {
+ destruction_observers_.push_back(observer);
+ }
+
+ void RemoveObserver(DestructionObserver* observer) {
+ for (unsigned int i = 0; i < destruction_observers_.size(); i++) {
+ if (destruction_observers_[i] == observer) {
+ std::swap(destruction_observers_[i], destruction_observers_.back());
+ destruction_observers_.pop_back();
+ return;
+ }
+ }
+ NOTREACHED();
+ }
+
+ struct DoTextImage2DArguments {
+ GLenum target;
+ GLint level;
+ GLenum internal_format;
+ GLsizei width;
+ GLsizei height;
+ GLint border;
+ GLenum format;
+ GLenum type;
+ const void* pixels;
+ uint32 pixels_size;
+ };
+
+ bool ValidateTexImage2D(
+ ContextState* state,
+ const char* function_name,
+ const DoTextImage2DArguments& args,
+ // Pointer to TextureRef filled in if validation successful.
+ // Presumes the pointer is valid.
+ TextureRef** texture_ref);
+
+ void ValidateAndDoTexImage2D(
+ DecoderTextureState* texture_state,
+ ContextState* state,
+ DecoderFramebufferState* framebuffer_state,
+ const DoTextImage2DArguments& args);
+
+ // TODO(kloveless): Make GetTexture* private once this is no longer called
+ // from gles2_cmd_decoder.
+ TextureRef* GetTextureInfoForTarget(ContextState* state, GLenum target);
+ TextureRef* GetTextureInfoForTargetUnlessDefault(
+ ContextState* state, GLenum target);
+
+ bool ValidateFormatAndTypeCombination(
+ ErrorState* error_state, const char* function_name,
+ GLenum format, GLenum type);
+
+ // Note that internal_format is only checked in relation to the format
+ // parameter, so that this function may be used to validate texSubImage2D.
+ bool ValidateTextureParameters(
+ ErrorState* error_state, const char* function_name,
+ GLenum format, GLenum type, GLenum internal_format, GLint level);
+
+ private:
+ friend class Texture;
+ friend class TextureRef;
+
+ // Helper for Initialize().
+ scoped_refptr<TextureRef> CreateDefaultAndBlackTextures(
+ GLenum target,
+ GLuint* black_texture);
+
+ void DoTexImage2D(
+ DecoderTextureState* texture_state,
+ ErrorState* error_state,
+ DecoderFramebufferState* framebuffer_state,
+ TextureRef* texture_ref,
+ const DoTextImage2DArguments& args);
+
+ void StartTracking(TextureRef* texture);
+ void StopTracking(TextureRef* texture);
+
+ void UpdateSafeToRenderFrom(int delta);
+ void UpdateUnclearedMips(int delta);
+ void UpdateCanRenderCondition(Texture::CanRenderCondition old_condition,
+ Texture::CanRenderCondition new_condition);
+ void UpdateNumImages(int delta);
+ void IncFramebufferStateChangeCount();
+
+ MemoryTypeTracker* GetMemTracker(GLenum texture_pool);
+ scoped_ptr<MemoryTypeTracker> memory_tracker_managed_;
+ scoped_ptr<MemoryTypeTracker> memory_tracker_unmanaged_;
+
+ scoped_refptr<FeatureInfo> feature_info_;
+
+ FramebufferManager* framebuffer_manager_;
+
+ // Info for each texture in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<TextureRef> > TextureMap;
+ TextureMap textures_;
+
+ GLsizei max_texture_size_;
+ GLsizei max_cube_map_texture_size_;
+ GLint max_levels_;
+ GLint max_cube_map_levels_;
+
+ const bool use_default_textures_;
+
+ int num_unrenderable_textures_;
+ int num_unsafe_textures_;
+ int num_uncleared_mips_;
+ int num_images_;
+
+ // Counts the number of Textures allocated with 'this' as their manager.
+ // Allows checking that no Texture will outlive this manager.
+ unsigned int texture_count_;
+
+ bool have_context_;
+
+ // Black (0,0,0,1) textures for when non-renderable textures are used.
+ // NOTE: There is no corresponding Texture for these textures.
+ // Texture objects are only created for textures the client side can access.
+ GLuint black_texture_ids_[kNumDefaultTextures];
+
+ // The default textures for each target (texture name = 0)
+ scoped_refptr<TextureRef> default_textures_[kNumDefaultTextures];
+
+ std::vector<DestructionObserver*> destruction_observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(TextureManager);
+};
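+
+// Example flow sketch (illustrative only, mirroring the unit tests;
+// |memory_tracker|, |feature_info| and the id/size constants are hypothetical
+// values supplied by the caller):
+//
+//   TextureManager manager(memory_tracker, feature_info, kMaxTextureSize,
+//                          kMaxCubeMapTextureSize,
+//                          true /* use_default_textures */);
+//   manager.Initialize();
+//   manager.CreateTexture(kClientId, kServiceId);
+//   TextureRef* ref = manager.GetTexture(kClientId);
+//   manager.SetTarget(ref, GL_TEXTURE_2D);
+//   manager.SetLevelInfo(ref, GL_TEXTURE_2D, 0, GL_RGBA, 4, 4, 1, 0, GL_RGBA,
+//                        GL_UNSIGNED_BYTE, true /* cleared */);
+//   manager.MarkMipmapsGenerated(ref);
+//   ...
+//   manager.Destroy(true /* have_context */);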
+
+// This class records texture upload time when in scope.
+class ScopedTextureUploadTimer {
+ public:
+ explicit ScopedTextureUploadTimer(DecoderTextureState* texture_state);
+ ~ScopedTextureUploadTimer();
+
+ private:
+ DecoderTextureState* texture_state_;
+ base::TimeTicks begin_time_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedTextureUploadTimer);
+};
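+
+// Example scope sketch (illustrative only; |texture_state| is assumed to be
+// the DecoderTextureState owned by the decoder, and DoTheUpload() stands in
+// for the actual texture upload call):
+//
+//   {
+//     ScopedTextureUploadTimer timer(texture_state);
+//     DoTheUpload();
+//   }
+//   // On destruction the elapsed time is accumulated into |texture_state|
+//   // (see total_texture_upload_time above).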
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_TEXTURE_MANAGER_H_
diff --git a/gpu/command_buffer/service/texture_manager_unittest.cc b/gpu/command_buffer/service/texture_manager_unittest.cc
new file mode 100644
index 0000000..2d509ae
--- /dev/null
+++ b/gpu/command_buffer/service/texture_manager_unittest.cc
@@ -0,0 +1,2509 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/texture_manager.h"
+
+#include <utility>
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_image_stub.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::AtLeast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArgumentPointee;
+using ::testing::StrictMock;
+using ::testing::_;
+
+namespace gpu {
+namespace gles2 {
+
+class TextureTestHelper {
+ public:
+ static bool IsNPOT(const Texture* texture) {
+ return texture->npot();
+ }
+ static bool IsTextureComplete(const Texture* texture) {
+ return texture->texture_complete();
+ }
+ static bool IsCubeComplete(const Texture* texture) {
+ return texture->cube_complete();
+ }
+};
+
+class TextureManagerTest : public GpuServiceTest {
+ public:
+ static const GLint kMaxTextureSize = 16;
+ static const GLint kMaxCubeMapTextureSize = 8;
+ static const GLint kMaxExternalTextureSize = 16;
+ static const GLint kMax2dLevels = 5;
+ static const GLint kMaxCubeMapLevels = 4;
+ static const GLint kMaxExternalLevels = 1;
+ static const bool kUseDefaultTextures = false;
+
+ TextureManagerTest() : feature_info_(new FeatureInfo()) {}
+
+ virtual ~TextureManagerTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ manager_.reset(new TextureManager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures));
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "", kUseDefaultTextures);
+ manager_->Initialize();
+ error_state_.reset(new ::testing::StrictMock<gles2::MockErrorState>());
+ }
+
+ virtual void TearDown() {
+ manager_->Destroy(false);
+ manager_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ void SetParameter(
+ TextureRef* texture_ref, GLenum pname, GLint value, GLenum error) {
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), manager_.get(),
+ texture_ref, pname, value, error);
+ }
+
+ scoped_refptr<FeatureInfo> feature_info_;
+ scoped_ptr<TextureManager> manager_;
+ scoped_ptr<MockErrorState> error_state_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLint TextureManagerTest::kMaxTextureSize;
+const GLint TextureManagerTest::kMaxCubeMapTextureSize;
+const GLint TextureManagerTest::kMaxExternalTextureSize;
+const GLint TextureManagerTest::kMax2dLevels;
+const GLint TextureManagerTest::kMaxCubeMapLevels;
+const GLint TextureManagerTest::kMaxExternalLevels;
+#endif
+
+TEST_F(TextureManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ // Check we can create texture.
+ manager_->CreateTexture(kClient1Id, kService1Id);
+ // Check texture got created.
+ scoped_refptr<TextureRef> texture = manager_->GetTexture(kClient1Id);
+ ASSERT_TRUE(texture.get() != NULL);
+ EXPECT_EQ(kService1Id, texture->service_id());
+ EXPECT_EQ(kClient1Id, texture->client_id());
+ EXPECT_EQ(texture->texture(), manager_->GetTextureForServiceId(
+ texture->service_id()));
+ // Check we get nothing for a non-existent texture.
+ EXPECT_TRUE(manager_->GetTexture(kClient2Id) == NULL);
+ // Check that trying to remove a non-existent texture does not crash.
+ manager_->RemoveTexture(kClient2Id);
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(*gl_, DeleteTextures(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Check we can't get the texture after we remove it.
+ manager_->RemoveTexture(kClient1Id);
+ EXPECT_TRUE(manager_->GetTexture(kClient1Id) == NULL);
+ EXPECT_EQ(0u, texture->client_id());
+}
+
+TEST_F(TextureManagerTest, SetParameter) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create texture.
+ manager_->CreateTexture(kClient1Id, kService1Id);
+ // Check texture got created.
+ TextureRef* texture_ref = manager_->GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ manager_->SetTarget(texture_ref, GL_TEXTURE_2D);
+ SetParameter(texture_ref, GL_TEXTURE_MIN_FILTER, GL_NEAREST, GL_NO_ERROR);
+ EXPECT_EQ(static_cast<GLenum>(GL_NEAREST), texture->min_filter());
+ SetParameter(texture_ref, GL_TEXTURE_MAG_FILTER, GL_NEAREST, GL_NO_ERROR);
+ EXPECT_EQ(static_cast<GLenum>(GL_NEAREST), texture->mag_filter());
+ SetParameter(texture_ref, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ EXPECT_EQ(static_cast<GLenum>(GL_CLAMP_TO_EDGE), texture->wrap_s());
+ SetParameter(texture_ref, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ EXPECT_EQ(static_cast<GLenum>(GL_CLAMP_TO_EDGE), texture->wrap_t());
+ SetParameter(texture_ref, GL_TEXTURE_MAX_ANISOTROPY_EXT, 1, GL_NO_ERROR);
+ SetParameter(texture_ref, GL_TEXTURE_MAX_ANISOTROPY_EXT, 2, GL_NO_ERROR);
+ SetParameter(
+ texture_ref, GL_TEXTURE_MIN_FILTER, GL_CLAMP_TO_EDGE, GL_INVALID_ENUM);
+ EXPECT_EQ(static_cast<GLenum>(GL_NEAREST), texture->min_filter());
+ SetParameter(
+ texture_ref, GL_TEXTURE_MAG_FILTER, GL_CLAMP_TO_EDGE, GL_INVALID_ENUM);
+ EXPECT_EQ(static_cast<GLenum>(GL_NEAREST), texture->min_filter());
+ SetParameter(texture_ref, GL_TEXTURE_WRAP_S, GL_NEAREST, GL_INVALID_ENUM);
+ EXPECT_EQ(static_cast<GLenum>(GL_CLAMP_TO_EDGE), texture->wrap_s());
+ SetParameter(texture_ref, GL_TEXTURE_WRAP_T, GL_NEAREST, GL_INVALID_ENUM);
+ EXPECT_EQ(static_cast<GLenum>(GL_CLAMP_TO_EDGE), texture->wrap_t());
+ SetParameter(texture_ref, GL_TEXTURE_MAX_ANISOTROPY_EXT, 0, GL_INVALID_VALUE);
+}
+
+TEST_F(TextureManagerTest, UseDefaultTexturesTrue) {
+ bool use_default_textures = true;
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "GL_ANGLE_texture_usage", use_default_textures);
+ TextureManager manager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ use_default_textures);
+ manager.Initialize();
+
+ EXPECT_TRUE(manager.GetDefaultTextureInfo(GL_TEXTURE_2D) != NULL);
+ EXPECT_TRUE(manager.GetDefaultTextureInfo(GL_TEXTURE_CUBE_MAP) != NULL);
+
+ // TODO(vmiura): Test GL_TEXTURE_EXTERNAL_OES & GL_TEXTURE_RECTANGLE_ARB.
+
+ manager.Destroy(false);
+}
+
+TEST_F(TextureManagerTest, UseDefaultTexturesFalse) {
+ bool use_default_textures = false;
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "GL_ANGLE_texture_usage", use_default_textures);
+ TextureManager manager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ use_default_textures);
+ manager.Initialize();
+
+ EXPECT_TRUE(manager.GetDefaultTextureInfo(GL_TEXTURE_2D) == NULL);
+ EXPECT_TRUE(manager.GetDefaultTextureInfo(GL_TEXTURE_CUBE_MAP) == NULL);
+
+ // TODO(vmiura): Test GL_TEXTURE_EXTERNAL_OES & GL_TEXTURE_RECTANGLE_ARB.
+
+ manager.Destroy(false);
+}
+
+TEST_F(TextureManagerTest, TextureUsageExt) {
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "GL_ANGLE_texture_usage", kUseDefaultTextures);
+ TextureManager manager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.Initialize();
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create texture.
+ manager.CreateTexture(kClient1Id, kService1Id);
+ // Check texture got created.
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), &manager, texture_ref,
+ GL_TEXTURE_USAGE_ANGLE, GL_FRAMEBUFFER_ATTACHMENT_ANGLE, GL_NO_ERROR);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_ATTACHMENT_ANGLE),
+ texture_ref->texture()->usage());
+ manager.Destroy(false);
+}
+
+TEST_F(TextureManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "", kUseDefaultTextures);
+ TextureManager manager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.Initialize();
+ // Check we can create texture.
+ manager.CreateTexture(kClient1Id, kService1Id);
+ // Check texture got created.
+ TextureRef* texture = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture != NULL);
+ EXPECT_CALL(*gl_, DeleteTextures(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ TestHelper::SetupTextureManagerDestructionExpectations(
+ gl_.get(), "", kUseDefaultTextures);
+ manager.Destroy(true);
+ // Check that resources got freed.
+ texture = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture == NULL);
+}
+
+TEST_F(TextureManagerTest, MaxValues) {
+ // Check we get the right values for the max sizes.
+ EXPECT_EQ(kMax2dLevels, manager_->MaxLevelsForTarget(GL_TEXTURE_2D));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_X));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_X));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_Y));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_Z));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z));
+ EXPECT_EQ(kMaxExternalLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_EXTERNAL_OES));
+ EXPECT_EQ(kMaxTextureSize, manager_->MaxSizeForTarget(GL_TEXTURE_2D));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_X));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_X));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_Y));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_Z));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z));
+ EXPECT_EQ(kMaxExternalTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_EXTERNAL_OES));
+}
+
+TEST_F(TextureManagerTest, ValidForTarget) {
+ // check 2d
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, 0, kMaxTextureSize, kMaxTextureSize, 1));
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, kMax2dLevels - 1, 1, 1, 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, kMax2dLevels - 1, 1, 2, 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, kMax2dLevels - 1, 2, 1, 1));
+ // check level out of range.
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, kMax2dLevels, kMaxTextureSize, 1, 1));
+ // check has depth.
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, kMax2dLevels, kMaxTextureSize, 1, 2));
+ // Check NPOT width on level 0
+ EXPECT_TRUE(manager_->ValidForTarget(GL_TEXTURE_2D, 0, 5, 2, 1));
+ // Check NPOT height on level 0
+ EXPECT_TRUE(manager_->ValidForTarget(GL_TEXTURE_2D, 0, 2, 5, 1));
+ // Check NPOT width on level 1
+ EXPECT_FALSE(manager_->ValidForTarget(GL_TEXTURE_2D, 1, 5, 2, 1));
+ // Check NPOT height on level 1
+ EXPECT_FALSE(manager_->ValidForTarget(GL_TEXTURE_2D, 1, 2, 5, 1));
+
+ // check cube
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, 0,
+ kMaxCubeMapTextureSize, kMaxCubeMapTextureSize, 1));
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, kMaxCubeMapLevels - 1, 1, 1, 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, kMaxCubeMapLevels - 1, 2, 2, 1));
+ // check level out of range.
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, kMaxCubeMapLevels,
+ kMaxCubeMapTextureSize, 1, 1));
+ // check not square.
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, kMaxCubeMapLevels,
+ kMaxCubeMapTextureSize, 1, 1));
+ // check has depth.
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, kMaxCubeMapLevels,
+ kMaxCubeMapTextureSize, 1, 2));
+
+ for (GLint level = 0; level < kMax2dLevels; ++level) {
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, level, kMaxTextureSize >> level, 1, 1));
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, level, 1, kMaxTextureSize >> level, 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, level, (kMaxTextureSize >> level) + 1, 1, 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, level, 1, (kMaxTextureSize >> level) + 1, 1));
+ }
+
+ for (GLint level = 0; level < kMaxCubeMapLevels; ++level) {
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, level,
+ kMaxCubeMapTextureSize >> level,
+ kMaxCubeMapTextureSize >> level,
+ 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, level,
+ (kMaxCubeMapTextureSize >> level) * 2,
+ (kMaxCubeMapTextureSize >> level) * 2,
+ 1));
+ }
+}
+
+TEST_F(TextureManagerTest, ValidForTargetNPOT) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_npot");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ // Check NPOT width on level 0
+ EXPECT_TRUE(manager.ValidForTarget(GL_TEXTURE_2D, 0, 5, 2, 1));
+ // Check NPOT height on level 0
+ EXPECT_TRUE(manager.ValidForTarget(GL_TEXTURE_2D, 0, 2, 5, 1));
+ // Check NPOT width on level 1
+ EXPECT_TRUE(manager.ValidForTarget(GL_TEXTURE_2D, 1, 5, 2, 1));
+ // Check NPOT height on level 1
+ EXPECT_TRUE(manager.ValidForTarget(GL_TEXTURE_2D, 1, 2, 5, 1));
+ manager.Destroy(false);
+}
+
+class TextureTestBase : public GpuServiceTest {
+ public:
+ static const GLint kMaxTextureSize = 16;
+ static const GLint kMaxCubeMapTextureSize = 8;
+ static const GLint kMax2dLevels = 5;
+ static const GLint kMaxCubeMapLevels = 4;
+ static const GLuint kClient1Id = 1;
+ static const GLuint kService1Id = 11;
+ static const bool kUseDefaultTextures = false;
+
+ TextureTestBase()
+ : feature_info_(new FeatureInfo()) {
+ }
+ virtual ~TextureTestBase() {
+ texture_ref_ = NULL;
+ }
+
+ protected:
+ void SetUpBase(MemoryTracker* memory_tracker, std::string extensions) {
+ GpuServiceTest::SetUp();
+ if (!extensions.empty()) {
+ TestHelper::SetupFeatureInfoInitExpectations(gl_.get(),
+ extensions.c_str());
+ feature_info_->Initialize();
+ }
+
+ manager_.reset(new TextureManager(memory_tracker,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures));
+ decoder_.reset(new ::testing::StrictMock<gles2::MockGLES2Decoder>());
+ error_state_.reset(new ::testing::StrictMock<gles2::MockErrorState>());
+ manager_->CreateTexture(kClient1Id, kService1Id);
+ texture_ref_ = manager_->GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref_.get() != NULL);
+ }
+
+ virtual void TearDown() {
+ if (texture_ref_.get()) {
+ // If it's not in the manager then setting texture_ref_ to NULL will
+ // delete the texture.
+ if (!texture_ref_->client_id()) {
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(*gl_,
+ DeleteTextures(1, ::testing::Pointee(texture_ref_->service_id())))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ texture_ref_ = NULL;
+ }
+ manager_->Destroy(false);
+ manager_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ void SetParameter(
+ TextureRef* texture_ref, GLenum pname, GLint value, GLenum error) {
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), manager_.get(),
+ texture_ref, pname, value, error);
+ }
+
+ scoped_ptr<MockGLES2Decoder> decoder_;
+ scoped_ptr<MockErrorState> error_state_;
+ scoped_refptr<FeatureInfo> feature_info_;
+ scoped_ptr<TextureManager> manager_;
+ scoped_refptr<TextureRef> texture_ref_;
+};
+
+class TextureTest : public TextureTestBase {
+ protected:
+ virtual void SetUp() {
+ SetUpBase(NULL, std::string());
+ }
+};
+
+class TextureMemoryTrackerTest : public TextureTestBase {
+ protected:
+ virtual void SetUp() {
+ mock_memory_tracker_ = new StrictMock<MockMemoryTracker>();
+ SetUpBase(mock_memory_tracker_.get(), std::string());
+ }
+
+ scoped_refptr<MockMemoryTracker> mock_memory_tracker_;
+};
+
+#define EXPECT_MEMORY_ALLOCATION_CHANGE(old_size, new_size, pool) \
+ EXPECT_CALL(*mock_memory_tracker_.get(), \
+ TrackMemoryAllocatedChange(old_size, new_size, pool)) \
+ .Times(1).RetiresOnSaturation()
+
+TEST_F(TextureTest, Basic) {
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(0u, texture->target());
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(texture->IsImmutable());
+ EXPECT_EQ(static_cast<GLenum>(GL_NEAREST_MIPMAP_LINEAR),
+ texture->min_filter());
+ EXPECT_EQ(static_cast<GLenum>(GL_LINEAR), texture->mag_filter());
+ EXPECT_EQ(static_cast<GLenum>(GL_REPEAT), texture->wrap_s());
+ EXPECT_EQ(static_cast<GLenum>(GL_REPEAT), texture->wrap_t());
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_EQ(0u, texture->estimated_size());
+}
+
+TEST_F(TextureTest, SetTargetTexture2D) {
+ Texture* texture = texture_ref_->texture();
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(texture->IsImmutable());
+}
+
+TEST_F(TextureTest, SetTargetTextureExternalOES) {
+ Texture* texture = texture_ref_->texture();
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_EXTERNAL_OES);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(texture->IsImmutable());
+}
+
+TEST_F(TextureTest, ZeroSizeCanNotRender) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 0,
+ 0,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+}
+
+TEST_F(TextureTest, EstimatedSize) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_EQ(8u * 4u * 4u, texture_ref_->texture()->estimated_size());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 2,
+ GL_RGBA,
+ 8,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_EQ(8u * 4u * 4u * 2u, texture_ref_->texture()->estimated_size());
+}
+
+TEST_F(TextureMemoryTrackerTest, EstimatedSize) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 128, MemoryTracker::kUnmanaged);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(128, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 256, MemoryTracker::kUnmanaged);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 2,
+ GL_RGBA,
+ 8,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ // Add expectation for texture deletion.
+ EXPECT_MEMORY_ALLOCATION_CHANGE(256, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kUnmanaged);
+}
+
+TEST_F(TextureMemoryTrackerTest, SetParameterPool) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 128, MemoryTracker::kUnmanaged);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(128, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 128, MemoryTracker::kManaged);
+ SetParameter(texture_ref_.get(),
+ GL_TEXTURE_POOL_CHROMIUM,
+ GL_TEXTURE_POOL_MANAGED_CHROMIUM,
+ GL_NO_ERROR);
+ // Add expectation for texture deletion.
+ EXPECT_MEMORY_ALLOCATION_CHANGE(128, 0, MemoryTracker::kManaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kManaged);
+}
+
+TEST_F(TextureTest, POT2D) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ // Check Setting level 0 to POT
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ // Set filters to something that will work with a single mip.
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_MIN_FILTER, GL_LINEAR, GL_NO_ERROR);
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Set them back.
+ SetParameter(texture_ref_.get(),
+ GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR_MIPMAP_LINEAR,
+ GL_NO_ERROR);
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ // Make mips.
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Change a mip.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ // Set a level past the number of mips that would get generated.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 3,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ // Make mips.
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+}
+
+TEST_F(TextureMemoryTrackerTest, MarkMipmapsGenerated) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 64, MemoryTracker::kUnmanaged);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(64, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 84, MemoryTracker::kUnmanaged);
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_MEMORY_ALLOCATION_CHANGE(84, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kUnmanaged);
+}
+
+TEST_F(TextureTest, UnusedMips) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ // Set level zero to large size.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Set level zero to a smaller size (leaves some previously defined mips
+ // unused).
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Set an unused level to some size
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 4,
+ GL_RGBA,
+ 16,
+ 16,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+}
+
+TEST_F(TextureTest, NPOT2D) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ // Check Setting level 0 to NPOT
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 5,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_MIN_FILTER, GL_LINEAR, GL_NO_ERROR);
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Change it to POT.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+}
+
+TEST_F(TextureTest, NPOT2DNPOTOK) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_npot");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ // Check Setting level 0 to NPOT
+ manager.SetLevelInfo(texture_ref,
+ GL_TEXTURE_2D, 0, GL_RGBA, 4, 5, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, true);
+ EXPECT_TRUE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager.CanGenerateMipmaps(texture_ref));
+ EXPECT_FALSE(manager.CanRender(texture_ref));
+ EXPECT_TRUE(manager.HaveUnrenderableTextures());
+ EXPECT_TRUE(manager.MarkMipmapsGenerated(texture_ref));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager.CanRender(texture_ref));
+ EXPECT_FALSE(manager.HaveUnrenderableTextures());
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, POTCubeMap) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_CUBE_MAP);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_CUBE_MAP), texture->target());
+ // Check Setting level 0 each face to POT
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+
+ // Make mips.
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+
+ // Change a mip.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ 1,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ // Set a level past the number of mips that would get generated.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ 3,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ // Make mips.
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(TextureTestHelper::IsCubeComplete(texture));
+}
+
+TEST_F(TextureTest, GetLevelSize) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 5,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ GLsizei width = -1;
+ GLsizei height = -1;
+ Texture* texture = texture_ref_->texture();
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, -1, &width, &height));
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 1000, &width, &height));
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(4, width);
+ EXPECT_EQ(5, height);
+ manager_->RemoveTexture(kClient1Id);
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(4, width);
+ EXPECT_EQ(5, height);
+}
+
+TEST_F(TextureTest, GetLevelType) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 5,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ GLenum type = 0;
+ GLenum format = 0;
+ Texture* texture = texture_ref_->texture();
+ EXPECT_FALSE(texture->GetLevelType(GL_TEXTURE_2D, -1, &type, &format));
+ EXPECT_FALSE(texture->GetLevelType(GL_TEXTURE_2D, 1000, &type, &format));
+ EXPECT_FALSE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &format));
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &format));
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), format);
+ manager_->RemoveTexture(kClient1Id);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &format));
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), format);
+}
+
+TEST_F(TextureTest, ValidForTexture) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 5,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ // Check bad face.
+ Texture* texture = texture_ref_->texture();
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ 1, 0, 0, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad level.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 0, 0, 0, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad xoffset.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, -1, 0, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad xoffset + width > width.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 1, 0, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad yoffset.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, -1, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad yoffset + height > height.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 1, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad width.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 0, 5, 5, GL_UNSIGNED_BYTE));
+ // Check bad height.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 0, 4, 6, GL_UNSIGNED_BYTE));
+ // Check bad type.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 0, 4, 5, GL_UNSIGNED_SHORT_4_4_4_4));
+ // Check valid full size.
+ EXPECT_TRUE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 0, 4, 5, GL_UNSIGNED_BYTE));
+ // Check valid partial size.
+ EXPECT_TRUE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 1, 1, 2, 3, GL_UNSIGNED_BYTE));
+ manager_->RemoveTexture(kClient1Id);
+ EXPECT_TRUE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 0, 4, 5, GL_UNSIGNED_BYTE));
+}
+
+TEST_F(TextureTest, FloatNotLinear) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_float");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ manager.SetLevelInfo(texture_ref,
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 1, 0, GL_RGBA, GL_FLOAT, true);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), &manager,
+ texture_ref, GL_TEXTURE_MAG_FILTER, GL_NEAREST, GL_NO_ERROR);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), &manager, texture_ref,
+ GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST, GL_NO_ERROR);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, FloatLinear) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_float GL_OES_texture_float_linear");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ manager.SetLevelInfo(texture_ref,
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 1, 0, GL_RGBA, GL_FLOAT, true);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, HalfFloatNotLinear) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_half_float");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ manager.SetLevelInfo(texture_ref,
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 1, 0, GL_RGBA, GL_HALF_FLOAT_OES, true);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), &manager,
+ texture_ref, GL_TEXTURE_MAG_FILTER, GL_NEAREST, GL_NO_ERROR);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), &manager, texture_ref,
+ GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST, GL_NO_ERROR);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, HalfFloatLinear) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_half_float GL_OES_texture_half_float_linear");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ manager.SetLevelInfo(texture_ref,
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 1, 0, GL_RGBA, GL_HALF_FLOAT_OES, true);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, EGLImageExternal) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_EGL_image_external");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_EXTERNAL_OES);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_EXTERNAL_OES), texture->target());
+ EXPECT_FALSE(manager.CanGenerateMipmaps(texture_ref));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, DepthTexture) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_ANGLE_depth_texture");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ manager.SetLevelInfo(
+ texture_ref, GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, 4, 4, 1, 0,
+ GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, false);
+ EXPECT_FALSE(manager.CanGenerateMipmaps(texture_ref));
+ manager.Destroy(false);
+}
+
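+// SafeUnsafe exercises the uncleared-mip bookkeeping: a texture is only
+// SafeToRenderFrom() once every defined level has been cleared, and the
+// manager aggregates that state across its textures via HaveUnsafeTextures()
+// and HaveUnclearedMips().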
+TEST_F(TextureTest, SafeUnsafe) {
+ static const GLuint kClient2Id = 2;
+ static const GLuint kService2Id = 12;
+ static const GLuint kClient3Id = 3;
+ static const GLuint kService3Id = 13;
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref_.get(), GL_TEXTURE_2D, 0, true);
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref_.get(), GL_TEXTURE_2D, 1, true);
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(2, texture->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref_.get(), GL_TEXTURE_2D, 0, true);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref_.get(), GL_TEXTURE_2D, 1, true);
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture->num_uncleared_mips());
+ manager_->MarkMipmapsGenerated(texture_ref_.get());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+
+ manager_->CreateTexture(kClient2Id, kService2Id);
+ scoped_refptr<TextureRef> texture_ref2(
+ manager_->GetTexture(kClient2Id));
+ ASSERT_TRUE(texture_ref2.get() != NULL);
+ manager_->SetTarget(texture_ref2.get(), GL_TEXTURE_2D);
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ Texture* texture2 = texture_ref2->texture();
+ EXPECT_EQ(0, texture2->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref2.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture2->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref2.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture2->num_uncleared_mips());
+
+ manager_->CreateTexture(kClient3Id, kService3Id);
+ scoped_refptr<TextureRef> texture_ref3(
+ manager_->GetTexture(kClient3Id));
+ ASSERT_TRUE(texture_ref3.get() != NULL);
+ manager_->SetTarget(texture_ref3.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref3.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ Texture* texture3 = texture_ref3->texture();
+ EXPECT_EQ(1, texture3->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref2.get(), GL_TEXTURE_2D, 0, true);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture2->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref3.get(), GL_TEXTURE_2D, 0, true);
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture3->num_uncleared_mips());
+
+ manager_->SetLevelInfo(texture_ref2.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ manager_->SetLevelInfo(texture_ref3.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture2->num_uncleared_mips());
+ EXPECT_EQ(1, texture3->num_uncleared_mips());
+ manager_->RemoveTexture(kClient3Id);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ manager_->RemoveTexture(kClient2Id);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_CALL(*gl_, DeleteTextures(1, ::testing::Pointee(kService2Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_ref2 = NULL;
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_CALL(*gl_, DeleteTextures(1, ::testing::Pointee(kService3Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_ref3 = NULL;
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+}
+
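+// ClearRenderableLevels clears every uncleared level in one call, while
+// ClearTextureLevel clears a single level; both delegate the actual clear to
+// the decoder, hence the mocked ClearLevel expectation below.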
+TEST_F(TextureTest, ClearTexture) {
+ EXPECT_CALL(*decoder_, ClearLevel(_, _, _, _, _, _, _, _, _, _))
+ .WillRepeatedly(Return(true));
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(2, texture->num_uncleared_mips());
+ manager_->ClearRenderableLevels(decoder_.get(), texture_ref_.get());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(2, texture->num_uncleared_mips());
+ manager_->ClearTextureLevel(
+ decoder_.get(), texture_ref_.get(), GL_TEXTURE_2D, 0);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture->num_uncleared_mips());
+ manager_->ClearTextureLevel(
+ decoder_.get(), texture_ref_.get(), GL_TEXTURE_2D, 1);
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+}
+
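+// Removing a texture from the manager does not destroy it while a TextureRef
+// still holds it; the ref can keep mutating the texture and the manager's
+// aggregate renderability state still reflects those changes.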
+TEST_F(TextureTest, UseDeletedTexture) {
+ static const GLuint kClient2Id = 2;
+ static const GLuint kService2Id = 12;
+ // Make the default texture renderable
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Make a new texture
+ manager_->CreateTexture(kClient2Id, kService2Id);
+ scoped_refptr<TextureRef> texture_ref(
+ manager_->GetTexture(kClient2Id));
+ manager_->SetTarget(texture_ref.get(), GL_TEXTURE_2D);
+ EXPECT_FALSE(manager_->CanRender(texture_ref.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ // Remove it.
+ manager_->RemoveTexture(kClient2Id);
+ EXPECT_FALSE(manager_->CanRender(texture_ref.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ // Check that we can still manipulate it and it affects the manager.
+ manager_->SetLevelInfo(texture_ref.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_TRUE(manager_->CanRender(texture_ref.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ EXPECT_CALL(*gl_, DeleteTextures(1, ::testing::Pointee(kService2Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_ref = NULL;
+}
+
+TEST_F(TextureTest, GetLevelImage) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 1) == NULL);
+ // Set image.
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ manager_->SetLevelImage(texture_ref_.get(), GL_TEXTURE_2D, 1, image.get());
+ EXPECT_FALSE(texture->GetLevelImage(GL_TEXTURE_2D, 1) == NULL);
+ // Remove it.
+ manager_->SetLevelImage(texture_ref_.get(), GL_TEXTURE_2D, 1, NULL);
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 1) == NULL);
+ manager_->SetLevelImage(texture_ref_.get(), GL_TEXTURE_2D, 1, image.get());
+ // Image should be reset when SetLevelInfo is called.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 1) == NULL);
+}
+
+namespace {
+
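+// Returns true if |str| was already in |string_set|. As a side effect the
+// string is inserted, so repeated signatures are detected on later calls.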
+bool InSet(std::set<std::string>* string_set, const std::string& str) {
+ std::pair<std::set<std::string>::iterator, bool> result =
+ string_set->insert(str);
+ return !result.second;
+}
+
+} // anonymous namespace
+
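+// The signature should change whenever any level field or texture parameter
+// changes, and revert when the state is restored; the test collects the
+// signatures in a set to verify each distinct state yields a distinct string.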
+TEST_F(TextureTest, AddToSignature) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ std::string signature1;
+ std::string signature2;
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature1);
+
+ std::set<std::string> string_set;
+ EXPECT_FALSE(InSet(&string_set, signature1));
+
+ // Check that changing one thing makes a different signature.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ // Check that putting it back makes the same signature.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_EQ(signature1, signature2);
+
+ // Check setting cleared status does not change signature.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_EQ(signature1, signature2);
+
+ // Check changing other settings changes signature.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 2,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_FLOAT,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ // Put it back.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_EQ(signature1, signature2);
+
+ // Check that changing parameters changes the signature.
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_MIN_FILTER, GL_NEAREST, GL_NO_ERROR);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ SetParameter(texture_ref_.get(),
+ GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST_MIPMAP_LINEAR,
+ GL_NO_ERROR);
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_MAG_FILTER, GL_NEAREST, GL_NO_ERROR);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_MAG_FILTER, GL_LINEAR, GL_NO_ERROR);
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ SetParameter(texture_ref_.get(), GL_TEXTURE_WRAP_S, GL_REPEAT, GL_NO_ERROR);
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ // Check that putting it back generates the same signature.
+ SetParameter(texture_ref_.get(), GL_TEXTURE_WRAP_T, GL_REPEAT, GL_NO_ERROR);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_EQ(signature1, signature2);
+
+ // Check that the set was actually getting different signatures.
+ EXPECT_EQ(11u, string_set.size());
+}
+
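+// Produce()/Consume() model mailbox-style sharing: a Texture produced from
+// one TextureRef can be consumed under a new client id, and the restored ref
+// shares the same underlying Texture (and service id) including its level
+// info and any bound GLImage.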
+class ProduceConsumeTextureTest : public TextureTest,
+ public ::testing::WithParamInterface<GLenum> {
+ public:
+ virtual void SetUp() {
+ TextureTest::SetUpBase(NULL, "GL_OES_EGL_image_external");
+ manager_->CreateTexture(kClient2Id, kService2Id);
+ texture2_ = manager_->GetTexture(kClient2Id);
+
+ EXPECT_CALL(*decoder_.get(), GetErrorState())
+ .WillRepeatedly(Return(error_state_.get()));
+ }
+
+ virtual void TearDown() {
+ if (texture2_.get()) {
+ // If it's not in the manager then setting texture2_ to NULL will
+ // delete the texture.
+ if (!texture2_->client_id()) {
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(
+ *gl_,
+ DeleteTextures(1, ::testing::Pointee(texture2_->service_id())))
+ .Times(1).RetiresOnSaturation();
+ }
+ texture2_ = NULL;
+ }
+ TextureTest::TearDown();
+ }
+
+ protected:
+ struct LevelInfo {
+ LevelInfo(GLenum target,
+ GLenum format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum type,
+ bool cleared)
+ : target(target),
+ format(format),
+ width(width),
+ height(height),
+ depth(depth),
+ border(border),
+ type(type),
+ cleared(cleared) {}
+
+ LevelInfo()
+ : target(0),
+ format(0),
+ width(-1),
+ height(-1),
+ depth(1),
+ border(0),
+ type(0),
+ cleared(false) {}
+
+ bool operator==(const LevelInfo& other) const {
+ return target == other.target && format == other.format &&
+ width == other.width && height == other.height &&
+ depth == other.depth && border == other.border &&
+ type == other.type && cleared == other.cleared;
+ }
+
+ GLenum target;
+ GLenum format;
+ GLsizei width;
+ GLsizei height;
+ GLsizei depth;
+ GLint border;
+ GLenum type;
+ bool cleared;
+ };
+
+ void SetLevelInfo(TextureRef* texture_ref,
+ GLint level,
+ const LevelInfo& info) {
+ manager_->SetLevelInfo(texture_ref,
+ info.target,
+ level,
+ info.format,
+ info.width,
+ info.height,
+ info.depth,
+ info.border,
+ info.format,
+ info.type,
+ info.cleared);
+ }
+
+ static LevelInfo GetLevelInfo(const TextureRef* texture_ref,
+ GLint target,
+ GLint level) {
+ const Texture* texture = texture_ref->texture();
+ LevelInfo info;
+ info.target = target;
+ EXPECT_TRUE(texture->GetLevelSize(target, level, &info.width,
+ &info.height));
+ EXPECT_TRUE(texture->GetLevelType(target, level, &info.type,
+ &info.format));
+ info.cleared = texture->IsLevelCleared(target, level);
+ return info;
+ }
+
+ Texture* Produce(TextureRef* texture_ref) {
+ Texture* texture = manager_->Produce(texture_ref);
+ EXPECT_TRUE(texture != NULL);
+ return texture;
+ }
+
+ void Consume(GLuint client_id, Texture* texture) {
+ EXPECT_TRUE(manager_->Consume(client_id, texture));
+ }
+
+ scoped_refptr<TextureRef> texture2_;
+
+ private:
+ static const GLuint kClient2Id;
+ static const GLuint kService2Id;
+};
+
+const GLuint ProduceConsumeTextureTest::kClient2Id = 2;
+const GLuint ProduceConsumeTextureTest::kService2Id = 12;
+
+TEST_F(ProduceConsumeTextureTest, ProduceConsume2D) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ LevelInfo level0(
+ GL_TEXTURE_2D, GL_RGBA, 4, 4, 1, 0, GL_UNSIGNED_BYTE, true);
+ SetLevelInfo(texture_ref_.get(), 0, level0);
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ LevelInfo level1 = GetLevelInfo(texture_ref_.get(), GL_TEXTURE_2D, 1);
+ LevelInfo level2 = GetLevelInfo(texture_ref_.get(), GL_TEXTURE_2D, 2);
+ Texture* produced_texture = Produce(texture_ref_.get());
+ EXPECT_EQ(produced_texture, texture);
+
+ // Make this texture bigger with more levels, and make sure they get
+ // clobbered correctly during Consume().
+ manager_->SetTarget(texture2_.get(), GL_TEXTURE_2D);
+ SetLevelInfo(
+ texture2_.get(),
+ 0,
+ LevelInfo(GL_TEXTURE_2D, GL_RGBA, 16, 16, 1, 0, GL_UNSIGNED_BYTE, false));
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture2_.get()));
+ texture = texture2_->texture();
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_EQ(1024U + 256U + 64U + 16U + 4U, texture->estimated_size());
+
+ GLuint client_id = texture2_->client_id();
+ manager_->RemoveTexture(client_id);
+ Consume(client_id, produced_texture);
+ scoped_refptr<TextureRef> restored_texture = manager_->GetTexture(client_id);
+ EXPECT_EQ(produced_texture, restored_texture->texture());
+ EXPECT_EQ(level0, GetLevelInfo(restored_texture.get(), GL_TEXTURE_2D, 0));
+ EXPECT_EQ(level1, GetLevelInfo(restored_texture.get(), GL_TEXTURE_2D, 1));
+ EXPECT_EQ(level2, GetLevelInfo(restored_texture.get(), GL_TEXTURE_2D, 2));
+ texture = restored_texture->texture();
+ EXPECT_EQ(64U + 16U + 4U, texture->estimated_size());
+ GLint w, h;
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 3, &w, &h));
+
+ // However, the old texture ref still exists if it was referenced somewhere.
+ EXPECT_EQ(1024U + 256U + 64U + 16U + 4U,
+ texture2_->texture()->estimated_size());
+}
+
+TEST_F(ProduceConsumeTextureTest, ProduceConsumeClearRectangle) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_RECTANGLE_ARB);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_RECTANGLE_ARB), texture->target());
+ LevelInfo level0(
+ GL_TEXTURE_RECTANGLE_ARB, GL_RGBA, 1, 1, 1, 0, GL_UNSIGNED_BYTE, false);
+ SetLevelInfo(texture_ref_.get(), 0, level0);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ Texture* produced_texture = Produce(texture_ref_.get());
+ EXPECT_EQ(produced_texture, texture);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_RECTANGLE_ARB),
+ produced_texture->target());
+
+ GLuint client_id = texture2_->client_id();
+ manager_->RemoveTexture(client_id);
+ Consume(client_id, produced_texture);
+ scoped_refptr<TextureRef> restored_texture = manager_->GetTexture(client_id);
+ EXPECT_EQ(produced_texture, restored_texture->texture());
+
+ // See if we can clear the previously uncleared level now.
+ EXPECT_EQ(level0,
+ GetLevelInfo(restored_texture.get(), GL_TEXTURE_RECTANGLE_ARB, 0));
+ EXPECT_CALL(*decoder_, ClearLevel(_, _, _, _, _, _, _, _, _, _))
+ .WillRepeatedly(Return(true));
+ EXPECT_TRUE(manager_->ClearTextureLevel(
+ decoder_.get(), restored_texture.get(), GL_TEXTURE_RECTANGLE_ARB, 0));
+}
+
+TEST_F(ProduceConsumeTextureTest, ProduceConsumeExternal) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_EXTERNAL_OES);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_EXTERNAL_OES), texture->target());
+ LevelInfo level0(
+ GL_TEXTURE_EXTERNAL_OES, GL_RGBA, 1, 1, 1, 0, GL_UNSIGNED_BYTE, false);
+ SetLevelInfo(texture_ref_.get(), 0, level0);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ Texture* produced_texture = Produce(texture_ref_.get());
+ EXPECT_EQ(produced_texture, texture);
+
+ GLuint client_id = texture2_->client_id();
+ manager_->RemoveTexture(client_id);
+ Consume(client_id, produced_texture);
+ scoped_refptr<TextureRef> restored_texture = manager_->GetTexture(client_id);
+ EXPECT_EQ(produced_texture, restored_texture->texture());
+ EXPECT_EQ(level0,
+ GetLevelInfo(restored_texture.get(), GL_TEXTURE_EXTERNAL_OES, 0));
+}
+
+TEST_P(ProduceConsumeTextureTest, ProduceConsumeTextureWithImage) {
+ GLenum target = GetParam();
+ manager_->SetTarget(texture_ref_.get(), target);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(target), texture->target());
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ target,
+ 0,
+ GL_RGBA,
+ 0,
+ 0,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ manager_->SetLevelImage(texture_ref_.get(), target, 0, image.get());
+ GLuint service_id = texture->service_id();
+ Texture* produced_texture = Produce(texture_ref_.get());
+
+ GLuint client_id = texture2_->client_id();
+ manager_->RemoveTexture(client_id);
+ Consume(client_id, produced_texture);
+ scoped_refptr<TextureRef> restored_texture = manager_->GetTexture(client_id);
+ EXPECT_EQ(produced_texture, restored_texture->texture());
+ EXPECT_EQ(service_id, restored_texture->service_id());
+ EXPECT_EQ(image.get(), restored_texture->texture()->GetLevelImage(target, 0));
+}
+
+static const GLenum kTextureTargets[] = {GL_TEXTURE_2D, GL_TEXTURE_EXTERNAL_OES,
+ GL_TEXTURE_RECTANGLE_ARB, };
+
+INSTANTIATE_TEST_CASE_P(Target,
+ ProduceConsumeTextureTest,
+ ::testing::ValuesIn(kTextureTargets));
+
+TEST_F(ProduceConsumeTextureTest, ProduceConsumeCube) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_CUBE_MAP);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_CUBE_MAP), texture->target());
+ LevelInfo face0(GL_TEXTURE_CUBE_MAP_POSITIVE_X,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_UNSIGNED_BYTE,
+ true);
+ LevelInfo face5(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ GL_RGBA,
+ 3,
+ 3,
+ 1,
+ 0,
+ GL_UNSIGNED_BYTE,
+ true);
+ SetLevelInfo(texture_ref_.get(), 0, face0);
+ SetLevelInfo(texture_ref_.get(), 0, face5);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ Texture* produced_texture = Produce(texture_ref_.get());
+ EXPECT_EQ(produced_texture, texture);
+
+ GLuint client_id = texture2_->client_id();
+ manager_->RemoveTexture(client_id);
+ Consume(client_id, produced_texture);
+ scoped_refptr<TextureRef> restored_texture = manager_->GetTexture(client_id);
+ EXPECT_EQ(produced_texture, restored_texture->texture());
+ EXPECT_EQ(
+ face0,
+ GetLevelInfo(restored_texture.get(), GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0));
+ EXPECT_EQ(
+ face5,
+ GetLevelInfo(restored_texture.get(), GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0));
+}
+
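+// A minimal MemoryTracker that just accumulates allocation deltas per pool
+// (e.g. MemoryTracker::kUnmanaged below), so the tests can check which
+// tracker gets charged for a shared texture.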
+class CountingMemoryTracker : public MemoryTracker {
+ public:
+ CountingMemoryTracker() {
+ current_size_[0] = 0;
+ current_size_[1] = 0;
+ }
+
+ virtual void TrackMemoryAllocatedChange(size_t old_size,
+ size_t new_size,
+ Pool pool) OVERRIDE {
+ DCHECK_LT(static_cast<size_t>(pool), arraysize(current_size_));
+ current_size_[pool] += new_size - old_size;
+ }
+
+ virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
+ return true;
+ }
+
+ size_t GetSize(Pool pool) {
+ DCHECK_LT(static_cast<size_t>(pool), arraysize(current_size_));
+ return current_size_[pool];
+ }
+
+ private:
+ virtual ~CountingMemoryTracker() {}
+
+ size_t current_size_[2];
+ DISALLOW_COPY_AND_ASSIGN(CountingMemoryTracker);
+};
+
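+// Two TextureManagers, each with its own memory tracker but a common
+// FeatureInfo; a texture created in one manager is shared into the other via
+// Consume() so the tests can verify that state changes are visible to both.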
+class SharedTextureTest : public GpuServiceTest {
+ public:
+ static const bool kUseDefaultTextures = false;
+
+ SharedTextureTest() : feature_info_(new FeatureInfo()) {}
+
+ virtual ~SharedTextureTest() {
+ }
+
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ memory_tracker1_ = new CountingMemoryTracker;
+ texture_manager1_.reset(
+ new TextureManager(memory_tracker1_.get(),
+ feature_info_.get(),
+ TextureManagerTest::kMaxTextureSize,
+ TextureManagerTest::kMaxCubeMapTextureSize,
+ kUseDefaultTextures));
+ memory_tracker2_ = new CountingMemoryTracker;
+ texture_manager2_.reset(
+ new TextureManager(memory_tracker2_.get(),
+ feature_info_.get(),
+ TextureManagerTest::kMaxTextureSize,
+ TextureManagerTest::kMaxCubeMapTextureSize,
+ kUseDefaultTextures));
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "", kUseDefaultTextures);
+ texture_manager1_->Initialize();
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "", kUseDefaultTextures);
+ texture_manager2_->Initialize();
+ }
+
+ virtual void TearDown() {
+ texture_manager2_->Destroy(false);
+ texture_manager2_.reset();
+ texture_manager1_->Destroy(false);
+ texture_manager1_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ protected:
+ scoped_refptr<FeatureInfo> feature_info_;
+ scoped_refptr<CountingMemoryTracker> memory_tracker1_;
+ scoped_ptr<TextureManager> texture_manager1_;
+ scoped_refptr<CountingMemoryTracker> memory_tracker2_;
+ scoped_ptr<TextureManager> texture_manager2_;
+};
+
+TEST_F(SharedTextureTest, DeleteTextures) {
+ scoped_refptr<TextureRef> ref1 = texture_manager1_->CreateTexture(10, 10);
+ scoped_refptr<TextureRef> ref2 =
+ texture_manager2_->Consume(20, ref1->texture());
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(0);
+ ref1 = NULL;
+ texture_manager1_->RemoveTexture(10);
+ testing::Mock::VerifyAndClearExpectations(gl_.get());
+
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ ref2 = NULL;
+ texture_manager2_->RemoveTexture(20);
+ testing::Mock::VerifyAndClearExpectations(gl_.get());
+}
+
+TEST_F(SharedTextureTest, TextureSafetyAccounting) {
+ EXPECT_FALSE(texture_manager1_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnclearedMips());
+ EXPECT_FALSE(texture_manager2_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnclearedMips());
+
+ // Newly created texture is renderable.
+ scoped_refptr<TextureRef> ref1 = texture_manager1_->CreateTexture(10, 10);
+ EXPECT_FALSE(texture_manager1_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnclearedMips());
+
+ // Associate a new texture ref with the other texture manager; it should
+ // account for the texture too.
+ scoped_refptr<TextureRef> ref2 =
+ texture_manager2_->Consume(20, ref1->texture());
+ EXPECT_FALSE(texture_manager2_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnclearedMips());
+
+ // Make the texture renderable but uncleared on one texture manager; it
+ // should affect the other one.
+ texture_manager1_->SetTarget(ref1.get(), GL_TEXTURE_2D);
+ EXPECT_TRUE(texture_manager1_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnclearedMips());
+ EXPECT_TRUE(texture_manager2_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnclearedMips());
+
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture_manager1_->HaveUnrenderableTextures());
+ EXPECT_TRUE(texture_manager1_->HaveUnsafeTextures());
+ EXPECT_TRUE(texture_manager1_->HaveUnclearedMips());
+ EXPECT_FALSE(texture_manager2_->HaveUnrenderableTextures());
+ EXPECT_TRUE(texture_manager2_->HaveUnsafeTextures());
+ EXPECT_TRUE(texture_manager2_->HaveUnclearedMips());
+
+ // Mark the level cleared on one manager; it should affect the other one.
+ texture_manager1_->SetLevelCleared(ref1.get(), GL_TEXTURE_2D, 0, true);
+ EXPECT_FALSE(texture_manager1_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnclearedMips());
+ EXPECT_FALSE(texture_manager2_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnclearedMips());
+
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_manager1_->RemoveTexture(10);
+ texture_manager2_->RemoveTexture(20);
+}
+
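+// A shared texture is attached to framebuffers owned by the two different
+// managers; changing the texture's level info must invalidate the cached
+// completeness of both FBOs, not just the one owned by the manager that
+// made the change.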
+TEST_F(SharedTextureTest, FBOCompletenessCheck) {
+ const GLenum kCompleteValue = GL_FRAMEBUFFER_COMPLETE;
+ FramebufferManager framebuffer_manager1(1, 1);
+ texture_manager1_->set_framebuffer_manager(&framebuffer_manager1);
+ FramebufferManager framebuffer_manager2(1, 1);
+ texture_manager2_->set_framebuffer_manager(&framebuffer_manager2);
+
+ scoped_refptr<TextureRef> ref1 = texture_manager1_->CreateTexture(10, 10);
+ framebuffer_manager1.CreateFramebuffer(10, 10);
+ scoped_refptr<Framebuffer> framebuffer1 =
+ framebuffer_manager1.GetFramebuffer(10);
+ framebuffer1->AttachTexture(
+ GL_COLOR_ATTACHMENT0, ref1.get(), GL_TEXTURE_2D, 0, 0);
+ EXPECT_FALSE(framebuffer_manager1.IsComplete(framebuffer1.get()));
+ EXPECT_NE(kCompleteValue, framebuffer1->IsPossiblyComplete());
+
+ // Make FBO complete in manager 1.
+ texture_manager1_->SetTarget(ref1.get(), GL_TEXTURE_2D);
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_EQ(kCompleteValue, framebuffer1->IsPossiblyComplete());
+ framebuffer_manager1.MarkAsComplete(framebuffer1.get());
+ EXPECT_TRUE(framebuffer_manager1.IsComplete(framebuffer1.get()));
+
+ // Share texture with manager 2.
+ scoped_refptr<TextureRef> ref2 =
+ texture_manager2_->Consume(20, ref1->texture());
+ framebuffer_manager2.CreateFramebuffer(20, 20);
+ scoped_refptr<Framebuffer> framebuffer2 =
+ framebuffer_manager2.GetFramebuffer(20);
+ framebuffer2->AttachTexture(
+ GL_COLOR_ATTACHMENT0, ref2.get(), GL_TEXTURE_2D, 0, 0);
+ EXPECT_FALSE(framebuffer_manager2.IsComplete(framebuffer2.get()));
+ EXPECT_EQ(kCompleteValue, framebuffer2->IsPossiblyComplete());
+ framebuffer_manager2.MarkAsComplete(framebuffer2.get());
+ EXPECT_TRUE(framebuffer_manager2.IsComplete(framebuffer2.get()));
+
+ // Change the level for the texture; both FBOs should be marked incomplete.
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(framebuffer_manager1.IsComplete(framebuffer1.get()));
+ EXPECT_EQ(kCompleteValue, framebuffer1->IsPossiblyComplete());
+ framebuffer_manager1.MarkAsComplete(framebuffer1.get());
+ EXPECT_TRUE(framebuffer_manager1.IsComplete(framebuffer1.get()));
+ EXPECT_FALSE(framebuffer_manager2.IsComplete(framebuffer2.get()));
+ EXPECT_EQ(kCompleteValue, framebuffer2->IsPossiblyComplete());
+ framebuffer_manager2.MarkAsComplete(framebuffer2.get());
+ EXPECT_TRUE(framebuffer_manager2.IsComplete(framebuffer2.get()));
+
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, _))
+ .Times(2)
+ .RetiresOnSaturation();
+ framebuffer_manager1.RemoveFramebuffer(10);
+ framebuffer_manager2.RemoveFramebuffer(20);
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_manager1_->RemoveTexture(10);
+ texture_manager2_->RemoveTexture(20);
+}
+
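+// Memory is charged to the tracker of the manager that defined the level
+// info; once the original ref is gone, the estimated size migrates to the
+// consuming manager's tracker.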
+TEST_F(SharedTextureTest, Memory) {
+ size_t initial_memory1 = memory_tracker1_->GetSize(MemoryTracker::kUnmanaged);
+ size_t initial_memory2 = memory_tracker2_->GetSize(MemoryTracker::kUnmanaged);
+
+ // Newly created texture is unrenderable.
+ scoped_refptr<TextureRef> ref1 = texture_manager1_->CreateTexture(10, 10);
+ texture_manager1_->SetTarget(ref1.get(), GL_TEXTURE_2D);
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 10,
+ 10,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+
+ EXPECT_LT(0u, ref1->texture()->estimated_size());
+ EXPECT_EQ(initial_memory1 + ref1->texture()->estimated_size(),
+ memory_tracker1_->GetSize(MemoryTracker::kUnmanaged));
+
+ // Associate a new texture ref with the other texture manager; it doesn't
+ // account for the texture memory, so the first memory tracker still has it.
+ scoped_refptr<TextureRef> ref2 =
+ texture_manager2_->Consume(20, ref1->texture());
+ EXPECT_EQ(initial_memory1 + ref1->texture()->estimated_size(),
+ memory_tracker1_->GetSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(initial_memory2,
+ memory_tracker2_->GetSize(MemoryTracker::kUnmanaged));
+
+ // Delete the texture; its memory should go to the remaining tracker.
+ texture_manager1_->RemoveTexture(10);
+ ref1 = NULL;
+ EXPECT_EQ(initial_memory1,
+ memory_tracker1_->GetSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(initial_memory2 + ref2->texture()->estimated_size(),
+ memory_tracker2_->GetSize(MemoryTracker::kUnmanaged));
+
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ ref2 = NULL;
+ texture_manager2_->RemoveTexture(20);
+ EXPECT_EQ(initial_memory2,
+ memory_tracker2_->GetSize(MemoryTracker::kUnmanaged));
+}
+
+TEST_F(SharedTextureTest, Images) {
+ scoped_refptr<TextureRef> ref1 = texture_manager1_->CreateTexture(10, 10);
+ scoped_refptr<TextureRef> ref2 =
+ texture_manager2_->Consume(20, ref1->texture());
+
+ texture_manager1_->SetTarget(ref1.get(), GL_TEXTURE_2D);
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(ref1->texture()->HasImages());
+ EXPECT_FALSE(ref2->texture()->HasImages());
+ EXPECT_FALSE(texture_manager1_->HaveImages());
+ EXPECT_FALSE(texture_manager2_->HaveImages());
+ scoped_refptr<gfx::GLImage> image1(new gfx::GLImageStub);
+ texture_manager1_->SetLevelImage(ref1.get(), GL_TEXTURE_2D, 1, image1.get());
+ EXPECT_TRUE(ref1->texture()->HasImages());
+ EXPECT_TRUE(ref2->texture()->HasImages());
+ EXPECT_TRUE(texture_manager1_->HaveImages());
+ EXPECT_TRUE(texture_manager2_->HaveImages());
+ scoped_refptr<gfx::GLImage> image2(new gfx::GLImageStub);
+ texture_manager1_->SetLevelImage(ref1.get(), GL_TEXTURE_2D, 1, image2.get());
+ EXPECT_TRUE(ref1->texture()->HasImages());
+ EXPECT_TRUE(ref2->texture()->HasImages());
+ EXPECT_TRUE(texture_manager1_->HaveImages());
+ EXPECT_TRUE(texture_manager2_->HaveImages());
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(ref1->texture()->HasImages());
+ EXPECT_FALSE(ref2->texture()->HasImages());
+ EXPECT_FALSE(texture_manager1_->HaveImages());
+ EXPECT_FALSE(texture_manager2_->HaveImages());
+
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_manager1_->RemoveTexture(10);
+ texture_manager2_->RemoveTexture(20);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/transfer_buffer_manager.cc b/gpu/command_buffer/service/transfer_buffer_manager.cc
new file mode 100644
index 0000000..4404a9e
--- /dev/null
+++ b/gpu/command_buffer/service/transfer_buffer_manager.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/debug/trace_event.h"
+#include "base/process/process_handle.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+
+using ::base::SharedMemory;
+
+namespace gpu {
+
+TransferBufferManagerInterface::~TransferBufferManagerInterface() {
+}
+
+TransferBufferManager::TransferBufferManager()
+ : shared_memory_bytes_allocated_(0) {
+}
+
+TransferBufferManager::~TransferBufferManager() {
+ while (!registered_buffers_.empty()) {
+ BufferMap::iterator it = registered_buffers_.begin();
+ DCHECK(shared_memory_bytes_allocated_ >= it->second->size());
+ shared_memory_bytes_allocated_ -= it->second->size();
+ registered_buffers_.erase(it);
+ }
+ DCHECK(!shared_memory_bytes_allocated_);
+}
+
+bool TransferBufferManager::Initialize() {
+ return true;
+}
+
+bool TransferBufferManager::RegisterTransferBuffer(
+ int32 id,
+ scoped_ptr<BufferBacking> buffer_backing) {
+ if (id <= 0) {
+ DVLOG(0) << "Cannot register transfer buffer with non-positive ID.";
+ return false;
+ }
+
+ // Fail if the ID is in use.
+ if (registered_buffers_.find(id) != registered_buffers_.end()) {
+ DVLOG(0) << "Buffer ID already in use.";
+ return false;
+ }
+
+ // Register the shared memory with the ID.
+ scoped_refptr<Buffer> buffer(new gpu::Buffer(buffer_backing.Pass()));
+
+ // Check buffer alignment is sane.
+ DCHECK(!(reinterpret_cast<uintptr_t>(buffer->memory()) &
+ (kCommandBufferEntrySize - 1)));
+
+ shared_memory_bytes_allocated_ += buffer->size();
+ TRACE_COUNTER_ID1(
+ "gpu", "GpuTransferBufferMemory", this, shared_memory_bytes_allocated_);
+
+ registered_buffers_[id] = buffer;
+
+ return true;
+}
+
+void TransferBufferManager::DestroyTransferBuffer(int32 id) {
+ BufferMap::iterator it = registered_buffers_.find(id);
+ if (it == registered_buffers_.end()) {
+ DVLOG(0) << "Transfer buffer ID was not registered.";
+ return;
+ }
+
+ DCHECK(shared_memory_bytes_allocated_ >= it->second->size());
+ shared_memory_bytes_allocated_ -= it->second->size();
+ TRACE_COUNTER_ID1(
+ "gpu", "GpuTransferBufferMemory", this, shared_memory_bytes_allocated_);
+
+ registered_buffers_.erase(it);
+}
+
+scoped_refptr<Buffer> TransferBufferManager::GetTransferBuffer(int32 id) {
+ if (id == 0)
+ return NULL;
+
+ BufferMap::iterator it = registered_buffers_.find(id);
+ if (it == registered_buffers_.end())
+ return NULL;
+
+ return it->second;
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/transfer_buffer_manager.h b/gpu/command_buffer/service/transfer_buffer_manager.h
new file mode 100644
index 0000000..d8bb3bb
--- /dev/null
+++ b/gpu/command_buffer/service/transfer_buffer_manager.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_TRANSFER_BUFFER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_TRANSFER_BUFFER_MANAGER_H_
+
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/shared_memory.h"
+#include "gpu/command_buffer/common/command_buffer_shared.h"
+
+namespace gpu {
+
+class GPU_EXPORT TransferBufferManagerInterface {
+ public:
+ virtual ~TransferBufferManagerInterface();
+
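+ // RegisterTransferBuffer takes ownership of the backing and fails for
+ // non-positive or already-registered ids; GetTransferBuffer returns NULL
+ // for ids that are not currently registered.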
+ virtual bool RegisterTransferBuffer(int32 id,
+ scoped_ptr<BufferBacking> buffer) = 0;
+ virtual void DestroyTransferBuffer(int32 id) = 0;
+ virtual scoped_refptr<Buffer> GetTransferBuffer(int32 id) = 0;
+};
+
+class GPU_EXPORT TransferBufferManager
+ : public TransferBufferManagerInterface {
+ public:
+ TransferBufferManager();
+
+ bool Initialize();
+ virtual bool RegisterTransferBuffer(int32 id,
+ scoped_ptr<BufferBacking> buffer_backing)
+ OVERRIDE;
+ virtual void DestroyTransferBuffer(int32 id) OVERRIDE;
+ virtual scoped_refptr<Buffer> GetTransferBuffer(int32 id) OVERRIDE;
+
+ private:
+ virtual ~TransferBufferManager();
+
+ typedef base::hash_map<int32, scoped_refptr<Buffer> > BufferMap;
+ BufferMap registered_buffers_;
+ size_t shared_memory_bytes_allocated_;
+
+ DISALLOW_COPY_AND_ASSIGN(TransferBufferManager);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_TRANSFER_BUFFER_MANAGER_H_
diff --git a/gpu/command_buffer/service/transfer_buffer_manager_unittest.cc b/gpu/command_buffer/service/transfer_buffer_manager_unittest.cc
new file mode 100644
index 0000000..b047978
--- /dev/null
+++ b/gpu/command_buffer/service/transfer_buffer_manager_unittest.cc
@@ -0,0 +1,113 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using base::SharedMemory;
+
+namespace gpu {
+
+const static size_t kBufferSize = 1024;
+
+class TransferBufferManagerTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ ASSERT_TRUE(manager->Initialize());
+ }
+
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+};
+
+TEST_F(TransferBufferManagerTest, ZeroHandleMapsToNull) {
+ EXPECT_TRUE(NULL == transfer_buffer_manager_->GetTransferBuffer(0).get());
+}
+
+TEST_F(TransferBufferManagerTest, NegativeHandleMapsToNull) {
+ EXPECT_TRUE(NULL == transfer_buffer_manager_->GetTransferBuffer(-1).get());
+}
+
+TEST_F(TransferBufferManagerTest, OutOfRangeHandleMapsToNull) {
+ EXPECT_TRUE(NULL == transfer_buffer_manager_->GetTransferBuffer(1).get());
+}
+
+TEST_F(TransferBufferManagerTest, CanRegisterTransferBuffer) {
+ scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
+ shm->CreateAndMapAnonymous(kBufferSize);
+ base::SharedMemory* shm_raw_pointer = shm.get();
+ scoped_ptr<SharedMemoryBufferBacking> backing(
+ new SharedMemoryBufferBacking(shm.Pass(), kBufferSize));
+ SharedMemoryBufferBacking* backing_raw_ptr = backing.get();
+
+ EXPECT_TRUE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, backing.PassAs<BufferBacking>()));
+ scoped_refptr<Buffer> registered =
+ transfer_buffer_manager_->GetTransferBuffer(1);
+
+ // Shared-memory ownership is transferred. It should be the same memory.
+ EXPECT_EQ(backing_raw_ptr, registered->backing());
+ EXPECT_EQ(shm_raw_pointer, backing_raw_ptr->shared_memory());
+}
+
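+// A BufferBacking that owns no real memory: GetMemory() returns a sentinel
+// pointer, which is enough for tests that only exercise the id-to-buffer
+// bookkeeping.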
+class FakeBufferBacking : public BufferBacking {
+ public:
+ virtual void* GetMemory() const OVERRIDE {
+ return reinterpret_cast<void*>(0xBADF00D0);
+ }
+ virtual size_t GetSize() const OVERRIDE { return 42; }
+ static scoped_ptr<BufferBacking> Make() {
+ return scoped_ptr<BufferBacking>(new FakeBufferBacking);
+ }
+};
+
+TEST_F(TransferBufferManagerTest, CanDestroyTransferBuffer) {
+ EXPECT_TRUE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, scoped_ptr<BufferBacking>(new FakeBufferBacking)));
+ transfer_buffer_manager_->DestroyTransferBuffer(1);
+ scoped_refptr<Buffer> registered =
+ transfer_buffer_manager_->GetTransferBuffer(1);
+
+ scoped_refptr<Buffer> null_buffer;
+ EXPECT_EQ(null_buffer, registered);
+}
+
+TEST_F(TransferBufferManagerTest, CannotReregisterTransferBufferId) {
+ EXPECT_TRUE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, FakeBufferBacking::Make()));
+ EXPECT_FALSE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, FakeBufferBacking::Make()));
+ EXPECT_FALSE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, FakeBufferBacking::Make()));
+}
+
+TEST_F(TransferBufferManagerTest, CanReuseTransferBufferIdAfterDestroying) {
+ EXPECT_TRUE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, FakeBufferBacking::Make()));
+ transfer_buffer_manager_->DestroyTransferBuffer(1);
+ EXPECT_TRUE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, FakeBufferBacking::Make()));
+}
+
+TEST_F(TransferBufferManagerTest, DestroyUnusedTransferBufferIdDoesNotCrash) {
+ transfer_buffer_manager_->DestroyTransferBuffer(1);
+}
+
+TEST_F(TransferBufferManagerTest, CannotRegisterNullTransferBuffer) {
+ EXPECT_FALSE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 0, FakeBufferBacking::Make()));
+}
+
+TEST_F(TransferBufferManagerTest, CannotRegisterNegativeTransferBufferId) {
+ scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
+ shm->CreateAndMapAnonymous(kBufferSize);
+ EXPECT_FALSE(transfer_buffer_manager_->RegisterTransferBuffer(
+ -1, FakeBufferBacking::Make()));
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/vertex_array_manager.cc b/gpu/command_buffer/service/vertex_array_manager.cc
new file mode 100644
index 0000000..1560c04
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_array_manager.cc
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+VertexArrayManager::VertexArrayManager()
+ : vertex_attrib_manager_count_(0),
+ have_context_(true) {
+}
+
+VertexArrayManager::~VertexArrayManager() {
+ DCHECK(vertex_attrib_managers_.empty());
+ CHECK_EQ(vertex_attrib_manager_count_, 0u);
+}
+
+void VertexArrayManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ vertex_attrib_managers_.clear();
+}
+
+scoped_refptr<VertexAttribManager>
+VertexArrayManager::CreateVertexAttribManager(GLuint client_id,
+ GLuint service_id,
+ uint32 num_vertex_attribs,
+ bool client_visible) {
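+ // A manager created with client_visible == false is kept alive only by its
+ // reference count and can never be looked up via GetVertexAttribManager().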
+ scoped_refptr<VertexAttribManager> vertex_attrib_manager(
+ new VertexAttribManager(this, service_id, num_vertex_attribs));
+
+ if (client_visible) {
+ std::pair<VertexAttribManagerMap::iterator, bool> result =
+ vertex_attrib_managers_.insert(
+ std::make_pair(client_id, vertex_attrib_manager));
+ DCHECK(result.second);
+ }
+
+ return vertex_attrib_manager;
+}
+
+VertexAttribManager* VertexArrayManager::GetVertexAttribManager(
+ GLuint client_id) {
+ VertexAttribManagerMap::iterator it = vertex_attrib_managers_.find(client_id);
+ return it != vertex_attrib_managers_.end() ? it->second.get() : NULL;
+}
+
+void VertexArrayManager::RemoveVertexAttribManager(GLuint client_id) {
+ VertexAttribManagerMap::iterator it = vertex_attrib_managers_.find(client_id);
+ if (it != vertex_attrib_managers_.end()) {
+ VertexAttribManager* vertex_attrib_manager = it->second.get();
+ vertex_attrib_manager->MarkAsDeleted();
+ vertex_attrib_managers_.erase(it);
+ }
+}
+
+void VertexArrayManager::StartTracking(
+ VertexAttribManager* /* vertex_attrib_manager */) {
+ ++vertex_attrib_manager_count_;
+}
+
+void VertexArrayManager::StopTracking(
+ VertexAttribManager* /* vertex_attrib_manager */) {
+ --vertex_attrib_manager_count_;
+}
+
+bool VertexArrayManager::GetClientId(
+ GLuint service_id, GLuint* client_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (VertexAttribManagerMap::const_iterator it =
+ vertex_attrib_managers_.begin();
+ it != vertex_attrib_managers_.end(); ++it) {
+ if (it->second->service_id() == service_id) {
+ *client_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/vertex_array_manager.h b/gpu/command_buffer/service/vertex_array_manager.h
new file mode 100644
index 0000000..97ecc1a
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_array_manager.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_VERTEX_ARRAY_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_VERTEX_ARRAY_MANAGER_H_
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class VertexAttribManager;
+
+// This class keeps track of the vertex arrays and their sizes so we can do
+// bounds checking.
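+//
+// A minimal usage sketch (the call site and the have_context value are
+// assumptions, not part of this file):
+//
+//   VertexArrayManager manager;
+//   scoped_refptr<VertexAttribManager> vao =
+//       manager.CreateVertexAttribManager(
+//           client_id, service_id, num_vertex_attribs, true);
+//   VertexAttribManager* found = manager.GetVertexAttribManager(client_id);
+//   manager.RemoveVertexAttribManager(client_id);
+//   manager.Destroy(have_context);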
+class GPU_EXPORT VertexArrayManager {
+ public:
+ VertexArrayManager();
+ ~VertexArrayManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a VertexAttribManager and, if client_visible is true, maps it to
+ // the client_id.
+ scoped_refptr<VertexAttribManager> CreateVertexAttribManager(
+ GLuint client_id,
+ GLuint service_id,
+ uint32 num_vertex_attribs,
+ bool client_visible);
+
+ // Gets the vertex attrib manager for the given vertex array.
+ VertexAttribManager* GetVertexAttribManager(GLuint client_id);
+
+ // Removes the vertex attrib manager for the given vertex array.
+ void RemoveVertexAttribManager(GLuint client_id);
+
+ // Gets a client id for a given service id.
+ bool GetClientId(GLuint service_id, GLuint* client_id) const;
+
+ private:
+ friend class VertexAttribManager;
+
+ void StartTracking(VertexAttribManager* vertex_attrib_manager);
+ void StopTracking(VertexAttribManager* vertex_attrib_manager);
+
+ // Info for each vertex array in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<VertexAttribManager> >
+ VertexAttribManagerMap;
+ VertexAttribManagerMap vertex_attrib_managers_;
+
+ // Counts the number of VertexAttribManagers allocated with 'this' as their
+ // manager. Allows us to check that no VertexAttribManager outlives this
+ // manager.
+ unsigned int vertex_attrib_manager_count_;
+
+ bool have_context_;
+
+ DISALLOW_COPY_AND_ASSIGN(VertexArrayManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_VERTEX_ARRAY_MANAGER_H_
diff --git a/gpu/command_buffer/service/vertex_array_manager_unittest.cc b/gpu/command_buffer/service/vertex_array_manager_unittest.cc
new file mode 100644
index 0000000..aa2df35
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_array_manager_unittest.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::Pointee;
+using ::testing::_;
+
+namespace gpu {
+namespace gles2 {
+
+class VertexArrayManagerTest : public GpuServiceTest {
+ public:
+ static const uint32 kNumVertexAttribs = 8;
+
+ VertexArrayManagerTest() {
+ }
+
+ virtual ~VertexArrayManagerTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ manager_.reset(new VertexArrayManager());
+ }
+
+ virtual void TearDown() {
+ manager_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ scoped_ptr<VertexArrayManager> manager_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const uint32 VertexArrayManagerTest::kNumVertexAttribs;
+#endif
+
+TEST_F(VertexArrayManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+
+ // Check we can create
+ manager_->CreateVertexAttribManager(
+ kClient1Id, kService1Id, kNumVertexAttribs, true);
+ // Check creation success
+ VertexAttribManager* info1 = manager_->GetVertexAttribManager(kClient1Id);
+ ASSERT_TRUE(info1 != NULL);
+ EXPECT_EQ(kService1Id, info1->service_id());
+ GLuint client_id = 0;
+ EXPECT_TRUE(manager_->GetClientId(info1->service_id(), &client_id));
+ EXPECT_EQ(kClient1Id, client_id);
+ // Check we get nothing for a non-existent name.
+ EXPECT_TRUE(manager_->GetVertexAttribManager(kClient2Id) == NULL);
+ // Check trying to remove a non-existent name does not crash.
+ manager_->RemoveVertexAttribManager(kClient2Id);
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(*gl_, DeleteVertexArraysOES(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Check we can't get the vertex attrib manager after we remove it.
+ manager_->RemoveVertexAttribManager(kClient1Id);
+ EXPECT_TRUE(manager_->GetVertexAttribManager(kClient1Id) == NULL);
+}
+
+TEST_F(VertexArrayManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ VertexArrayManager manager;
+ // Check we can create
+ manager.CreateVertexAttribManager(
+ kClient1Id, kService1Id, kNumVertexAttribs, true);
+ // Check creation success
+ VertexAttribManager* info1 = manager.GetVertexAttribManager(kClient1Id);
+ ASSERT_TRUE(info1 != NULL);
+ EXPECT_CALL(*gl_, DeleteVertexArraysOES(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager.Destroy(true);
+ // Check that resources got freed.
+ info1 = manager.GetVertexAttribManager(kClient1Id);
+ ASSERT_TRUE(info1 == NULL);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/vertex_attrib_manager.cc b/gpu/command_buffer/service/vertex_attrib_manager.cc
new file mode 100644
index 0000000..8725c4f
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_attrib_manager.cc
@@ -0,0 +1,278 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+
+#include <list>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "build/build_config.h"
+#define GLES2_GPU_SERVICE 1
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+VertexAttrib::VertexAttrib()
+ : index_(0),
+ enabled_(false),
+ size_(4),
+ type_(GL_FLOAT),
+ offset_(0),
+ normalized_(GL_FALSE),
+ gl_stride_(0),
+ real_stride_(16),
+ divisor_(0),
+ is_client_side_array_(false),
+ list_(NULL) {
+}
+
+VertexAttrib::~VertexAttrib() {
+}
+
+void VertexAttrib::SetInfo(
+ Buffer* buffer,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei gl_stride,
+ GLsizei real_stride,
+ GLsizei offset) {
+ DCHECK_GT(real_stride, 0);
+ buffer_ = buffer;
+ size_ = size;
+ type_ = type;
+ normalized_ = normalized;
+ gl_stride_ = gl_stride;
+ real_stride_ = real_stride;
+ offset_ = offset;
+}
+
+void VertexAttrib::Unbind(Buffer* buffer) {
+ if (buffer_.get() == buffer) {
+ buffer_ = NULL;
+ }
+}
+
+bool VertexAttrib::CanAccess(GLuint index) const {
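+ // A disabled attribute is never read from a buffer (the current generic
+ // attribute value is used instead), so any index is considered accessible.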
+ if (!enabled_) {
+ return true;
+ }
+
+ if (!buffer_.get() || buffer_->IsDeleted()) {
+ return false;
+ }
+
+ // Compute the number of elements that can be accessed, given the buffer
+ // size, offset and stride.
+ GLsizeiptr buffer_size = buffer_->size();
+ if (offset_ > buffer_size || real_stride_ == 0) {
+ return false;
+ }
+
+ uint32 usable_size = buffer_size - offset_;
+ GLuint num_elements = usable_size / real_stride_ +
+ ((usable_size % real_stride_) >=
+ (GLES2Util::GetGLTypeSizeForTexturesAndBuffers(type_) * size_) ? 1 : 0);
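+ // For example (illustrative numbers): a 36-byte buffer with a 4-byte
+ // offset, a 16-byte real stride and 16-byte elements gives
+ // usable_size = 32 and num_elements = 2 + 0 = 2, so only indices 0 and 1
+ // are accessible.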
+ return index < num_elements;
+}
+
+VertexAttribManager::VertexAttribManager()
+ : num_fixed_attribs_(0),
+ element_array_buffer_(NULL),
+ manager_(NULL),
+ deleted_(false),
+ service_id_(0) {
+}
+
+VertexAttribManager::VertexAttribManager(
+ VertexArrayManager* manager, GLuint service_id, uint32 num_vertex_attribs)
+ : num_fixed_attribs_(0),
+ element_array_buffer_(NULL),
+ manager_(manager),
+ deleted_(false),
+ service_id_(service_id) {
+ manager_->StartTracking(this);
+ Initialize(num_vertex_attribs, false);
+}
+
+VertexAttribManager::~VertexAttribManager() {
+ if (manager_) {
+ if (manager_->have_context_) {
+ if (service_id_ != 0) // 0 indicates an emulated VAO
+ glDeleteVertexArraysOES(1, &service_id_);
+ }
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+void VertexAttribManager::Initialize(
+ uint32 max_vertex_attribs, bool init_attribs) {
+ vertex_attribs_.resize(max_vertex_attribs);
+
+ for (uint32 vv = 0; vv < vertex_attribs_.size(); ++vv) {
+ vertex_attribs_[vv].set_index(vv);
+ vertex_attribs_[vv].SetList(&disabled_vertex_attribs_);
+
+ if (init_attribs) {
+ glVertexAttrib4f(vv, 0.0f, 0.0f, 0.0f, 1.0f);
+ }
+ }
+}
+
+void VertexAttribManager::SetElementArrayBuffer(Buffer* buffer) {
+ element_array_buffer_ = buffer;
+}
+
+bool VertexAttribManager::Enable(GLuint index, bool enable) {
+ if (index >= vertex_attribs_.size()) {
+ return false;
+ }
+ VertexAttrib& info = vertex_attribs_[index];
+ if (info.enabled() != enable) {
+ info.set_enabled(enable);
+ info.SetList(enable ? &enabled_vertex_attribs_ : &disabled_vertex_attribs_);
+ }
+ return true;
+}
+
+void VertexAttribManager::Unbind(Buffer* buffer) {
+ if (element_array_buffer_.get() == buffer) {
+ element_array_buffer_ = NULL;
+ }
+ for (uint32 vv = 0; vv < vertex_attribs_.size(); ++vv) {
+ vertex_attribs_[vv].Unbind(buffer);
+ }
+}
+
+bool VertexAttribManager::ValidateBindings(
+ const char* function_name,
+ GLES2Decoder* decoder,
+ FeatureInfo* feature_info,
+ Program* current_program,
+ GLuint max_vertex_accessed,
+ bool instanced,
+ GLsizei primcount) {
+ DCHECK(primcount);
+ ErrorState* error_state = decoder->GetErrorState();
+ // True if any enabled attribute used by the current program has a divisor
+ // of zero.
+ bool divisor0 = false;
+ bool have_enabled_active_attribs = false;
+ const GLuint kInitialBufferId = 0xFFFFFFFFU;
+ GLuint current_buffer_id = kInitialBufferId;
+ bool use_client_side_arrays_for_stream_buffers = feature_info->workarounds(
+ ).use_client_side_arrays_for_stream_buffers;
+ // Validate all attribs currently enabled. If they are used by the current
+ // program then check that they have enough elements to handle the draw call.
+ // If they are not used by the current program, check that they have a
+ // buffer assigned.
+ for (VertexAttribList::iterator it = enabled_vertex_attribs_.begin();
+ it != enabled_vertex_attribs_.end(); ++it) {
+ VertexAttrib* attrib = *it;
+ const Program::VertexAttrib* attrib_info =
+ current_program->GetAttribInfoByLocation(attrib->index());
+ if (attrib_info) {
+ divisor0 |= (attrib->divisor() == 0);
+ have_enabled_active_attribs = true;
+ GLuint count = attrib->MaxVertexAccessed(primcount, max_vertex_accessed);
+ // This attrib is used in the current program.
+ if (!attrib->CanAccess(count)) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ (std::string(
+ "attempt to access out of range vertices in attribute ") +
+ base::IntToString(attrib->index())).c_str());
+ return false;
+ }
+ if (use_client_side_arrays_for_stream_buffers) {
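+ // Workaround path: if the attribute's buffer is treated as a client side
+ // array, bind buffer 0 and pass a pointer to the buffer's data directly;
+ // otherwise rebind the real buffer object and use an offset-based pointer.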
+ Buffer* buffer = attrib->buffer();
+ glEnableVertexAttribArray(attrib->index());
+ if (buffer->IsClientSideArray()) {
+ if (current_buffer_id != 0) {
+ current_buffer_id = 0;
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ }
+ attrib->set_is_client_side_array(true);
+ const void* ptr = buffer->GetRange(attrib->offset(), 0);
+ DCHECK(ptr);
+ glVertexAttribPointer(
+ attrib->index(),
+ attrib->size(),
+ attrib->type(),
+ attrib->normalized(),
+ attrib->gl_stride(),
+ ptr);
+ } else if (attrib->is_client_side_array()) {
+ attrib->set_is_client_side_array(false);
+ GLuint new_buffer_id = buffer->service_id();
+ if (new_buffer_id != current_buffer_id) {
+ current_buffer_id = new_buffer_id;
+ glBindBuffer(GL_ARRAY_BUFFER, current_buffer_id);
+ }
+ const void* ptr = reinterpret_cast<const void*>(attrib->offset());
+ glVertexAttribPointer(
+ attrib->index(),
+ attrib->size(),
+ attrib->type(),
+ attrib->normalized(),
+ attrib->gl_stride(),
+ ptr);
+ }
+ }
+ } else {
+ // This attrib is not used in the current program.
+ if (!attrib->buffer()) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ (std::string(
+ "attempt to render with no buffer attached to "
+ "enabled attribute ") +
+ base::IntToString(attrib->index())).c_str());
+ return false;
+ } else if (use_client_side_arrays_for_stream_buffers) {
+ Buffer* buffer = attrib->buffer();
+ // Disable client side arrays for unused attributes, otherwise we'll
+ // read bad memory.
+ if (buffer->IsClientSideArray()) {
+ // Don't disable attrib 0 since it's special.
+ if (attrib->index() > 0) {
+ glDisableVertexAttribArray(attrib->index());
+ }
+ }
+ }
+ }
+ }
+
+ // Instanced drawing needs at least one enabled attribute with divisor zero.
+ // Non-instanced drawing is fine with having no attributes at all, but if
+ // there are attributes, at least one should have divisor zero.
+ // (See ANGLE_instanced_arrays spec)
+ if (!divisor0 && (instanced || have_enabled_active_attribs)) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ "attempt to draw with all attributes having non-zero divisors");
+ return false;
+ }
+
+ if (current_buffer_id != kInitialBufferId) {
+ // Restore the buffer binding.
+ decoder->RestoreBufferBindings();
+ }
+
+ return true;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/vertex_attrib_manager.h b/gpu/command_buffer/service/vertex_attrib_manager.h
new file mode 100644
index 0000000..73fa480
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_attrib_manager.h
@@ -0,0 +1,296 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_VERTEX_ATTRIB_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_VERTEX_ATTRIB_MANAGER_H_
+
+#include <list>
+#include <vector>
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "build/build_config.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class FeatureInfo;
+class GLES2Decoder;
+class Program;
+class VertexArrayManager;
+
+// Info about a Vertex Attribute. This is used to track what the user currently
+// has bound on each Vertex Attribute so that checking can be done at
+// glDrawXXX time.
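+//
+// For example (the values mirror those used in the unit tests), a
+// glVertexAttribPointer(1, 3, GL_SHORT, GL_TRUE, 32, offset 4) call against a
+// bound buffer is recorded here as size 3, type GL_SHORT, normalized GL_TRUE,
+// gl_stride 32 and offset 4.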
+class GPU_EXPORT VertexAttrib {
+ public:
+ typedef std::list<VertexAttrib*> VertexAttribList;
+
+ VertexAttrib();
+ ~VertexAttrib();
+
+ // Returns true if this VertexAttrib can access index.
+ bool CanAccess(GLuint index) const;
+
+ Buffer* buffer() const { return buffer_.get(); }
+
+ GLsizei offset() const {
+ return offset_;
+ }
+
+ GLuint index() const {
+ return index_;
+ }
+
+ GLint size() const {
+ return size_;
+ }
+
+ GLenum type() const {
+ return type_;
+ }
+
+ GLboolean normalized() const {
+ return normalized_;
+ }
+
+ GLsizei gl_stride() const {
+ return gl_stride_;
+ }
+
+ GLuint divisor() const {
+ return divisor_;
+ }
+
+ bool enabled() const {
+ return enabled_;
+ }
+
+ // Find the maximum vertex accessed, accounting for instancing.
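+ // With a non-zero divisor the attribute advances once per |divisor_|
+ // instances, so an instanced draw of |primcount| instances reads at most
+ // element (primcount - 1) / divisor_ (e.g. primcount 7, divisor 2 -> 3).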
+ GLuint MaxVertexAccessed(GLsizei primcount,
+ GLuint max_vertex_accessed) const {
+ return divisor_ ? ((primcount - 1) / divisor_) : max_vertex_accessed;
+ }
+
+ bool is_client_side_array() const {
+ return is_client_side_array_;
+ }
+
+ void set_is_client_side_array(bool value) {
+ is_client_side_array_ = value;
+ }
+
+ private:
+ friend class VertexAttribManager;
+
+ void set_enabled(bool enabled) {
+ enabled_ = enabled;
+ }
+
+ void set_index(GLuint index) {
+ index_ = index;
+ }
+
+ void SetList(VertexAttribList* new_list) {
+ DCHECK(new_list);
+
+ if (list_) {
+ list_->erase(it_);
+ }
+
+ it_ = new_list->insert(new_list->end(), this);
+ list_ = new_list;
+ }
+
+ void SetInfo(
+ Buffer* buffer,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei gl_stride,
+ GLsizei real_stride,
+ GLsizei offset);
+
+ void SetDivisor(GLsizei divisor) {
+ divisor_ = divisor;
+ }
+
+ void Unbind(Buffer* buffer);
+
+ // The index of this attrib.
+ GLuint index_;
+
+ // Whether or not this attribute is enabled.
+ bool enabled_;
+
+ // number of components (1, 2, 3, 4)
+ GLint size_;
+
+ // GL_BYTE, GL_FLOAT, etc. See glVertexAttribPointer.
+ GLenum type_;
+
+ // The offset into the buffer.
+ GLsizei offset_;
+
+ GLboolean normalized_;
+
+ // The stride passed to glVertexAttribPointer.
+ GLsizei gl_stride_;
+
+ // The stride that will be used to access the buffer. This is the actual
+ // stride, NOT the GL bogus stride. In other words there is never a stride
+ // of 0.
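+ // (For example, four tightly packed GL_FLOAT components declared with a
+ // gl_stride_ of 0 have a real_stride_ of 16, matching the defaults set in
+ // the constructor.)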
+ GLsizei real_stride_;
+
+ GLsizei divisor_;
+
+ // Will be true if this was assigned to a client side array.
+ bool is_client_side_array_;
+
+ // The buffer bound to this attribute.
+ scoped_refptr<Buffer> buffer_;
+
+ // List this info is on.
+ VertexAttribList* list_;
+
+ // Iterator into the list (enabled or disabled) this info is currently on.
+ VertexAttribList::iterator it_;
+};
+
+// Manages vertex attributes.
+// This class also acts as the service-side representation of a
+// vertex array object and its contained state.
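+//
+// A minimal sketch of how a decoder is expected to drive it (the call-site
+// variables are assumptions, not part of this file):
+//
+//   scoped_refptr<VertexAttribManager> vao(new VertexAttribManager());
+//   vao->Initialize(max_vertex_attribs, true);
+//   vao->SetAttribInfo(0, buffer, 4, GL_FLOAT, GL_FALSE,
+//                      0 /* gl_stride */, 16 /* real_stride */, 0);
+//   vao->Enable(0, true);
+//   vao->ValidateBindings("glDrawArrays", decoder, feature_info, program,
+//                         max_vertex_accessed, false /* instanced */, 1);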
+class GPU_EXPORT VertexAttribManager :
+ public base::RefCounted<VertexAttribManager> {
+ public:
+ typedef std::list<VertexAttrib*> VertexAttribList;
+
+ VertexAttribManager();
+
+ void Initialize(uint32 num_vertex_attribs, bool init_attribs);
+
+ bool Enable(GLuint index, bool enable);
+
+ bool HaveFixedAttribs() const {
+ return num_fixed_attribs_ != 0;
+ }
+
+ const VertexAttribList& GetEnabledVertexAttribs() const {
+ return enabled_vertex_attribs_;
+ }
+
+ VertexAttrib* GetVertexAttrib(GLuint index) {
+ if (index < vertex_attribs_.size()) {
+ return &vertex_attribs_[index];
+ }
+ return NULL;
+ }
+
+ void SetAttribInfo(
+ GLuint index,
+ Buffer* buffer,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei gl_stride,
+ GLsizei real_stride,
+ GLsizei offset) {
+ VertexAttrib* attrib = GetVertexAttrib(index);
+ if (attrib) {
+ if (attrib->type() == GL_FIXED) {
+ --num_fixed_attribs_;
+ }
+ if (type == GL_FIXED) {
+ ++num_fixed_attribs_;
+ }
+ attrib->SetInfo(
+ buffer, size, type, normalized, gl_stride, real_stride, offset);
+ }
+ }
+
+ void SetDivisor(GLuint index, GLuint divisor) {
+ VertexAttrib* attrib = GetVertexAttrib(index);
+ if (attrib) {
+ attrib->SetDivisor(divisor);
+ }
+ }
+
+ void SetElementArrayBuffer(Buffer* buffer);
+
+ Buffer* element_array_buffer() const { return element_array_buffer_.get(); }
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ void Unbind(Buffer* buffer);
+
+ bool IsDeleted() const {
+ return deleted_;
+ }
+
+ bool IsValid() const {
+ return !IsDeleted();
+ }
+
+ size_t num_attribs() const {
+ return vertex_attribs_.size();
+ }
+
+ bool ValidateBindings(
+ const char* function_name,
+ GLES2Decoder* decoder,
+ FeatureInfo* feature_info,
+ Program* current_program,
+ GLuint max_vertex_accessed,
+ bool instanced,
+ GLsizei primcount);
+
+ private:
+ friend class VertexArrayManager;
+ friend class VertexArrayManagerTest;
+ friend class base::RefCounted<VertexAttribManager>;
+
+ // Used when creating from a VertexArrayManager
+ VertexAttribManager(VertexArrayManager* manager, GLuint service_id,
+ uint32 num_vertex_attribs);
+
+ ~VertexAttribManager();
+
+ void MarkAsDeleted() {
+ deleted_ = true;
+ }
+
+ // number of attribs using type GL_FIXED.
+ int num_fixed_attribs_;
+
+ // Info for each vertex attribute saved so we can check at glDrawXXX time
+ // if it is safe to draw.
+ std::vector<VertexAttrib> vertex_attribs_;
+
+ // The currently bound element array buffer. If this is 0 it is illegal
+ // to call glDrawElements.
+ scoped_refptr<Buffer> element_array_buffer_;
+
+ // Lists for which vertex attribs are enabled, disabled.
+ VertexAttribList enabled_vertex_attribs_;
+ VertexAttribList disabled_vertex_attribs_;
+
+ // The VertexArrayManager that owns this VertexAttribManager
+ VertexArrayManager* manager_;
+
+ // True if deleted.
+ bool deleted_;
+
+ // Service side vertex array object id.
+ GLuint service_id_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_VERTEX_ATTRIB_MANAGER_H_
+
diff --git a/gpu/command_buffer/service/vertex_attrib_manager_unittest.cc b/gpu/command_buffer/service/vertex_attrib_manager_unittest.cc
new file mode 100644
index 0000000..e7fd690
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_attrib_manager_unittest.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::Pointee;
+using ::testing::_;
+
+namespace gpu {
+namespace gles2 {
+
+class VertexAttribManagerTest : public GpuServiceTest {
+ public:
+ static const uint32 kNumVertexAttribs = 8;
+
+ VertexAttribManagerTest() {
+ }
+
+ virtual ~VertexAttribManagerTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+
+ for (uint32 ii = 0; ii < kNumVertexAttribs; ++ii) {
+ EXPECT_CALL(*gl_, VertexAttrib4f(ii, 0.0f, 0.0f, 0.0f, 1.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+
+ manager_ = new VertexAttribManager();
+ manager_->Initialize(kNumVertexAttribs, true);
+ }
+
+ scoped_refptr<VertexAttribManager> manager_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const uint32 VertexAttribManagerTest::kNumVertexAttribs;
+#endif
+
+TEST_F(VertexAttribManagerTest, Basic) {
+ EXPECT_TRUE(manager_->GetVertexAttrib(kNumVertexAttribs) == NULL);
+ EXPECT_FALSE(manager_->HaveFixedAttribs());
+
+ const VertexAttribManager::VertexAttribList& enabled_attribs =
+ manager_->GetEnabledVertexAttribs();
+ EXPECT_EQ(0u, enabled_attribs.size());
+
+ for (uint32 ii = 0; ii < kNumVertexAttribs; ii += kNumVertexAttribs - 1) {
+ VertexAttrib* attrib = manager_->GetVertexAttrib(ii);
+ ASSERT_TRUE(attrib != NULL);
+ EXPECT_EQ(ii, attrib->index());
+ EXPECT_TRUE(attrib->buffer() == NULL);
+ EXPECT_EQ(0, attrib->offset());
+ EXPECT_EQ(4, attrib->size());
+ EXPECT_EQ(static_cast<GLenum>(GL_FLOAT), attrib->type());
+ EXPECT_EQ(GL_FALSE, attrib->normalized());
+ EXPECT_EQ(0, attrib->gl_stride());
+ EXPECT_FALSE(attrib->enabled());
+ manager_->Enable(ii, true);
+ EXPECT_TRUE(attrib->enabled());
+ }
+}
+
+TEST_F(VertexAttribManagerTest, Enable) {
+ const VertexAttribManager::VertexAttribList& enabled_attribs =
+ manager_->GetEnabledVertexAttribs();
+
+ VertexAttrib* attrib1 = manager_->GetVertexAttrib(1);
+ VertexAttrib* attrib2 = manager_->GetVertexAttrib(3);
+
+ manager_->Enable(1, true);
+ ASSERT_EQ(1u, enabled_attribs.size());
+ EXPECT_TRUE(attrib1->enabled());
+ manager_->Enable(3, true);
+ ASSERT_EQ(2u, enabled_attribs.size());
+ EXPECT_TRUE(attrib2->enabled());
+
+ manager_->Enable(1, false);
+ ASSERT_EQ(1u, enabled_attribs.size());
+ EXPECT_FALSE(attrib1->enabled());
+
+ manager_->Enable(3, false);
+ ASSERT_EQ(0u, enabled_attribs.size());
+ EXPECT_FALSE(attrib2->enabled());
+}
+
+TEST_F(VertexAttribManagerTest, SetAttribInfo) {
+ BufferManager buffer_manager(NULL, NULL);
+ buffer_manager.CreateBuffer(1, 2);
+ Buffer* buffer = buffer_manager.GetBuffer(1);
+ ASSERT_TRUE(buffer != NULL);
+
+ VertexAttrib* attrib = manager_->GetVertexAttrib(1);
+
+ manager_->SetAttribInfo(1, buffer, 3, GL_SHORT, GL_TRUE, 32, 32, 4);
+
+ EXPECT_EQ(buffer, attrib->buffer());
+ EXPECT_EQ(4, attrib->offset());
+ EXPECT_EQ(3, attrib->size());
+ EXPECT_EQ(static_cast<GLenum>(GL_SHORT), attrib->type());
+ EXPECT_EQ(GL_TRUE, attrib->normalized());
+ EXPECT_EQ(32, attrib->gl_stride());
+
+ // The VertexAttribManager must be destroyed before the BufferManager
+ // so it releases its buffers.
+ manager_ = NULL;
+ buffer_manager.Destroy(false);
+}
+
+TEST_F(VertexAttribManagerTest, HaveFixedAttribs) {
+ EXPECT_FALSE(manager_->HaveFixedAttribs());
+ manager_->SetAttribInfo(1, NULL, 4, GL_FIXED, GL_FALSE, 0, 16, 0);
+ EXPECT_TRUE(manager_->HaveFixedAttribs());
+ manager_->SetAttribInfo(3, NULL, 4, GL_FIXED, GL_FALSE, 0, 16, 0);
+ EXPECT_TRUE(manager_->HaveFixedAttribs());
+ manager_->SetAttribInfo(1, NULL, 4, GL_FLOAT, GL_FALSE, 0, 16, 0);
+ EXPECT_TRUE(manager_->HaveFixedAttribs());
+ manager_->SetAttribInfo(3, NULL, 4, GL_FLOAT, GL_FALSE, 0, 16, 0);
+ EXPECT_FALSE(manager_->HaveFixedAttribs());
+}
+
+TEST_F(VertexAttribManagerTest, CanAccess) {
+ MockErrorState error_state;
+ BufferManager buffer_manager(NULL, NULL);
+ buffer_manager.CreateBuffer(1, 2);
+ Buffer* buffer = buffer_manager.GetBuffer(1);
+ ASSERT_TRUE(buffer != NULL);
+
+ VertexAttrib* attrib = manager_->GetVertexAttrib(1);
+
+ EXPECT_TRUE(attrib->CanAccess(0));
+ manager_->Enable(1, true);
+ EXPECT_FALSE(attrib->CanAccess(0));
+
+ manager_->SetAttribInfo(1, buffer, 4, GL_FLOAT, GL_FALSE, 0, 16, 0);
+ EXPECT_FALSE(attrib->CanAccess(0));
+
+ EXPECT_TRUE(buffer_manager.SetTarget(buffer, GL_ARRAY_BUFFER));
+ TestHelper::DoBufferData(
+ gl_.get(), &error_state, &buffer_manager, buffer, 15, GL_STATIC_DRAW,
+ NULL, GL_NO_ERROR);
+
+ EXPECT_FALSE(attrib->CanAccess(0));
+ TestHelper::DoBufferData(
+ gl_.get(), &error_state, &buffer_manager, buffer, 16, GL_STATIC_DRAW,
+ NULL, GL_NO_ERROR);
+ EXPECT_TRUE(attrib->CanAccess(0));
+ EXPECT_FALSE(attrib->CanAccess(1));
+
+ manager_->SetAttribInfo(1, buffer, 4, GL_FLOAT, GL_FALSE, 0, 16, 1);
+ EXPECT_FALSE(attrib->CanAccess(0));
+
+ TestHelper::DoBufferData(
+ gl_.get(), &error_state, &buffer_manager, buffer, 32, GL_STATIC_DRAW,
+ NULL, GL_NO_ERROR);
+ EXPECT_TRUE(attrib->CanAccess(0));
+ EXPECT_FALSE(attrib->CanAccess(1));
+ manager_->SetAttribInfo(1, buffer, 4, GL_FLOAT, GL_FALSE, 0, 16, 0);
+ EXPECT_TRUE(attrib->CanAccess(1));
+ manager_->SetAttribInfo(1, buffer, 4, GL_FLOAT, GL_FALSE, 0, 20, 0);
+ EXPECT_TRUE(attrib->CanAccess(0));
+ EXPECT_FALSE(attrib->CanAccess(1));
+
+ // The VertexAttribManager must be destroyed before the BufferManager
+ // so it releases its buffers.
+ manager_ = NULL;
+ buffer_manager.Destroy(false);
+}
+
+TEST_F(VertexAttribManagerTest, Unbind) {
+ BufferManager buffer_manager(NULL, NULL);
+ buffer_manager.CreateBuffer(1, 2);
+ buffer_manager.CreateBuffer(3, 4);
+ Buffer* buffer1 = buffer_manager.GetBuffer(1);
+ Buffer* buffer2 = buffer_manager.GetBuffer(3);
+ ASSERT_TRUE(buffer1 != NULL);
+ ASSERT_TRUE(buffer2 != NULL);
+
+ VertexAttrib* attrib1 = manager_->GetVertexAttrib(1);
+ VertexAttrib* attrib3 = manager_->GetVertexAttrib(3);
+
+ // Attach to 2 buffers.
+ manager_->SetAttribInfo(1, buffer1, 3, GL_SHORT, GL_TRUE, 32, 32, 4);
+ manager_->SetAttribInfo(3, buffer1, 3, GL_SHORT, GL_TRUE, 32, 32, 4);
+ // Check they were attached.
+ EXPECT_EQ(buffer1, attrib1->buffer());
+ EXPECT_EQ(buffer1, attrib3->buffer());
+ // Unbind unattached buffer.
+ manager_->Unbind(buffer2);
+ // Should be no-op.
+ EXPECT_EQ(buffer1, attrib1->buffer());
+ EXPECT_EQ(buffer1, attrib3->buffer());
+ // Unbind buffer.
+ manager_->Unbind(buffer1);
+ // Check they were detached
+ EXPECT_TRUE(NULL == attrib1->buffer());
+ EXPECT_TRUE(NULL == attrib3->buffer());
+
+ // The VertexAttribManager must be destroyed before the BufferManager
+ // so it releases its buffers.
+ manager_ = NULL;
+ buffer_manager.Destroy(false);
+}
+
+// TODO(gman): Test ValidateBindings
+// TODO(gman): Test ValidateBindings with client side arrays.
+
+} // namespace gles2
+} // namespace gpu
+
+