Clone of chromium aad1ce808763f59c7a3753e08f1500a104ecc6fd refs/remotes/origin/HEAD
diff --git a/gpu/command_buffer/OWNERS b/gpu/command_buffer/OWNERS
new file mode 100644
index 0000000..cd07f4d
--- /dev/null
+++ b/gpu/command_buffer/OWNERS
@@ -0,0 +1,5 @@
+piman@chromium.org
+jbauman@chromium.org
+bajones@chromium.org
+zmo@chromium.org
+vmiura@chromium.org
diff --git a/gpu/command_buffer/build_gles2_cmd_buffer.py b/gpu/command_buffer/build_gles2_cmd_buffer.py
new file mode 100755
index 0000000..5c9e127
--- /dev/null
+++ b/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -0,0 +1,8652 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""code generator for GLES2 command buffers."""
+
+import itertools
+import os
+import os.path
+import sys
+import re
+from optparse import OptionParser
+from subprocess import call
+
+# Sizes are in bytes.
+_SIZE_OF_UINT32 = 4
+_SIZE_OF_COMMAND_HEADER = 4
+# Command ids below this value are reserved for the common commands shared by
+# all command buffers; GLES2-specific command ids start here.
+_FIRST_SPECIFIC_COMMAND_ID = 256
+
+_LICENSE = """// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"""
+
+_DO_NOT_EDIT_WARNING = """// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+"""
+
+# This string is copied directly out of the gl2.h file from GLES2.0
+#
+# Edits:
+#
+# *) Any argument that is a resource ID (but not pointer arguments) has been
+#    changed to GLid<Type>. If it's allowed to be zero it's GLidZero<Type>;
+#    if it's allowed to not exist it's GLidBind<Type>.
+#
+# *) All GLenums have been changed to GLenumTypeOfEnum
+#
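+# For example (illustrative only, following the conventions above): a raw
+# GLES2 prototype such as
+#   void glBindTexture(GLenum target, GLuint texture);
+# would be written in cmd_buffer_functions.txt as something like
+#   void glBindTexture(GLenumTextureBindTarget target,
+#                      GLidBindTexture texture);
+# telling the generator which validator to apply (TextureBindTarget) and that
+# the id may name an object that does not exist yet (GLidBind).
+#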
+_GL_TYPES = {
+ 'GLenum': 'unsigned int',
+ 'GLboolean': 'unsigned char',
+ 'GLbitfield': 'unsigned int',
+ 'GLbyte': 'signed char',
+ 'GLshort': 'short',
+ 'GLint': 'int',
+ 'GLsizei': 'int',
+ 'GLubyte': 'unsigned char',
+ 'GLushort': 'unsigned short',
+ 'GLuint': 'unsigned int',
+ 'GLfloat': 'float',
+ 'GLclampf': 'float',
+ 'GLvoid': 'void',
+ 'GLfixed': 'int',
+ 'GLclampx': 'int'
+}
+
+_GL_TYPES_32 = {
+ 'GLintptr': 'long int',
+ 'GLsizeiptr': 'long int'
+}
+
+_GL_TYPES_64 = {
+ 'GLintptr': 'long long int',
+ 'GLsizeiptr': 'long long int'
+}
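+
+# A minimal illustration (an assumption about usage, not generator code): the
+# two maps above presumably override the base mapping depending on the target
+# pointer width, e.g.
+#   gl_types = dict(_GL_TYPES)
+#   gl_types.update(_GL_TYPES_64 if pointer_size == 8 else _GL_TYPES_32)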
+
+# Capabilities selected with glEnable
+_CAPABILITY_FLAGS = [
+ {'name': 'blend'},
+ {'name': 'cull_face'},
+ {'name': 'depth_test', 'state_flag': 'framebuffer_state_.clear_state_dirty'},
+ {'name': 'dither', 'default': True},
+ {'name': 'polygon_offset_fill'},
+ {'name': 'sample_alpha_to_coverage'},
+ {'name': 'sample_coverage'},
+ {'name': 'scissor_test'},
+ {'name': 'stencil_test',
+ 'state_flag': 'framebuffer_state_.clear_state_dirty'},
+]
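+
+# Illustrative note: each flag corresponds to the GL capability enum obtained
+# by upper-casing its name and prefixing "GL_" (see the 'Capability' entry in
+# _NAMED_TYPE_INFO below), e.g.
+#   ["GL_%s" % cap['name'].upper() for cap in _CAPABILITY_FLAGS]
+# yields 'GL_BLEND', 'GL_CULL_FACE', 'GL_DEPTH_TEST', and so on.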
+
+_STATES = {
+ 'ClearColor': {
+ 'type': 'Normal',
+ 'func': 'ClearColor',
+ 'enum': 'GL_COLOR_CLEAR_VALUE',
+ 'states': [
+ {'name': 'color_clear_red', 'type': 'GLfloat', 'default': '0.0f'},
+ {'name': 'color_clear_green', 'type': 'GLfloat', 'default': '0.0f'},
+ {'name': 'color_clear_blue', 'type': 'GLfloat', 'default': '0.0f'},
+ {'name': 'color_clear_alpha', 'type': 'GLfloat', 'default': '0.0f'},
+ ],
+ },
+ 'ClearDepthf': {
+ 'type': 'Normal',
+ 'func': 'ClearDepth',
+ 'enum': 'GL_DEPTH_CLEAR_VALUE',
+ 'states': [
+ {'name': 'depth_clear', 'type': 'GLclampf', 'default': '1.0f'},
+ ],
+ },
+ 'ColorMask': {
+ 'type': 'Normal',
+ 'func': 'ColorMask',
+ 'enum': 'GL_COLOR_WRITEMASK',
+ 'states': [
+ {
+ 'name': 'color_mask_red',
+ 'type': 'GLboolean',
+ 'default': 'true',
+ 'cached': True
+ },
+ {
+ 'name': 'color_mask_green',
+ 'type': 'GLboolean',
+ 'default': 'true',
+ 'cached': True
+ },
+ {
+ 'name': 'color_mask_blue',
+ 'type': 'GLboolean',
+ 'default': 'true',
+ 'cached': True
+ },
+ {
+ 'name': 'color_mask_alpha',
+ 'type': 'GLboolean',
+ 'default': 'true',
+ 'cached': True
+ },
+ ],
+ 'state_flag': 'framebuffer_state_.clear_state_dirty',
+ },
+ 'ClearStencil': {
+ 'type': 'Normal',
+ 'func': 'ClearStencil',
+ 'enum': 'GL_STENCIL_CLEAR_VALUE',
+ 'states': [
+ {'name': 'stencil_clear', 'type': 'GLint', 'default': '0'},
+ ],
+ },
+ 'BlendColor': {
+ 'type': 'Normal',
+ 'func': 'BlendColor',
+ 'enum': 'GL_BLEND_COLOR',
+ 'states': [
+ {'name': 'blend_color_red', 'type': 'GLfloat', 'default': '0.0f'},
+ {'name': 'blend_color_green', 'type': 'GLfloat', 'default': '0.0f'},
+ {'name': 'blend_color_blue', 'type': 'GLfloat', 'default': '0.0f'},
+ {'name': 'blend_color_alpha', 'type': 'GLfloat', 'default': '0.0f'},
+ ],
+ },
+ 'BlendEquation': {
+ 'type': 'SrcDst',
+ 'func': 'BlendEquationSeparate',
+ 'states': [
+ {
+ 'name': 'blend_equation_rgb',
+ 'type': 'GLenum',
+ 'enum': 'GL_BLEND_EQUATION_RGB',
+ 'default': 'GL_FUNC_ADD',
+ },
+ {
+ 'name': 'blend_equation_alpha',
+ 'type': 'GLenum',
+ 'enum': 'GL_BLEND_EQUATION_ALPHA',
+ 'default': 'GL_FUNC_ADD',
+ },
+ ],
+ },
+ 'BlendFunc': {
+ 'type': 'SrcDst',
+ 'func': 'BlendFuncSeparate',
+ 'states': [
+ {
+ 'name': 'blend_source_rgb',
+ 'type': 'GLenum',
+ 'enum': 'GL_BLEND_SRC_RGB',
+ 'default': 'GL_ONE',
+ },
+ {
+ 'name': 'blend_dest_rgb',
+ 'type': 'GLenum',
+ 'enum': 'GL_BLEND_DST_RGB',
+ 'default': 'GL_ZERO',
+ },
+ {
+ 'name': 'blend_source_alpha',
+ 'type': 'GLenum',
+ 'enum': 'GL_BLEND_SRC_ALPHA',
+ 'default': 'GL_ONE',
+ },
+ {
+ 'name': 'blend_dest_alpha',
+ 'type': 'GLenum',
+ 'enum': 'GL_BLEND_DST_ALPHA',
+ 'default': 'GL_ZERO',
+ },
+ ],
+ },
+ 'PolygonOffset': {
+ 'type': 'Normal',
+ 'func': 'PolygonOffset',
+ 'states': [
+ {
+ 'name': 'polygon_offset_factor',
+ 'type': 'GLfloat',
+ 'enum': 'GL_POLYGON_OFFSET_FACTOR',
+ 'default': '0.0f',
+ },
+ {
+ 'name': 'polygon_offset_units',
+ 'type': 'GLfloat',
+ 'enum': 'GL_POLYGON_OFFSET_UNITS',
+ 'default': '0.0f',
+ },
+ ],
+ },
+ 'CullFace': {
+ 'type': 'Normal',
+ 'func': 'CullFace',
+ 'enum': 'GL_CULL_FACE_MODE',
+ 'states': [
+ {
+ 'name': 'cull_mode',
+ 'type': 'GLenum',
+ 'default': 'GL_BACK',
+ },
+ ],
+ },
+ 'FrontFace': {
+ 'type': 'Normal',
+ 'func': 'FrontFace',
+ 'enum': 'GL_FRONT_FACE',
+ 'states': [{'name': 'front_face', 'type': 'GLenum', 'default': 'GL_CCW'}],
+ },
+ 'DepthFunc': {
+ 'type': 'Normal',
+ 'func': 'DepthFunc',
+ 'enum': 'GL_DEPTH_FUNC',
+ 'states': [{'name': 'depth_func', 'type': 'GLenum', 'default': 'GL_LESS'}],
+ },
+ 'DepthRange': {
+ 'type': 'Normal',
+ 'func': 'DepthRange',
+ 'enum': 'GL_DEPTH_RANGE',
+ 'states': [
+ {'name': 'z_near', 'type': 'GLclampf', 'default': '0.0f'},
+ {'name': 'z_far', 'type': 'GLclampf', 'default': '1.0f'},
+ ],
+ },
+ 'SampleCoverage': {
+ 'type': 'Normal',
+ 'func': 'SampleCoverage',
+ 'states': [
+ {
+ 'name': 'sample_coverage_value',
+ 'type': 'GLclampf',
+ 'enum': 'GL_SAMPLE_COVERAGE_VALUE',
+ 'default': '1.0f',
+ },
+ {
+ 'name': 'sample_coverage_invert',
+ 'type': 'GLboolean',
+ 'enum': 'GL_SAMPLE_COVERAGE_INVERT',
+ 'default': 'false',
+ },
+ ],
+ },
+ 'StencilMask': {
+ 'type': 'FrontBack',
+ 'func': 'StencilMaskSeparate',
+ 'state_flag': 'framebuffer_state_.clear_state_dirty',
+ 'states': [
+ {
+ 'name': 'stencil_front_writemask',
+ 'type': 'GLuint',
+ 'enum': 'GL_STENCIL_WRITEMASK',
+ 'default': '0xFFFFFFFFU',
+ 'cached': True,
+ },
+ {
+ 'name': 'stencil_back_writemask',
+ 'type': 'GLuint',
+ 'enum': 'GL_STENCIL_BACK_WRITEMASK',
+ 'default': '0xFFFFFFFFU',
+ 'cached': True,
+ },
+ ],
+ },
+ 'StencilOp': {
+ 'type': 'FrontBack',
+ 'func': 'StencilOpSeparate',
+ 'states': [
+ {
+ 'name': 'stencil_front_fail_op',
+ 'type': 'GLenum',
+ 'enum': 'GL_STENCIL_FAIL',
+ 'default': 'GL_KEEP',
+ },
+ {
+ 'name': 'stencil_front_z_fail_op',
+ 'type': 'GLenum',
+ 'enum': 'GL_STENCIL_PASS_DEPTH_FAIL',
+ 'default': 'GL_KEEP',
+ },
+ {
+ 'name': 'stencil_front_z_pass_op',
+ 'type': 'GLenum',
+ 'enum': 'GL_STENCIL_PASS_DEPTH_PASS',
+ 'default': 'GL_KEEP',
+ },
+ {
+ 'name': 'stencil_back_fail_op',
+ 'type': 'GLenum',
+ 'enum': 'GL_STENCIL_BACK_FAIL',
+ 'default': 'GL_KEEP',
+ },
+ {
+ 'name': 'stencil_back_z_fail_op',
+ 'type': 'GLenum',
+ 'enum': 'GL_STENCIL_BACK_PASS_DEPTH_FAIL',
+ 'default': 'GL_KEEP',
+ },
+ {
+ 'name': 'stencil_back_z_pass_op',
+ 'type': 'GLenum',
+ 'enum': 'GL_STENCIL_BACK_PASS_DEPTH_PASS',
+ 'default': 'GL_KEEP',
+ },
+ ],
+ },
+ 'StencilFunc': {
+ 'type': 'FrontBack',
+ 'func': 'StencilFuncSeparate',
+ 'states': [
+ {
+ 'name': 'stencil_front_func',
+ 'type': 'GLenum',
+ 'enum': 'GL_STENCIL_FUNC',
+ 'default': 'GL_ALWAYS',
+ },
+ {
+ 'name': 'stencil_front_ref',
+ 'type': 'GLint',
+ 'enum': 'GL_STENCIL_REF',
+ 'default': '0',
+ },
+ {
+ 'name': 'stencil_front_mask',
+ 'type': 'GLuint',
+ 'enum': 'GL_STENCIL_VALUE_MASK',
+ 'default': '0xFFFFFFFFU',
+ },
+ {
+ 'name': 'stencil_back_func',
+ 'type': 'GLenum',
+ 'enum': 'GL_STENCIL_BACK_FUNC',
+ 'default': 'GL_ALWAYS',
+ },
+ {
+ 'name': 'stencil_back_ref',
+ 'type': 'GLint',
+ 'enum': 'GL_STENCIL_BACK_REF',
+ 'default': '0',
+ },
+ {
+ 'name': 'stencil_back_mask',
+ 'type': 'GLuint',
+ 'enum': 'GL_STENCIL_BACK_VALUE_MASK',
+ 'default': '0xFFFFFFFFU',
+ },
+ ],
+ },
+ 'Hint': {
+ 'type': 'NamedParameter',
+ 'func': 'Hint',
+ 'states': [
+ {
+ 'name': 'hint_generate_mipmap',
+ 'type': 'GLenum',
+ 'enum': 'GL_GENERATE_MIPMAP_HINT',
+ 'default': 'GL_DONT_CARE'
+ },
+ {
+ 'name': 'hint_fragment_shader_derivative',
+ 'type': 'GLenum',
+ 'enum': 'GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES',
+ 'default': 'GL_DONT_CARE',
+ 'extension_flag': 'oes_standard_derivatives'
+ }
+ ],
+ },
+ 'PixelStore': {
+ 'type': 'NamedParameter',
+ 'func': 'PixelStorei',
+ 'states': [
+ {
+ 'name': 'pack_alignment',
+ 'type': 'GLint',
+ 'enum': 'GL_PACK_ALIGNMENT',
+ 'default': '4'
+ },
+ {
+ 'name': 'unpack_alignment',
+ 'type': 'GLint',
+ 'enum': 'GL_UNPACK_ALIGNMENT',
+ 'default': '4'
+ }
+ ],
+ },
+  # TODO: Consider implementing these states:
+ # GL_ACTIVE_TEXTURE
+ 'LineWidth': {
+ 'type': 'Normal',
+ 'func': 'LineWidth',
+ 'enum': 'GL_LINE_WIDTH',
+ 'states': [
+ {
+ 'name': 'line_width',
+ 'type': 'GLfloat',
+ 'default': '1.0f',
+ 'range_checks': [{'check': "<= 0.0f", 'test_value': "0.0f"}],
+ 'nan_check': True,
+ }],
+ },
+ 'DepthMask': {
+ 'type': 'Normal',
+ 'func': 'DepthMask',
+ 'enum': 'GL_DEPTH_WRITEMASK',
+ 'states': [
+ {
+ 'name': 'depth_mask',
+ 'type': 'GLboolean',
+ 'default': 'true',
+ 'cached': True
+ },
+ ],
+ 'state_flag': 'framebuffer_state_.clear_state_dirty',
+ },
+ 'Scissor': {
+ 'type': 'Normal',
+ 'func': 'Scissor',
+ 'enum': 'GL_SCISSOR_BOX',
+ 'states': [
+      # NOTE: These defaults are reset at GLES2DecoderImpl::Initialization.
+ {
+ 'name': 'scissor_x',
+ 'type': 'GLint',
+ 'default': '0',
+ 'expected': 'kViewportX',
+ },
+ {
+ 'name': 'scissor_y',
+ 'type': 'GLint',
+ 'default': '0',
+ 'expected': 'kViewportY',
+ },
+ {
+ 'name': 'scissor_width',
+ 'type': 'GLsizei',
+ 'default': '1',
+ 'expected': 'kViewportWidth',
+ },
+ {
+ 'name': 'scissor_height',
+ 'type': 'GLsizei',
+ 'default': '1',
+ 'expected': 'kViewportHeight',
+ },
+ ],
+ },
+ 'Viewport': {
+ 'type': 'Normal',
+ 'func': 'Viewport',
+ 'enum': 'GL_VIEWPORT',
+ 'states': [
+      # NOTE: These defaults are reset at GLES2DecoderImpl::Initialization.
+ {
+ 'name': 'viewport_x',
+ 'type': 'GLint',
+ 'default': '0',
+ 'expected': 'kViewportX',
+ },
+ {
+ 'name': 'viewport_y',
+ 'type': 'GLint',
+ 'default': '0',
+ 'expected': 'kViewportY',
+ },
+ {
+ 'name': 'viewport_width',
+ 'type': 'GLsizei',
+ 'default': '1',
+ 'expected': 'kViewportWidth',
+ },
+ {
+ 'name': 'viewport_height',
+ 'type': 'GLsizei',
+ 'default': '1',
+ 'expected': 'kViewportHeight',
+ },
+ ],
+ },
+ 'MatrixValuesCHROMIUM': {
+ 'type': 'NamedParameter',
+ 'func': 'MatrixLoadfEXT',
+ 'states': [
+ { 'enum': 'GL_PATH_MODELVIEW_MATRIX_CHROMIUM',
+ 'enum_set': 'GL_PATH_MODELVIEW_CHROMIUM',
+ 'name': 'modelview_matrix',
+ 'type': 'GLfloat',
+ 'default': [
+ '1.0f', '0.0f','0.0f','0.0f',
+ '0.0f', '1.0f','0.0f','0.0f',
+ '0.0f', '0.0f','1.0f','0.0f',
+ '0.0f', '0.0f','0.0f','1.0f',
+ ],
+ 'extension_flag': 'chromium_path_rendering',
+ },
+ { 'enum': 'GL_PATH_PROJECTION_MATRIX_CHROMIUM',
+ 'enum_set': 'GL_PATH_PROJECTION_CHROMIUM',
+ 'name': 'projection_matrix',
+ 'type': 'GLfloat',
+ 'default': [
+ '1.0f', '0.0f','0.0f','0.0f',
+ '0.0f', '1.0f','0.0f','0.0f',
+ '0.0f', '0.0f','1.0f','0.0f',
+ '0.0f', '0.0f','0.0f','1.0f',
+ ],
+ 'extension_flag': 'chromium_path_rendering',
+ },
+ ],
+ },
+}
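+
+# Illustrative helper, not used by the generator itself: a minimal sketch of
+# how the _STATES table above can be walked.  Every entry names the GL
+# function that applies the state ('func') plus the individual state values
+# and their defaults.
+def _ExampleStateDefaults(states=_STATES):
+  """Returns (gl_func, state_name, default) tuples for every tracked state."""
+  rows = []
+  for info in states.values():
+    for state in info['states']:
+      rows.append((info['func'], state['name'], state.get('default')))
+  return rows
+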
+
+# A named type info object represents a named type used in OpenGL call
+# arguments. Each named type defines a set of valid OpenGL call arguments. The
+# named types are used in 'cmd_buffer_functions.txt'.
+# type: The actual GL type of the named type.
+# valid: The list of values that are valid for both the client and the service.
+# invalid: Examples of invalid values for the type. At least these values
+# should be tested to be invalid.
+# is_complete: The list of valid values for the type is final and will not be
+#     modified at runtime.
+_NAMED_TYPE_INFO = {
+ 'BlitFilter': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_NEAREST',
+ 'GL_LINEAR',
+ ],
+ 'invalid': [
+ 'GL_LINEAR_MIPMAP_LINEAR',
+ ],
+ },
+ 'FrameBufferTarget': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_FRAMEBUFFER',
+ ],
+ 'invalid': [
+      'GL_DRAW_FRAMEBUFFER',
+      'GL_READ_FRAMEBUFFER',
+ ],
+ },
+ 'RenderBufferTarget': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_RENDERBUFFER',
+ ],
+ 'invalid': [
+ 'GL_FRAMEBUFFER',
+ ],
+ },
+ 'BufferTarget': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_ARRAY_BUFFER',
+ 'GL_ELEMENT_ARRAY_BUFFER',
+ ],
+ 'invalid': [
+ 'GL_RENDERBUFFER',
+ ],
+ },
+ 'BufferUsage': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_STREAM_DRAW',
+ 'GL_STATIC_DRAW',
+ 'GL_DYNAMIC_DRAW',
+ ],
+ 'invalid': [
+ 'GL_STATIC_READ',
+ ],
+ },
+ 'CompressedTextureFormat': {
+ 'type': 'GLenum',
+ 'valid': [
+ ],
+ },
+ 'GLState': {
+ 'type': 'GLenum',
+ 'valid': [
+      # NOTE: State and Capability entries are added later.
+ 'GL_ACTIVE_TEXTURE',
+ 'GL_ALIASED_LINE_WIDTH_RANGE',
+ 'GL_ALIASED_POINT_SIZE_RANGE',
+ 'GL_ALPHA_BITS',
+ 'GL_ARRAY_BUFFER_BINDING',
+ 'GL_BLUE_BITS',
+ 'GL_COMPRESSED_TEXTURE_FORMATS',
+ 'GL_CURRENT_PROGRAM',
+ 'GL_DEPTH_BITS',
+ 'GL_DEPTH_RANGE',
+ 'GL_ELEMENT_ARRAY_BUFFER_BINDING',
+ 'GL_FRAMEBUFFER_BINDING',
+ 'GL_GENERATE_MIPMAP_HINT',
+ 'GL_GREEN_BITS',
+ 'GL_IMPLEMENTATION_COLOR_READ_FORMAT',
+ 'GL_IMPLEMENTATION_COLOR_READ_TYPE',
+ 'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS',
+ 'GL_MAX_CUBE_MAP_TEXTURE_SIZE',
+ 'GL_MAX_FRAGMENT_UNIFORM_VECTORS',
+ 'GL_MAX_RENDERBUFFER_SIZE',
+ 'GL_MAX_TEXTURE_IMAGE_UNITS',
+ 'GL_MAX_TEXTURE_SIZE',
+ 'GL_MAX_VARYING_VECTORS',
+ 'GL_MAX_VERTEX_ATTRIBS',
+ 'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS',
+ 'GL_MAX_VERTEX_UNIFORM_VECTORS',
+ 'GL_MAX_VIEWPORT_DIMS',
+ 'GL_NUM_COMPRESSED_TEXTURE_FORMATS',
+ 'GL_NUM_SHADER_BINARY_FORMATS',
+ 'GL_PACK_ALIGNMENT',
+ 'GL_RED_BITS',
+ 'GL_RENDERBUFFER_BINDING',
+ 'GL_SAMPLE_BUFFERS',
+ 'GL_SAMPLE_COVERAGE_INVERT',
+ 'GL_SAMPLE_COVERAGE_VALUE',
+ 'GL_SAMPLES',
+ 'GL_SCISSOR_BOX',
+ 'GL_SHADER_BINARY_FORMATS',
+ 'GL_SHADER_COMPILER',
+ 'GL_SUBPIXEL_BITS',
+ 'GL_STENCIL_BITS',
+ 'GL_TEXTURE_BINDING_2D',
+ 'GL_TEXTURE_BINDING_CUBE_MAP',
+ 'GL_UNPACK_ALIGNMENT',
+ 'GL_UNPACK_FLIP_Y_CHROMIUM',
+ 'GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM',
+ 'GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM',
+ 'GL_BIND_GENERATES_RESOURCE_CHROMIUM',
+ # we can add this because we emulate it if the driver does not support it.
+ 'GL_VERTEX_ARRAY_BINDING_OES',
+ 'GL_VIEWPORT',
+ ],
+ 'invalid': [
+ 'GL_FOG_HINT',
+ ],
+ },
+ 'GetTexParamTarget': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_TEXTURE_2D',
+ 'GL_TEXTURE_CUBE_MAP',
+ ],
+ 'invalid': [
+ 'GL_PROXY_TEXTURE_CUBE_MAP',
+ ]
+ },
+ 'TextureTarget': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_TEXTURE_2D',
+ 'GL_TEXTURE_CUBE_MAP_POSITIVE_X',
+ 'GL_TEXTURE_CUBE_MAP_NEGATIVE_X',
+ 'GL_TEXTURE_CUBE_MAP_POSITIVE_Y',
+ 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y',
+ 'GL_TEXTURE_CUBE_MAP_POSITIVE_Z',
+ 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z',
+ ],
+ 'invalid': [
+ 'GL_PROXY_TEXTURE_CUBE_MAP',
+ ]
+ },
+ 'TextureBindTarget': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_TEXTURE_2D',
+ 'GL_TEXTURE_CUBE_MAP',
+ ],
+ 'invalid': [
+ 'GL_TEXTURE_1D',
+ 'GL_TEXTURE_3D',
+ ],
+ },
+ 'ShaderType': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_VERTEX_SHADER',
+ 'GL_FRAGMENT_SHADER',
+ ],
+ 'invalid': [
+ 'GL_GEOMETRY_SHADER',
+ ],
+ },
+ 'FaceType': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_FRONT',
+ 'GL_BACK',
+ 'GL_FRONT_AND_BACK',
+ ],
+ },
+ 'FaceMode': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_CW',
+ 'GL_CCW',
+ ],
+ },
+ 'CmpFunction': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_NEVER',
+ 'GL_LESS',
+ 'GL_EQUAL',
+ 'GL_LEQUAL',
+ 'GL_GREATER',
+ 'GL_NOTEQUAL',
+ 'GL_GEQUAL',
+ 'GL_ALWAYS',
+ ],
+ },
+ 'Equation': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_FUNC_ADD',
+ 'GL_FUNC_SUBTRACT',
+ 'GL_FUNC_REVERSE_SUBTRACT',
+ ],
+ 'invalid': [
+ 'GL_MIN',
+ 'GL_MAX',
+ ],
+ },
+ 'SrcBlendFactor': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_ZERO',
+ 'GL_ONE',
+ 'GL_SRC_COLOR',
+ 'GL_ONE_MINUS_SRC_COLOR',
+ 'GL_DST_COLOR',
+ 'GL_ONE_MINUS_DST_COLOR',
+ 'GL_SRC_ALPHA',
+ 'GL_ONE_MINUS_SRC_ALPHA',
+ 'GL_DST_ALPHA',
+ 'GL_ONE_MINUS_DST_ALPHA',
+ 'GL_CONSTANT_COLOR',
+ 'GL_ONE_MINUS_CONSTANT_COLOR',
+ 'GL_CONSTANT_ALPHA',
+ 'GL_ONE_MINUS_CONSTANT_ALPHA',
+ 'GL_SRC_ALPHA_SATURATE',
+ ],
+ },
+ 'DstBlendFactor': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_ZERO',
+ 'GL_ONE',
+ 'GL_SRC_COLOR',
+ 'GL_ONE_MINUS_SRC_COLOR',
+ 'GL_DST_COLOR',
+ 'GL_ONE_MINUS_DST_COLOR',
+ 'GL_SRC_ALPHA',
+ 'GL_ONE_MINUS_SRC_ALPHA',
+ 'GL_DST_ALPHA',
+ 'GL_ONE_MINUS_DST_ALPHA',
+ 'GL_CONSTANT_COLOR',
+ 'GL_ONE_MINUS_CONSTANT_COLOR',
+ 'GL_CONSTANT_ALPHA',
+ 'GL_ONE_MINUS_CONSTANT_ALPHA',
+ ],
+ },
+ 'Capability': {
+ 'type': 'GLenum',
+ 'valid': ["GL_%s" % cap['name'].upper() for cap in _CAPABILITY_FLAGS],
+ 'invalid': [
+ 'GL_CLIP_PLANE0',
+ 'GL_POINT_SPRITE',
+ ],
+ },
+ 'DrawMode': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_POINTS',
+ 'GL_LINE_STRIP',
+ 'GL_LINE_LOOP',
+ 'GL_LINES',
+ 'GL_TRIANGLE_STRIP',
+ 'GL_TRIANGLE_FAN',
+ 'GL_TRIANGLES',
+ ],
+ 'invalid': [
+ 'GL_QUADS',
+ 'GL_POLYGON',
+ ],
+ },
+ 'IndexType': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_UNSIGNED_BYTE',
+ 'GL_UNSIGNED_SHORT',
+ ],
+ 'invalid': [
+ 'GL_UNSIGNED_INT',
+ 'GL_INT',
+ ],
+ },
+ 'GetMaxIndexType': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_UNSIGNED_BYTE',
+ 'GL_UNSIGNED_SHORT',
+ 'GL_UNSIGNED_INT',
+ ],
+ 'invalid': [
+ 'GL_INT',
+ ],
+ },
+ 'Attachment': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_COLOR_ATTACHMENT0',
+ 'GL_DEPTH_ATTACHMENT',
+ 'GL_STENCIL_ATTACHMENT',
+ ],
+ },
+ 'BackbufferAttachment': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_COLOR_EXT',
+ 'GL_DEPTH_EXT',
+ 'GL_STENCIL_EXT',
+ ],
+ },
+ 'BufferParameter': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_BUFFER_SIZE',
+ 'GL_BUFFER_USAGE',
+ ],
+ 'invalid': [
+ 'GL_PIXEL_PACK_BUFFER',
+ ],
+ },
+ 'FrameBufferParameter': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE',
+ 'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME',
+ 'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL',
+ 'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE',
+ ],
+ },
+ 'MatrixMode': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_PATH_PROJECTION_CHROMIUM',
+ 'GL_PATH_MODELVIEW_CHROMIUM',
+ ],
+ },
+ 'ProgramParameter': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_DELETE_STATUS',
+ 'GL_LINK_STATUS',
+ 'GL_VALIDATE_STATUS',
+ 'GL_INFO_LOG_LENGTH',
+ 'GL_ATTACHED_SHADERS',
+ 'GL_ACTIVE_ATTRIBUTES',
+ 'GL_ACTIVE_ATTRIBUTE_MAX_LENGTH',
+ 'GL_ACTIVE_UNIFORMS',
+ 'GL_ACTIVE_UNIFORM_MAX_LENGTH',
+ ],
+ },
+ 'QueryObjectParameter': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_QUERY_RESULT_EXT',
+ 'GL_QUERY_RESULT_AVAILABLE_EXT',
+ ],
+ },
+ 'QueryParameter': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_CURRENT_QUERY_EXT',
+ ],
+ },
+ 'QueryTarget': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_ANY_SAMPLES_PASSED_EXT',
+ 'GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT',
+ 'GL_COMMANDS_ISSUED_CHROMIUM',
+ 'GL_LATENCY_QUERY_CHROMIUM',
+ 'GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM',
+ 'GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM',
+ 'GL_COMMANDS_COMPLETED_CHROMIUM',
+ ],
+ },
+ 'RenderBufferParameter': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_RENDERBUFFER_RED_SIZE',
+ 'GL_RENDERBUFFER_GREEN_SIZE',
+ 'GL_RENDERBUFFER_BLUE_SIZE',
+ 'GL_RENDERBUFFER_ALPHA_SIZE',
+ 'GL_RENDERBUFFER_DEPTH_SIZE',
+ 'GL_RENDERBUFFER_STENCIL_SIZE',
+ 'GL_RENDERBUFFER_WIDTH',
+ 'GL_RENDERBUFFER_HEIGHT',
+ 'GL_RENDERBUFFER_INTERNAL_FORMAT',
+ ],
+ },
+ 'ShaderParameter': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_SHADER_TYPE',
+ 'GL_DELETE_STATUS',
+ 'GL_COMPILE_STATUS',
+ 'GL_INFO_LOG_LENGTH',
+ 'GL_SHADER_SOURCE_LENGTH',
+ 'GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE',
+ ],
+ },
+ 'ShaderPrecision': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_LOW_FLOAT',
+ 'GL_MEDIUM_FLOAT',
+ 'GL_HIGH_FLOAT',
+ 'GL_LOW_INT',
+ 'GL_MEDIUM_INT',
+ 'GL_HIGH_INT',
+ ],
+ },
+ 'StringType': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_VENDOR',
+ 'GL_RENDERER',
+ 'GL_VERSION',
+ 'GL_SHADING_LANGUAGE_VERSION',
+ 'GL_EXTENSIONS',
+ ],
+ },
+ 'TextureParameter': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_TEXTURE_MAG_FILTER',
+ 'GL_TEXTURE_MIN_FILTER',
+ 'GL_TEXTURE_POOL_CHROMIUM',
+ 'GL_TEXTURE_WRAP_S',
+ 'GL_TEXTURE_WRAP_T',
+ ],
+ 'invalid': [
+ 'GL_GENERATE_MIPMAP',
+ ],
+ },
+ 'TexturePool': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_TEXTURE_POOL_MANAGED_CHROMIUM',
+ 'GL_TEXTURE_POOL_UNMANAGED_CHROMIUM',
+ ],
+ },
+ 'TextureWrapMode': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_CLAMP_TO_EDGE',
+ 'GL_MIRRORED_REPEAT',
+ 'GL_REPEAT',
+ ],
+ },
+ 'TextureMinFilterMode': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_NEAREST',
+ 'GL_LINEAR',
+ 'GL_NEAREST_MIPMAP_NEAREST',
+ 'GL_LINEAR_MIPMAP_NEAREST',
+ 'GL_NEAREST_MIPMAP_LINEAR',
+ 'GL_LINEAR_MIPMAP_LINEAR',
+ ],
+ },
+ 'TextureMagFilterMode': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_NEAREST',
+ 'GL_LINEAR',
+ ],
+ },
+ 'TextureUsage': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_NONE',
+ 'GL_FRAMEBUFFER_ATTACHMENT_ANGLE',
+ ],
+ },
+ 'VertexAttribute': {
+ 'type': 'GLenum',
+ 'valid': [
+ # some enum that the decoder actually passes through to GL needs
+ # to be the first listed here since it's used in unit tests.
+ 'GL_VERTEX_ATTRIB_ARRAY_NORMALIZED',
+ 'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING',
+ 'GL_VERTEX_ATTRIB_ARRAY_ENABLED',
+ 'GL_VERTEX_ATTRIB_ARRAY_SIZE',
+ 'GL_VERTEX_ATTRIB_ARRAY_STRIDE',
+ 'GL_VERTEX_ATTRIB_ARRAY_TYPE',
+ 'GL_CURRENT_VERTEX_ATTRIB',
+ ],
+ },
+ 'VertexPointer': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_VERTEX_ATTRIB_ARRAY_POINTER',
+ ],
+ },
+ 'HintTarget': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_GENERATE_MIPMAP_HINT',
+ ],
+ 'invalid': [
+ 'GL_PERSPECTIVE_CORRECTION_HINT',
+ ],
+ },
+ 'HintMode': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_FASTEST',
+ 'GL_NICEST',
+ 'GL_DONT_CARE',
+ ],
+ },
+ 'PixelStore': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_PACK_ALIGNMENT',
+ 'GL_UNPACK_ALIGNMENT',
+ 'GL_UNPACK_FLIP_Y_CHROMIUM',
+ 'GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM',
+ 'GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM',
+ ],
+ 'invalid': [
+ 'GL_PACK_SWAP_BYTES',
+ 'GL_UNPACK_SWAP_BYTES',
+ ],
+ },
+ 'PixelStoreAlignment': {
+ 'type': 'GLint',
+ 'valid': [
+ '1',
+ '2',
+ '4',
+ '8',
+ ],
+ 'invalid': [
+ '3',
+ '9',
+ ],
+ },
+ 'ReadPixelFormat': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_ALPHA',
+ 'GL_RGB',
+ 'GL_RGBA',
+ ],
+ },
+ 'PixelType': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_UNSIGNED_BYTE',
+ 'GL_UNSIGNED_SHORT_5_6_5',
+ 'GL_UNSIGNED_SHORT_4_4_4_4',
+ 'GL_UNSIGNED_SHORT_5_5_5_1',
+ ],
+ 'invalid': [
+ 'GL_SHORT',
+ 'GL_INT',
+ ],
+ },
+ 'ReadPixelType': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_UNSIGNED_BYTE',
+ 'GL_UNSIGNED_SHORT_5_6_5',
+ 'GL_UNSIGNED_SHORT_4_4_4_4',
+ 'GL_UNSIGNED_SHORT_5_5_5_1',
+ ],
+ 'invalid': [
+ 'GL_SHORT',
+ 'GL_INT',
+ ],
+ },
+ 'RenderBufferFormat': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_RGBA4',
+ 'GL_RGB565',
+ 'GL_RGB5_A1',
+ 'GL_DEPTH_COMPONENT16',
+ 'GL_STENCIL_INDEX8',
+ ],
+ },
+ 'ShaderBinaryFormat': {
+ 'type': 'GLenum',
+ 'valid': [
+ ],
+ },
+ 'StencilOp': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_KEEP',
+ 'GL_ZERO',
+ 'GL_REPLACE',
+ 'GL_INCR',
+ 'GL_INCR_WRAP',
+ 'GL_DECR',
+ 'GL_DECR_WRAP',
+ 'GL_INVERT',
+ ],
+ },
+ 'TextureFormat': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_ALPHA',
+ 'GL_LUMINANCE',
+ 'GL_LUMINANCE_ALPHA',
+ 'GL_RGB',
+ 'GL_RGBA',
+ ],
+ 'invalid': [
+ 'GL_BGRA',
+ 'GL_BGR',
+ ],
+ },
+ 'TextureInternalFormat': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_ALPHA',
+ 'GL_LUMINANCE',
+ 'GL_LUMINANCE_ALPHA',
+ 'GL_RGB',
+ 'GL_RGBA',
+ ],
+ 'invalid': [
+ 'GL_BGRA',
+ 'GL_BGR',
+ ],
+ },
+ 'TextureInternalFormatStorage': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_RGB565',
+ 'GL_RGBA4',
+ 'GL_RGB5_A1',
+ 'GL_ALPHA8_EXT',
+ 'GL_LUMINANCE8_EXT',
+ 'GL_LUMINANCE8_ALPHA8_EXT',
+ 'GL_RGB8_OES',
+ 'GL_RGBA8_OES',
+ ],
+ },
+ 'ImageInternalFormat': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_RGB',
+ 'GL_RGBA',
+ ],
+ },
+ 'ImageUsage': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_MAP_CHROMIUM',
+ 'GL_SCANOUT_CHROMIUM'
+ ],
+ },
+ 'VertexAttribType': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_BYTE',
+ 'GL_UNSIGNED_BYTE',
+ 'GL_SHORT',
+ 'GL_UNSIGNED_SHORT',
+ # 'GL_FIXED', // This is not available on Desktop GL.
+ 'GL_FLOAT',
+ ],
+ 'invalid': [
+ 'GL_DOUBLE',
+ ],
+ },
+ 'TextureBorder': {
+ 'type': 'GLint',
+ 'is_complete': True,
+ 'valid': [
+ '0',
+ ],
+ 'invalid': [
+ '1',
+ ],
+ },
+ 'VertexAttribSize': {
+ 'type': 'GLint',
+ 'valid': [
+ '1',
+ '2',
+ '3',
+ '4',
+ ],
+ 'invalid': [
+ '0',
+ '5',
+ ],
+ },
+ 'ZeroOnly': {
+ 'type': 'GLint',
+ 'is_complete': True,
+ 'valid': [
+ '0',
+ ],
+ 'invalid': [
+ '1',
+ ],
+ },
+ 'FalseOnly': {
+ 'type': 'GLboolean',
+ 'is_complete': True,
+ 'valid': [
+ 'false',
+ ],
+ 'invalid': [
+ 'true',
+ ],
+ },
+ 'ResetStatus': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_GUILTY_CONTEXT_RESET_ARB',
+ 'GL_INNOCENT_CONTEXT_RESET_ARB',
+ 'GL_UNKNOWN_CONTEXT_RESET_ARB',
+ ],
+ },
+}
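+
+# Illustrative helper, not part of the generator: shows how an entry in
+# _NAMED_TYPE_INFO above can be consulted.  Note that unless 'is_complete' is
+# set, the 'valid' list may be extended at runtime, so a miss here does not
+# necessarily mean the value is invalid.
+def _ExampleIsKnownValid(named_type, value, table=_NAMED_TYPE_INFO):
+  """Returns True if |value| is in the known-valid list for |named_type|."""
+  return value in table[named_type].get('valid', [])
+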
+
+# This table specifies the different pepper interfaces that are supported for
+# GL commands. 'dev' is true if it's a dev interface.
+_PEPPER_INTERFACES = [
+ {'name': '', 'dev': False},
+ {'name': 'InstancedArrays', 'dev': False},
+ {'name': 'FramebufferBlit', 'dev': False},
+ {'name': 'FramebufferMultisample', 'dev': False},
+ {'name': 'ChromiumEnableFeature', 'dev': False},
+ {'name': 'ChromiumMapSub', 'dev': False},
+ {'name': 'Query', 'dev': False},
+ {'name': 'VertexArrayObject', 'dev': False},
+ {'name': 'DrawBuffers', 'dev': True},
+]
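+
+# Illustrative note (an assumption about naming, not stated in this file): an
+# empty 'name' denotes the core GLES2 Pepper interface, while the other
+# entries correspond to suffixed interfaces such as the InstancedArrays or
+# Query variants, with 'dev' selecting a Dev-channel interface.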
+
+# A function info object specifies the type and other special data for the
+# command that will be generated. A base function info object is generated by
+# parsing the "cmd_buffer_functions.txt", one for each function in the
+# file. These function info objects can be augmented and their values can be
+# overridden by adding an object to the table below.
+#
+# Must match function names specified in "cmd_buffer_functions.txt".
+#
+# cmd_comment: A comment added to the cmd format.
+# type: defines which handler will be used to generate code.
+# decoder_func: defines which function to call in the decoder to execute the
+# corresponding GL command. If not specified the GL command will
+# be called directly.
+# gl_test_func: GL function that is expected to be called when testing.
+# cmd_args: The arguments to use for the command. This overrides generating
+# them based on the GL function arguments.
+# gen_cmd: Whether or not this function generates a command. Default = True.
+# data_transfer_methods: Array of methods that are used for transferring the
+# pointer data. Possible values: 'immediate', 'shm', 'bucket'.
+# The default is 'immediate' if the command has one pointer
+# argument, otherwise 'shm'. One command is generated for each
+# transfer method. Affects only commands which are not of type
+# 'HandWritten', 'GETn' or 'GLcharN'.
+# Note: the command arguments that affect this are the final args,
+# taking cmd_args override into consideration.
+# impl_func: Whether or not to generate the GLES2Implementation part of this
+# command.
+# impl_decl: Whether or not to generate the GLES2Implementation declaration
+# for this command.
+# needs_size: If true a data_size field is added to the command.
+# count: The number of units per element. For PUTn or PUT types.
+# unit_test: If False no service side unit test will be generated.
+# client_test: If False no client side unit test will be generated.
+# expectation: If False the unit test will have no expected calls.
+# gen_func: Name of function that generates GL resource for corresponding
+# bind function.
+# states: array of states that get set by this function corresponding to
+# the given arguments
+# state_flag: name of flag that is set to true when function is called.
+# no_gl: no GL function is called.
+# valid_args: A dictionary of argument indices to args to use in unit tests
+#     when they cannot be automatically determined.
+# pepper_interface: The pepper interface that is used for this extension
+# pepper_name: The name of the function as exposed to pepper.
+# pepper_args: A string representing the argument list (what would appear in
+# C/C++ between the parentheses for the function declaration)
+# that the Pepper API expects for this function. Use this only if
+# the stable Pepper API differs from the GLES2 argument list.
+# invalid_test: False if no invalid test needed.
+# shadowed: True = the value is shadowed so no glGetXXX call will be made.
+# first_element_only: For PUT types, True if only the first element of an
+#     array is used and we end up calling the corresponding single-value
+#     function, e.g. TexParameteriv -> TexParameteri.
+# extension: Function is an extension to GL and should not be exposed to
+# pepper unless pepper_interface is defined.
+# extension_flag: Function is an extension and should be enabled only when
+# the corresponding feature info flag is enabled. Implies
+# 'extension': True.
+# not_shared: For GENn types, True if objects can't be shared between contexts
+
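+# Illustrative sketch of the augmentation described above (an assumption
+# about usage, not the generator's actual code): each entry in _FUNCTION_INFO
+# acts as a set of per-function overrides layered on top of the base info
+# parsed from "cmd_buffer_functions.txt".
+def _ExampleApplyOverrides(base_info, overrides):
+  """Returns base_info with the keys from overrides layered on top."""
+  merged = dict(base_info)
+  merged.update(overrides)
+  return merged
+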
+_FUNCTION_INFO = {
+ 'ActiveTexture': {
+ 'decoder_func': 'DoActiveTexture',
+ 'unit_test': False,
+ 'impl_func': False,
+ 'client_test': False,
+ },
+ 'AttachShader': {'decoder_func': 'DoAttachShader'},
+ 'BindAttribLocation': {
+ 'type': 'GLchar',
+ 'data_transfer_methods': ['bucket'],
+ 'needs_size': True,
+ },
+ 'BindBuffer': {
+ 'type': 'Bind',
+ 'decoder_func': 'DoBindBuffer',
+ 'gen_func': 'GenBuffersARB',
+ },
+ 'BindFramebuffer': {
+ 'type': 'Bind',
+ 'decoder_func': 'DoBindFramebuffer',
+ 'gl_test_func': 'glBindFramebufferEXT',
+ 'gen_func': 'GenFramebuffersEXT',
+ 'trace_level': 1,
+ },
+ 'BindRenderbuffer': {
+ 'type': 'Bind',
+ 'decoder_func': 'DoBindRenderbuffer',
+ 'gl_test_func': 'glBindRenderbufferEXT',
+ 'gen_func': 'GenRenderbuffersEXT',
+ },
+ 'BindTexture': {
+ 'type': 'Bind',
+ 'decoder_func': 'DoBindTexture',
+ 'gen_func': 'GenTextures',
+ # TODO(gman): remove this once client side caching works.
+ 'client_test': False,
+ 'trace_level': 1,
+ },
+ 'BlitFramebufferCHROMIUM': {
+ 'decoder_func': 'DoBlitFramebufferCHROMIUM',
+ 'unit_test': False,
+ 'extension_flag': 'chromium_framebuffer_multisample',
+ 'pepper_interface': 'FramebufferBlit',
+ 'pepper_name': 'BlitFramebufferEXT',
+ 'defer_reads': True,
+ 'defer_draws': True,
+ 'trace_level': 1,
+ },
+ 'BufferData': {
+ 'type': 'Manual',
+ 'data_transfer_methods': ['shm'],
+ 'client_test': False,
+ },
+ 'BufferSubData': {
+ 'type': 'Data',
+ 'client_test': False,
+ 'decoder_func': 'DoBufferSubData',
+ 'data_transfer_methods': ['shm'],
+ },
+ 'CheckFramebufferStatus': {
+ 'type': 'Is',
+ 'decoder_func': 'DoCheckFramebufferStatus',
+ 'gl_test_func': 'glCheckFramebufferStatusEXT',
+ 'error_value': 'GL_FRAMEBUFFER_UNSUPPORTED',
+ 'result': ['GLenum'],
+ },
+ 'Clear': {
+ 'decoder_func': 'DoClear',
+ 'defer_draws': True,
+ 'trace_level': 1,
+ },
+ 'ClearColor': {
+ 'type': 'StateSet',
+ 'state': 'ClearColor',
+ },
+ 'ClearDepthf': {
+ 'type': 'StateSet',
+ 'state': 'ClearDepthf',
+ 'decoder_func': 'glClearDepth',
+ 'gl_test_func': 'glClearDepth',
+ 'valid_args': {
+ '0': '0.5f'
+ },
+ },
+ 'ColorMask': {
+ 'type': 'StateSet',
+ 'state': 'ColorMask',
+ 'no_gl': True,
+ 'expectation': False,
+ },
+ 'ConsumeTextureCHROMIUM': {
+ 'decoder_func': 'DoConsumeTextureCHROMIUM',
+ 'impl_func': False,
+ 'type': 'PUT',
+ 'count': 64, # GL_MAILBOX_SIZE_CHROMIUM
+ 'unit_test': False,
+ 'client_test': False,
+ 'extension': "CHROMIUM_texture_mailbox",
+ 'chromium': True,
+ 'trace_level': 1,
+ },
+ 'CreateAndConsumeTextureCHROMIUM': {
+ 'decoder_func': 'DoCreateAndConsumeTextureCHROMIUM',
+ 'impl_func': False,
+ 'type': 'HandWritten',
+ 'data_transfer_methods': ['immediate'],
+ 'unit_test': False,
+ 'client_test': False,
+ 'extension': "CHROMIUM_texture_mailbox",
+ 'chromium': True,
+ },
+ 'ClearStencil': {
+ 'type': 'StateSet',
+ 'state': 'ClearStencil',
+ },
+ 'EnableFeatureCHROMIUM': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'decoder_func': 'DoEnableFeatureCHROMIUM',
+ 'expectation': False,
+ 'cmd_args': 'GLuint bucket_id, GLint* result',
+ 'result': ['GLint'],
+ 'extension': True,
+ 'chromium': True,
+ 'pepper_interface': 'ChromiumEnableFeature',
+ },
+ 'CompileShader': {'decoder_func': 'DoCompileShader', 'unit_test': False},
+ 'CompressedTexImage2D': {
+ 'type': 'Manual',
+ 'data_transfer_methods': ['bucket', 'shm'],
+ },
+ 'CompressedTexSubImage2D': {
+ 'type': 'Data',
+ 'data_transfer_methods': ['bucket', 'shm'],
+ 'decoder_func': 'DoCompressedTexSubImage2D',
+ },
+ 'CopyTexImage2D': {
+ 'decoder_func': 'DoCopyTexImage2D',
+ 'unit_test': False,
+ 'defer_reads': True,
+ },
+ 'CopyTexSubImage2D': {
+ 'decoder_func': 'DoCopyTexSubImage2D',
+ 'defer_reads': True,
+ },
+ 'CreateImageCHROMIUM': {
+ 'type': 'Manual',
+ 'cmd_args':
+ 'GLsizei width, GLsizei height, GLenum internalformat, GLenum usage',
+ 'result': ['GLuint'],
+ 'client_test': False,
+ 'gen_cmd': False,
+ 'expectation': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'DestroyImageCHROMIUM': {
+ 'type': 'Manual',
+ 'client_test': False,
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'GetImageParameterivCHROMIUM': {
+ 'type': 'Manual',
+ 'client_test': False,
+ 'gen_cmd': False,
+ 'expectation': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'CreateGpuMemoryBufferImageCHROMIUM': {
+ 'type': 'Manual',
+ 'cmd_args':
+ 'GLsizei width, GLsizei height, GLenum internalformat, GLenum usage',
+ 'result': ['GLuint'],
+ 'client_test': False,
+ 'gen_cmd': False,
+ 'expectation': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'CreateProgram': {
+ 'type': 'Create',
+ 'client_test': False,
+ },
+ 'CreateShader': {
+ 'type': 'Create',
+ 'client_test': False,
+ },
+ 'BlendColor': {
+ 'type': 'StateSet',
+ 'state': 'BlendColor',
+ },
+ 'BlendEquation': {
+ 'type': 'StateSetRGBAlpha',
+ 'state': 'BlendEquation',
+ 'valid_args': {
+ '0': 'GL_FUNC_SUBTRACT'
+ },
+ },
+ 'BlendEquationSeparate': {
+ 'type': 'StateSet',
+ 'state': 'BlendEquation',
+ 'valid_args': {
+ '0': 'GL_FUNC_SUBTRACT'
+ },
+ },
+ 'BlendFunc': {
+ 'type': 'StateSetRGBAlpha',
+ 'state': 'BlendFunc',
+ },
+ 'BlendFuncSeparate': {
+ 'type': 'StateSet',
+ 'state': 'BlendFunc',
+ },
+ 'SampleCoverage': {'decoder_func': 'DoSampleCoverage'},
+ 'StencilFunc': {
+ 'type': 'StateSetFrontBack',
+ 'state': 'StencilFunc',
+ },
+ 'StencilFuncSeparate': {
+ 'type': 'StateSetFrontBackSeparate',
+ 'state': 'StencilFunc',
+ },
+ 'StencilOp': {
+ 'type': 'StateSetFrontBack',
+ 'state': 'StencilOp',
+ 'valid_args': {
+ '1': 'GL_INCR'
+ },
+ },
+ 'StencilOpSeparate': {
+ 'type': 'StateSetFrontBackSeparate',
+ 'state': 'StencilOp',
+ 'valid_args': {
+ '1': 'GL_INCR'
+ },
+ },
+ 'Hint': {
+ 'type': 'StateSetNamedParameter',
+ 'state': 'Hint',
+ },
+ 'CullFace': {'type': 'StateSet', 'state': 'CullFace'},
+ 'FrontFace': {'type': 'StateSet', 'state': 'FrontFace'},
+ 'DepthFunc': {'type': 'StateSet', 'state': 'DepthFunc'},
+ 'LineWidth': {
+ 'type': 'StateSet',
+ 'state': 'LineWidth',
+ 'valid_args': {
+ '0': '0.5f'
+ },
+ },
+ 'PolygonOffset': {
+ 'type': 'StateSet',
+ 'state': 'PolygonOffset',
+ },
+ 'DeleteBuffers': {
+ 'type': 'DELn',
+ 'gl_test_func': 'glDeleteBuffersARB',
+ 'resource_type': 'Buffer',
+ 'resource_types': 'Buffers',
+ },
+ 'DeleteFramebuffers': {
+ 'type': 'DELn',
+ 'gl_test_func': 'glDeleteFramebuffersEXT',
+ 'resource_type': 'Framebuffer',
+ 'resource_types': 'Framebuffers',
+ },
+ 'DeleteProgram': {'type': 'Delete', 'decoder_func': 'DoDeleteProgram'},
+ 'DeleteRenderbuffers': {
+ 'type': 'DELn',
+ 'gl_test_func': 'glDeleteRenderbuffersEXT',
+ 'resource_type': 'Renderbuffer',
+ 'resource_types': 'Renderbuffers',
+ },
+ 'DeleteShader': {'type': 'Delete', 'decoder_func': 'DoDeleteShader'},
+ 'DeleteTextures': {
+ 'type': 'DELn',
+ 'resource_type': 'Texture',
+ 'resource_types': 'Textures',
+ },
+ 'DepthRangef': {
+ 'decoder_func': 'DoDepthRangef',
+ 'gl_test_func': 'glDepthRange',
+ },
+ 'DepthMask': {
+ 'type': 'StateSet',
+ 'state': 'DepthMask',
+ 'no_gl': True,
+ 'expectation': False,
+ },
+ 'DetachShader': {'decoder_func': 'DoDetachShader'},
+ 'Disable': {
+ 'decoder_func': 'DoDisable',
+ 'impl_func': False,
+ 'client_test': False,
+ },
+ 'DisableVertexAttribArray': {
+ 'decoder_func': 'DoDisableVertexAttribArray',
+ 'impl_decl': False,
+ },
+ 'DrawArrays': {
+ 'type': 'Manual',
+ 'cmd_args': 'GLenumDrawMode mode, GLint first, GLsizei count',
+ 'defer_draws': True,
+ 'trace_level': 2,
+ },
+ 'DrawElements': {
+ 'type': 'Manual',
+ 'cmd_args': 'GLenumDrawMode mode, GLsizei count, '
+ 'GLenumIndexType type, GLuint index_offset',
+ 'client_test': False,
+ 'defer_draws': True,
+ 'trace_level': 2,
+ },
+ 'Enable': {
+ 'decoder_func': 'DoEnable',
+ 'impl_func': False,
+ 'client_test': False,
+ },
+ 'EnableVertexAttribArray': {
+ 'decoder_func': 'DoEnableVertexAttribArray',
+ 'impl_decl': False,
+ },
+ 'Finish': {
+ 'impl_func': False,
+ 'client_test': False,
+ 'decoder_func': 'DoFinish',
+ 'defer_reads': True,
+ },
+ 'Flush': {
+ 'impl_func': False,
+ 'decoder_func': 'DoFlush',
+ },
+ 'FramebufferRenderbuffer': {
+ 'decoder_func': 'DoFramebufferRenderbuffer',
+ 'gl_test_func': 'glFramebufferRenderbufferEXT',
+ },
+ 'FramebufferTexture2D': {
+ 'decoder_func': 'DoFramebufferTexture2D',
+ 'gl_test_func': 'glFramebufferTexture2DEXT',
+ 'trace_level': 1,
+ },
+ 'FramebufferTexture2DMultisampleEXT': {
+ 'decoder_func': 'DoFramebufferTexture2DMultisample',
+ 'gl_test_func': 'glFramebufferTexture2DMultisampleEXT',
+ 'expectation': False,
+ 'unit_test': False,
+ 'extension_flag': 'multisampled_render_to_texture',
+ 'trace_level': 1,
+ },
+ 'GenerateMipmap': {
+ 'decoder_func': 'DoGenerateMipmap',
+ 'gl_test_func': 'glGenerateMipmapEXT',
+ },
+ 'GenBuffers': {
+ 'type': 'GENn',
+ 'gl_test_func': 'glGenBuffersARB',
+ 'resource_type': 'Buffer',
+ 'resource_types': 'Buffers',
+ },
+ 'GenMailboxCHROMIUM': {
+ 'type': 'HandWritten',
+ 'impl_func': False,
+ 'extension': "CHROMIUM_texture_mailbox",
+ 'chromium': True,
+ },
+ 'GenFramebuffers': {
+ 'type': 'GENn',
+ 'gl_test_func': 'glGenFramebuffersEXT',
+ 'resource_type': 'Framebuffer',
+ 'resource_types': 'Framebuffers',
+ },
+ 'GenRenderbuffers': {
+ 'type': 'GENn', 'gl_test_func': 'glGenRenderbuffersEXT',
+ 'resource_type': 'Renderbuffer',
+ 'resource_types': 'Renderbuffers',
+ },
+ 'GenTextures': {
+ 'type': 'GENn',
+ 'gl_test_func': 'glGenTextures',
+ 'resource_type': 'Texture',
+ 'resource_types': 'Textures',
+ },
+ 'GetActiveAttrib': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'cmd_args':
+ 'GLidProgram program, GLuint index, uint32_t name_bucket_id, '
+ 'void* result',
+ 'result': [
+ 'int32_t success',
+ 'int32_t size',
+ 'uint32_t type',
+ ],
+ },
+ 'GetActiveUniform': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'cmd_args':
+ 'GLidProgram program, GLuint index, uint32_t name_bucket_id, '
+ 'void* result',
+ 'result': [
+ 'int32_t success',
+ 'int32_t size',
+ 'uint32_t type',
+ ],
+ },
+ 'GetAttachedShaders': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'cmd_args': 'GLidProgram program, void* result, uint32_t result_size',
+ 'result': ['SizedResult<GLuint>'],
+ },
+ 'GetAttribLocation': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'cmd_args':
+ 'GLidProgram program, uint32_t name_bucket_id, GLint* location',
+ 'result': ['GLint'],
+ 'error_return': -1, # http://www.opengl.org/sdk/docs/man/xhtml/glGetAttribLocation.xml
+ },
+ 'GetBooleanv': {
+ 'type': 'GETn',
+ 'result': ['SizedResult<GLboolean>'],
+ 'decoder_func': 'DoGetBooleanv',
+ 'gl_test_func': 'glGetBooleanv',
+ },
+ 'GetBufferParameteriv': {
+ 'type': 'GETn',
+ 'result': ['SizedResult<GLint>'],
+ 'decoder_func': 'DoGetBufferParameteriv',
+ 'expectation': False,
+ 'shadowed': True,
+ },
+ 'GetError': {
+ 'type': 'Is',
+ 'decoder_func': 'GetErrorState()->GetGLError',
+ 'impl_func': False,
+ 'result': ['GLenum'],
+ 'client_test': False,
+ },
+ 'GetFloatv': {
+ 'type': 'GETn',
+ 'result': ['SizedResult<GLfloat>'],
+ 'decoder_func': 'DoGetFloatv',
+ 'gl_test_func': 'glGetFloatv',
+ },
+ 'GetFramebufferAttachmentParameteriv': {
+ 'type': 'GETn',
+ 'decoder_func': 'DoGetFramebufferAttachmentParameteriv',
+ 'gl_test_func': 'glGetFramebufferAttachmentParameterivEXT',
+ 'result': ['SizedResult<GLint>'],
+ },
+ 'GetIntegerv': {
+ 'type': 'GETn',
+ 'result': ['SizedResult<GLint>'],
+ 'decoder_func': 'DoGetIntegerv',
+ 'client_test': False,
+ },
+ 'GetMaxValueInBufferCHROMIUM': {
+ 'type': 'Is',
+ 'decoder_func': 'DoGetMaxValueInBufferCHROMIUM',
+ 'result': ['GLuint'],
+ 'unit_test': False,
+ 'client_test': False,
+ 'extension': True,
+ 'chromium': True,
+ 'impl_func': False,
+ },
+ 'GetMultipleIntegervCHROMIUM': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'expectation': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ },
+ 'GetProgramiv': {
+ 'type': 'GETn',
+ 'decoder_func': 'DoGetProgramiv',
+ 'result': ['SizedResult<GLint>'],
+ 'expectation': False,
+ },
+ 'GetProgramInfoCHROMIUM': {
+ 'type': 'Custom',
+ 'expectation': False,
+ 'impl_func': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ 'cmd_args': 'GLidProgram program, uint32_t bucket_id',
+ 'result': [
+ 'uint32_t link_status',
+ 'uint32_t num_attribs',
+ 'uint32_t num_uniforms',
+ ],
+ },
+ 'GetProgramInfoLog': {
+ 'type': 'STRn',
+ 'expectation': False,
+ },
+ 'GetRenderbufferParameteriv': {
+ 'type': 'GETn',
+ 'decoder_func': 'DoGetRenderbufferParameteriv',
+ 'gl_test_func': 'glGetRenderbufferParameterivEXT',
+ 'result': ['SizedResult<GLint>'],
+ },
+ 'GetShaderiv': {
+ 'type': 'GETn',
+ 'decoder_func': 'DoGetShaderiv',
+ 'result': ['SizedResult<GLint>'],
+ },
+ 'GetShaderInfoLog': {
+ 'type': 'STRn',
+ 'get_len_func': 'glGetShaderiv',
+ 'get_len_enum': 'GL_INFO_LOG_LENGTH',
+ 'unit_test': False,
+ },
+ 'GetShaderPrecisionFormat': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'cmd_args':
+ 'GLenumShaderType shadertype, GLenumShaderPrecision precisiontype, '
+ 'void* result',
+ 'result': [
+ 'int32_t success',
+ 'int32_t min_range',
+ 'int32_t max_range',
+ 'int32_t precision',
+ ],
+ },
+ 'GetShaderSource': {
+ 'type': 'STRn',
+ 'get_len_func': 'DoGetShaderiv',
+ 'get_len_enum': 'GL_SHADER_SOURCE_LENGTH',
+ 'unit_test': False,
+ 'client_test': False,
+ },
+ 'GetString': {
+ 'type': 'Custom',
+ 'client_test': False,
+ 'cmd_args': 'GLenumStringType name, uint32_t bucket_id',
+ },
+ 'GetTexParameterfv': {
+ 'type': 'GETn',
+ 'decoder_func': 'DoGetTexParameterfv',
+ 'result': ['SizedResult<GLfloat>']
+ },
+ 'GetTexParameteriv': {
+ 'type': 'GETn',
+ 'decoder_func': 'DoGetTexParameteriv',
+ 'result': ['SizedResult<GLint>']
+ },
+ 'GetTranslatedShaderSourceANGLE': {
+ 'type': 'STRn',
+ 'get_len_func': 'DoGetShaderiv',
+ 'get_len_enum': 'GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE',
+ 'unit_test': False,
+ 'extension': True,
+ },
+ 'GetUniformfv': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'result': ['SizedResult<GLfloat>'],
+ },
+ 'GetUniformiv': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'result': ['SizedResult<GLint>'],
+ },
+ 'GetUniformLocation': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'cmd_args':
+ 'GLidProgram program, uint32_t name_bucket_id, GLint* location',
+ 'result': ['GLint'],
+ 'error_return': -1, # http://www.opengl.org/sdk/docs/man/xhtml/glGetUniformLocation.xml
+ },
+ 'GetVertexAttribfv': {
+ 'type': 'GETn',
+ 'result': ['SizedResult<GLfloat>'],
+ 'impl_decl': False,
+ 'decoder_func': 'DoGetVertexAttribfv',
+ 'expectation': False,
+ 'client_test': False,
+ },
+ 'GetVertexAttribiv': {
+ 'type': 'GETn',
+ 'result': ['SizedResult<GLint>'],
+ 'impl_decl': False,
+ 'decoder_func': 'DoGetVertexAttribiv',
+ 'expectation': False,
+ 'client_test': False,
+ },
+ 'GetVertexAttribPointerv': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'result': ['SizedResult<GLuint>'],
+ 'client_test': False,
+ },
+ 'IsBuffer': {
+ 'type': 'Is',
+ 'decoder_func': 'DoIsBuffer',
+ 'expectation': False,
+ },
+ 'IsEnabled': {
+ 'type': 'Is',
+ 'decoder_func': 'DoIsEnabled',
+ 'impl_func': False,
+ 'expectation': False,
+ },
+ 'IsFramebuffer': {
+ 'type': 'Is',
+ 'decoder_func': 'DoIsFramebuffer',
+ 'expectation': False,
+ },
+ 'IsProgram': {
+ 'type': 'Is',
+ 'decoder_func': 'DoIsProgram',
+ 'expectation': False,
+ },
+ 'IsRenderbuffer': {
+ 'type': 'Is',
+ 'decoder_func': 'DoIsRenderbuffer',
+ 'expectation': False,
+ },
+ 'IsShader': {
+ 'type': 'Is',
+ 'decoder_func': 'DoIsShader',
+ 'expectation': False,
+ },
+ 'IsTexture': {
+ 'type': 'Is',
+ 'decoder_func': 'DoIsTexture',
+ 'expectation': False,
+ },
+ 'LinkProgram': {
+ 'decoder_func': 'DoLinkProgram',
+ 'impl_func': False,
+ },
+ 'MapBufferCHROMIUM': {
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ },
+ 'MapBufferSubDataCHROMIUM': {
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ 'pepper_interface': 'ChromiumMapSub',
+ },
+ 'MapImageCHROMIUM': {
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ },
+ 'MapTexSubImage2DCHROMIUM': {
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ 'pepper_interface': 'ChromiumMapSub',
+ },
+ 'PixelStorei': {'type': 'Manual'},
+ 'PostSubBufferCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'unit_test': False,
+ 'client_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'ProduceTextureCHROMIUM': {
+ 'decoder_func': 'DoProduceTextureCHROMIUM',
+ 'impl_func': False,
+ 'type': 'PUT',
+ 'count': 64, # GL_MAILBOX_SIZE_CHROMIUM
+ 'unit_test': False,
+ 'client_test': False,
+ 'extension': "CHROMIUM_texture_mailbox",
+ 'chromium': True,
+ 'trace_level': 1,
+ },
+ 'ProduceTextureDirectCHROMIUM': {
+ 'decoder_func': 'DoProduceTextureDirectCHROMIUM',
+ 'impl_func': False,
+ 'type': 'PUT',
+ 'count': 64, # GL_MAILBOX_SIZE_CHROMIUM
+ 'unit_test': False,
+ 'client_test': False,
+ 'extension': "CHROMIUM_texture_mailbox",
+ 'chromium': True,
+ 'trace_level': 1,
+ },
+ 'RenderbufferStorage': {
+ 'decoder_func': 'DoRenderbufferStorage',
+ 'gl_test_func': 'glRenderbufferStorageEXT',
+ 'expectation': False,
+ },
+ 'RenderbufferStorageMultisampleCHROMIUM': {
+ 'cmd_comment':
+ '// GL_CHROMIUM_framebuffer_multisample\n',
+ 'decoder_func': 'DoRenderbufferStorageMultisampleCHROMIUM',
+ 'gl_test_func': 'glRenderbufferStorageMultisampleCHROMIUM',
+ 'expectation': False,
+ 'unit_test': False,
+ 'extension_flag': 'chromium_framebuffer_multisample',
+ 'pepper_interface': 'FramebufferMultisample',
+ 'pepper_name': 'RenderbufferStorageMultisampleEXT',
+ },
+ 'RenderbufferStorageMultisampleEXT': {
+ 'cmd_comment':
+ '// GL_EXT_multisampled_render_to_texture\n',
+ 'decoder_func': 'DoRenderbufferStorageMultisampleEXT',
+ 'gl_test_func': 'glRenderbufferStorageMultisampleEXT',
+ 'expectation': False,
+ 'unit_test': False,
+ 'extension_flag': 'multisampled_render_to_texture',
+ },
+ 'ReadPixels': {
+ 'cmd_comment':
+ '// ReadPixels has the result separated from the pixel buffer so that\n'
+ '// it is easier to specify the result going to some specific place\n'
+ '// that exactly fits the rectangle of pixels.\n',
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'impl_func': False,
+ 'client_test': False,
+ 'cmd_args':
+ 'GLint x, GLint y, GLsizei width, GLsizei height, '
+ 'GLenumReadPixelFormat format, GLenumReadPixelType type, '
+ 'uint32_t pixels_shm_id, uint32_t pixels_shm_offset, '
+ 'uint32_t result_shm_id, uint32_t result_shm_offset, '
+ 'GLboolean async',
+ 'result': ['uint32_t'],
+ 'defer_reads': True,
+ },
+ 'ReleaseShaderCompiler': {
+ 'decoder_func': 'DoReleaseShaderCompiler',
+ 'unit_test': False,
+ },
+ 'ShaderBinary': {
+ 'type': 'Custom',
+ 'client_test': False,
+ },
+ 'ShaderSource': {
+ 'type': 'Manual',
+ 'data_transfer_methods': ['bucket'],
+ 'needs_size': True,
+ 'client_test': False,
+ 'cmd_args':
+ 'GLuint shader, const char* data',
+ 'pepper_args':
+ 'GLuint shader, GLsizei count, const char** str, const GLint* length',
+ },
+ 'StencilMask': {
+ 'type': 'StateSetFrontBack',
+ 'state': 'StencilMask',
+ 'no_gl': True,
+ 'expectation': False,
+ },
+ 'StencilMaskSeparate': {
+ 'type': 'StateSetFrontBackSeparate',
+ 'state': 'StencilMask',
+ 'no_gl': True,
+ 'expectation': False,
+ },
+ 'SwapBuffers': {
+ 'impl_func': False,
+ 'decoder_func': 'DoSwapBuffers',
+ 'unit_test': False,
+ 'client_test': False,
+ 'extension': True,
+ 'trace_level': 1,
+ },
+ 'TexImage2D': {
+ 'type': 'Manual',
+ 'data_transfer_methods': ['shm'],
+ 'client_test': False,
+ },
+ 'TexParameterf': {
+ 'decoder_func': 'DoTexParameterf',
+ 'valid_args': {
+ '2': 'GL_NEAREST'
+ },
+ },
+ 'TexParameteri': {
+ 'decoder_func': 'DoTexParameteri',
+ 'valid_args': {
+ '2': 'GL_NEAREST'
+ },
+ },
+ 'TexParameterfv': {
+ 'type': 'PUT',
+ 'data_value': 'GL_NEAREST',
+ 'count': 1,
+ 'decoder_func': 'DoTexParameterfv',
+ 'gl_test_func': 'glTexParameterf',
+ 'first_element_only': True,
+ },
+ 'TexParameteriv': {
+ 'type': 'PUT',
+ 'data_value': 'GL_NEAREST',
+ 'count': 1,
+ 'decoder_func': 'DoTexParameteriv',
+ 'gl_test_func': 'glTexParameteri',
+ 'first_element_only': True,
+ },
+ 'TexSubImage2D': {
+ 'type': 'Manual',
+ 'data_transfer_methods': ['shm'],
+ 'client_test': False,
+ 'cmd_args': 'GLenumTextureTarget target, GLint level, '
+ 'GLint xoffset, GLint yoffset, '
+ 'GLsizei width, GLsizei height, '
+ 'GLenumTextureFormat format, GLenumPixelType type, '
+ 'const void* pixels, GLboolean internal'
+ },
+ 'Uniform1f': {'type': 'PUTXn', 'count': 1},
+ 'Uniform1fv': {
+ 'type': 'PUTn',
+ 'count': 1,
+ 'decoder_func': 'DoUniform1fv',
+ },
+ 'Uniform1i': {'decoder_func': 'DoUniform1i', 'unit_test': False},
+ 'Uniform1iv': {
+ 'type': 'PUTn',
+ 'count': 1,
+ 'decoder_func': 'DoUniform1iv',
+ 'unit_test': False,
+ },
+ 'Uniform2i': {'type': 'PUTXn', 'count': 2},
+ 'Uniform2f': {'type': 'PUTXn', 'count': 2},
+ 'Uniform2fv': {
+ 'type': 'PUTn',
+ 'count': 2,
+ 'decoder_func': 'DoUniform2fv',
+ },
+ 'Uniform2iv': {
+ 'type': 'PUTn',
+ 'count': 2,
+ 'decoder_func': 'DoUniform2iv',
+ },
+ 'Uniform3i': {'type': 'PUTXn', 'count': 3},
+ 'Uniform3f': {'type': 'PUTXn', 'count': 3},
+ 'Uniform3fv': {
+ 'type': 'PUTn',
+ 'count': 3,
+ 'decoder_func': 'DoUniform3fv',
+ },
+ 'Uniform3iv': {
+ 'type': 'PUTn',
+ 'count': 3,
+ 'decoder_func': 'DoUniform3iv',
+ },
+ 'Uniform4i': {'type': 'PUTXn', 'count': 4},
+ 'Uniform4f': {'type': 'PUTXn', 'count': 4},
+ 'Uniform4fv': {
+ 'type': 'PUTn',
+ 'count': 4,
+ 'decoder_func': 'DoUniform4fv',
+ },
+ 'Uniform4iv': {
+ 'type': 'PUTn',
+ 'count': 4,
+ 'decoder_func': 'DoUniform4iv',
+ },
+ 'UniformMatrix2fv': {
+ 'type': 'PUTn',
+ 'count': 4,
+ 'decoder_func': 'DoUniformMatrix2fv',
+ },
+ 'UniformMatrix3fv': {
+ 'type': 'PUTn',
+ 'count': 9,
+ 'decoder_func': 'DoUniformMatrix3fv',
+ },
+ 'UniformMatrix4fv': {
+ 'type': 'PUTn',
+ 'count': 16,
+ 'decoder_func': 'DoUniformMatrix4fv',
+ },
+ 'UnmapBufferCHROMIUM': {
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ },
+ 'UnmapBufferSubDataCHROMIUM': {
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ 'pepper_interface': 'ChromiumMapSub',
+ },
+ 'UnmapImageCHROMIUM': {
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ },
+ 'UnmapTexSubImage2DCHROMIUM': {
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ 'pepper_interface': 'ChromiumMapSub',
+ },
+ 'UseProgram': {
+ 'type': 'Bind',
+ 'decoder_func': 'DoUseProgram',
+ },
+ 'ValidateProgram': {'decoder_func': 'DoValidateProgram'},
+ 'VertexAttrib1f': {'decoder_func': 'DoVertexAttrib1f'},
+ 'VertexAttrib1fv': {
+ 'type': 'PUT',
+ 'count': 1,
+ 'decoder_func': 'DoVertexAttrib1fv',
+ },
+ 'VertexAttrib2f': {'decoder_func': 'DoVertexAttrib2f'},
+ 'VertexAttrib2fv': {
+ 'type': 'PUT',
+ 'count': 2,
+ 'decoder_func': 'DoVertexAttrib2fv',
+ },
+ 'VertexAttrib3f': {'decoder_func': 'DoVertexAttrib3f'},
+ 'VertexAttrib3fv': {
+ 'type': 'PUT',
+ 'count': 3,
+ 'decoder_func': 'DoVertexAttrib3fv',
+ },
+ 'VertexAttrib4f': {'decoder_func': 'DoVertexAttrib4f'},
+ 'VertexAttrib4fv': {
+ 'type': 'PUT',
+ 'count': 4,
+ 'decoder_func': 'DoVertexAttrib4fv',
+ },
+ 'VertexAttribPointer': {
+ 'type': 'Manual',
+ 'cmd_args': 'GLuint indx, GLintVertexAttribSize size, '
+ 'GLenumVertexAttribType type, GLboolean normalized, '
+ 'GLsizei stride, GLuint offset',
+ 'client_test': False,
+ },
+ 'Scissor': {
+ 'type': 'StateSet',
+ 'state': 'Scissor',
+ },
+ 'Viewport': {
+ 'decoder_func': 'DoViewport',
+ },
+ 'ResizeCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'unit_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'GetRequestableExtensionsCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'cmd_args': 'uint32_t bucket_id',
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'RequestExtensionCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'client_test': False,
+ 'cmd_args': 'uint32_t bucket_id',
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'RateLimitOffscreenContextCHROMIUM': {
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ },
+ 'CreateStreamTextureCHROMIUM': {
+ 'type': 'HandWritten',
+ 'impl_func': False,
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'TexImageIOSurface2DCHROMIUM': {
+ 'decoder_func': 'DoTexImageIOSurface2DCHROMIUM',
+ 'unit_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'CopyTextureCHROMIUM': {
+ 'decoder_func': 'DoCopyTextureCHROMIUM',
+ 'unit_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'TexStorage2DEXT': {
+ 'unit_test': False,
+ 'extension': True,
+ 'decoder_func': 'DoTexStorage2DEXT',
+ },
+ 'DrawArraysInstancedANGLE': {
+ 'type': 'Manual',
+ 'cmd_args': 'GLenumDrawMode mode, GLint first, GLsizei count, '
+ 'GLsizei primcount',
+ 'extension': True,
+ 'unit_test': False,
+ 'pepper_interface': 'InstancedArrays',
+ 'defer_draws': True,
+ },
+ 'DrawBuffersEXT': {
+ 'type': 'PUTn',
+ 'decoder_func': 'DoDrawBuffersEXT',
+ 'count': 1,
+ 'client_test': False,
+ 'unit_test': False,
+ # could use 'extension_flag': 'ext_draw_buffers' but currently expected to
+ # work without.
+ 'extension': True,
+ 'pepper_interface': 'DrawBuffers',
+ },
+ 'DrawElementsInstancedANGLE': {
+ 'type': 'Manual',
+ 'cmd_args': 'GLenumDrawMode mode, GLsizei count, '
+ 'GLenumIndexType type, GLuint index_offset, GLsizei primcount',
+ 'extension': True,
+ 'unit_test': False,
+ 'client_test': False,
+ 'pepper_interface': 'InstancedArrays',
+ 'defer_draws': True,
+ },
+ 'VertexAttribDivisorANGLE': {
+ 'type': 'Manual',
+ 'cmd_args': 'GLuint index, GLuint divisor',
+ 'extension': True,
+ 'unit_test': False,
+ 'pepper_interface': 'InstancedArrays',
+ },
+ 'GenQueriesEXT': {
+ 'type': 'GENn',
+ 'gl_test_func': 'glGenQueriesARB',
+ 'resource_type': 'Query',
+ 'resource_types': 'Queries',
+ 'unit_test': False,
+ 'pepper_interface': 'Query',
+ 'not_shared': 'True',
+ },
+ 'DeleteQueriesEXT': {
+ 'type': 'DELn',
+ 'gl_test_func': 'glDeleteQueriesARB',
+ 'resource_type': 'Query',
+ 'resource_types': 'Queries',
+ 'unit_test': False,
+ 'pepper_interface': 'Query',
+ },
+ 'IsQueryEXT': {
+ 'gen_cmd': False,
+ 'client_test': False,
+ 'pepper_interface': 'Query',
+ },
+ 'BeginQueryEXT': {
+ 'type': 'Manual',
+ 'cmd_args': 'GLenumQueryTarget target, GLidQuery id, void* sync_data',
+ 'data_transfer_methods': ['shm'],
+ 'gl_test_func': 'glBeginQuery',
+ 'pepper_interface': 'Query',
+ },
+ 'EndQueryEXT': {
+ 'type': 'Manual',
+ 'cmd_args': 'GLenumQueryTarget target, GLuint submit_count',
+ 'gl_test_func': 'glEndnQuery',
+ 'client_test': False,
+ 'pepper_interface': 'Query',
+ },
+ 'GetQueryivEXT': {
+ 'gen_cmd': False,
+ 'client_test': False,
+ 'gl_test_func': 'glGetQueryiv',
+ 'pepper_interface': 'Query',
+ },
+ 'GetQueryObjectuivEXT': {
+ 'gen_cmd': False,
+ 'client_test': False,
+ 'gl_test_func': 'glGetQueryObjectuiv',
+ 'pepper_interface': 'Query',
+ },
+ 'BindUniformLocationCHROMIUM': {
+ 'type': 'GLchar',
+ 'extension': True,
+ 'data_transfer_methods': ['bucket'],
+ 'needs_size': True,
+ 'gl_test_func': 'DoBindUniformLocationCHROMIUM',
+ },
+ 'InsertEventMarkerEXT': {
+ 'type': 'GLcharN',
+ 'decoder_func': 'DoInsertEventMarkerEXT',
+ 'expectation': False,
+ 'extension': True,
+ },
+ 'PushGroupMarkerEXT': {
+ 'type': 'GLcharN',
+ 'decoder_func': 'DoPushGroupMarkerEXT',
+ 'expectation': False,
+ 'extension': True,
+ },
+ 'PopGroupMarkerEXT': {
+ 'decoder_func': 'DoPopGroupMarkerEXT',
+ 'expectation': False,
+ 'extension': True,
+ 'impl_func': False,
+ },
+
+ 'GenVertexArraysOES': {
+ 'type': 'GENn',
+ 'extension': True,
+ 'gl_test_func': 'glGenVertexArraysOES',
+ 'resource_type': 'VertexArray',
+ 'resource_types': 'VertexArrays',
+ 'unit_test': False,
+ 'pepper_interface': 'VertexArrayObject',
+ },
+ 'BindVertexArrayOES': {
+ 'type': 'Bind',
+ 'extension': True,
+ 'gl_test_func': 'glBindVertexArrayOES',
+ 'decoder_func': 'DoBindVertexArrayOES',
+ 'gen_func': 'GenVertexArraysOES',
+ 'unit_test': False,
+ 'client_test': False,
+ 'pepper_interface': 'VertexArrayObject',
+ },
+ 'DeleteVertexArraysOES': {
+ 'type': 'DELn',
+ 'extension': True,
+ 'gl_test_func': 'glDeleteVertexArraysOES',
+ 'resource_type': 'VertexArray',
+ 'resource_types': 'VertexArrays',
+ 'unit_test': False,
+ 'pepper_interface': 'VertexArrayObject',
+ },
+ 'IsVertexArrayOES': {
+ 'type': 'Is',
+ 'extension': True,
+ 'gl_test_func': 'glIsVertexArrayOES',
+ 'decoder_func': 'DoIsVertexArrayOES',
+ 'expectation': False,
+ 'unit_test': False,
+ 'pepper_interface': 'VertexArrayObject',
+ },
+ 'BindTexImage2DCHROMIUM': {
+ 'decoder_func': 'DoBindTexImage2DCHROMIUM',
+ 'unit_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'ReleaseTexImage2DCHROMIUM': {
+ 'decoder_func': 'DoReleaseTexImage2DCHROMIUM',
+ 'unit_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'ShallowFinishCHROMIUM': {
+ 'impl_func': False,
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ },
+ 'ShallowFlushCHROMIUM': {
+ 'impl_func': False,
+ 'gen_cmd': False,
+ 'extension': True,
+ 'chromium': True,
+ 'client_test': False,
+ },
+ 'TraceBeginCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'client_test': False,
+ 'cmd_args': 'GLuint bucket_id',
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'TraceEndCHROMIUM': {
+ 'impl_func': False,
+ 'client_test': False,
+ 'decoder_func': 'DoTraceEndCHROMIUM',
+ 'unit_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'AsyncTexImage2DCHROMIUM': {
+ 'type': 'Manual',
+ 'data_transfer_methods': ['shm'],
+ 'client_test': False,
+ 'cmd_args': 'GLenumTextureTarget target, GLint level, '
+ 'GLintTextureInternalFormat internalformat, '
+ 'GLsizei width, GLsizei height, '
+ 'GLintTextureBorder border, '
+ 'GLenumTextureFormat format, GLenumPixelType type, '
+ 'const void* pixels, '
+ 'uint32_t async_upload_token, '
+ 'void* sync_data',
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'AsyncTexSubImage2DCHROMIUM': {
+ 'type': 'Manual',
+ 'data_transfer_methods': ['shm'],
+ 'client_test': False,
+ 'cmd_args': 'GLenumTextureTarget target, GLint level, '
+ 'GLint xoffset, GLint yoffset, '
+ 'GLsizei width, GLsizei height, '
+ 'GLenumTextureFormat format, GLenumPixelType type, '
+ 'const void* data, '
+ 'uint32_t async_upload_token, '
+ 'void* sync_data',
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'WaitAsyncTexImage2DCHROMIUM': {
+ 'type': 'Manual',
+ 'client_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'WaitAllAsyncTexImage2DCHROMIUM': {
+ 'type': 'Manual',
+ 'client_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'DiscardFramebufferEXT': {
+ 'type': 'PUTn',
+ 'count': 1,
+ 'cmd_args': 'GLenum target, GLsizei count, '
+ 'const GLenum* attachments',
+ 'decoder_func': 'DoDiscardFramebufferEXT',
+ 'unit_test': False,
+ 'client_test': False,
+ 'extension_flag': 'ext_discard_framebuffer',
+ },
+ 'LoseContextCHROMIUM': {
+ 'decoder_func': 'DoLoseContextCHROMIUM',
+ 'unit_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'InsertSyncPointCHROMIUM': {
+ 'type': 'HandWritten',
+ 'impl_func': False,
+ 'extension': "CHROMIUM_sync_point",
+ 'chromium': True,
+ },
+ 'WaitSyncPointCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': True,
+ 'extension': "CHROMIUM_sync_point",
+ 'chromium': True,
+ 'trace_level': 1,
+ },
+ 'DiscardBackbufferCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': True,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'ScheduleOverlayPlaneCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': True,
+ 'unit_test': False,
+ 'client_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
+ 'MatrixLoadfCHROMIUM': {
+ 'type': 'PUT',
+ 'count': 16,
+ 'data_type': 'GLfloat',
+ 'decoder_func': 'DoMatrixLoadfCHROMIUM',
+ 'gl_test_func': 'glMatrixLoadfEXT',
+ 'chromium': True,
+ 'extension': True,
+ 'extension_flag': 'chromium_path_rendering',
+ },
+ 'MatrixLoadIdentityCHROMIUM': {
+ 'decoder_func': 'DoMatrixLoadIdentityCHROMIUM',
+ 'gl_test_func': 'glMatrixLoadIdentityEXT',
+ 'chromium': True,
+ 'extension': True,
+ 'extension_flag': 'chromium_path_rendering',
+ },
+}
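+# Each entry above overrides how the named GL function is generated. Keys such
+# as 'decoder_func', 'impl_func', 'unit_test', 'client_test', 'cmd_args' and
+# 'extension_flag' are read back later through func.GetInfo(); anything left
+# unspecified falls back to the defaults of the function's type handler.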
+
+
+def Grouper(n, iterable, fillvalue=None):
+ """Collect data into fixed-length chunks or blocks"""
+ args = [iter(iterable)] * n
+ return itertools.izip_longest(fillvalue=fillvalue, *args)
+
+
+def SplitWords(input_string):
+ """Transforms a input_string into a list of lower-case components.
+
+ Args:
+ input_string: the input string.
+
+ Returns:
+ a list of lower-case words.
+ """
+ if input_string.find('_') > -1:
+ # 'some_TEXT_' -> 'some text'
+ return input_string.replace('_', ' ').strip().lower().split()
+ else:
+ if re.search('[A-Z]', input_string) and re.search('[a-z]', input_string):
+ # mixed case.
+ # look for capitalization to cut input_strings
+ # 'SomeText' -> 'Some Text'
+ input_string = re.sub('([A-Z])', r' \1', input_string).strip()
+ # 'Vector3' -> 'Vector 3'
+ input_string = re.sub('([^0-9])([0-9])', r'\1 \2', input_string)
+ return input_string.lower().split()
+
+
+def Lower(words):
+ """Makes a lower-case identifier from words.
+
+ Args:
+ words: a list of lower-case words.
+
+ Returns:
+ the lower-case identifier.
+ """
+ return '_'.join(words)
+
+
+def ToUnderscore(input_string):
+ """converts CamelCase to camel_case."""
+ words = SplitWords(input_string)
+ return Lower(words)
+
+def CachedStateName(item):
+ if item.get('cached', False):
+ return 'cached_' + item['name']
+ return item['name']
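+# e.g. a hypothetical item {'name': 'blend_color', 'cached': True} maps to
+# 'cached_blend_color'; non-cached items keep their plain name.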
+
+def ToGLExtensionString(extension_flag):
+ """Returns GL-type extension string of a extension flag."""
+ if extension_flag == "oes_compressed_etc1_rgb8_texture":
+ return "OES_compressed_ETC1_RGB8_texture" # Fixup inconsitency with rgb8,
+ # unfortunate.
+ uppercase_words = [ 'img', 'ext', 'arb', 'chromium', 'oes', 'amd', 'bgra8888',
+ 'egl', 'atc', 'etc1', 'angle']
+ parts = extension_flag.split('_')
+ return "_".join(
+ [part.upper() if part in uppercase_words else part for part in parts])
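+# e.g. ToGLExtensionString('chromium_path_rendering') ->
+# 'CHROMIUM_path_rendering'.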
+
+def ToCamelCase(input_string):
+ """converts ABC_underscore_case to ABCUnderscoreCase."""
+ return ''.join(w[0].upper() + w[1:] for w in input_string.split('_'))
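+# e.g. ToCamelCase('ext_discard_framebuffer') -> 'ExtDiscardFramebuffer'.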
+
+def GetGLGetTypeConversion(result_type, value_type, value):
+ """Makes a gl compatible type conversion string for accessing state variables.
+
+ Useful when accessing state variables through glGetXXX calls.
+ glGet documetation (for example, the manual pages):
+ [...] If glGetIntegerv is called, [...] most floating-point values are
+ rounded to the nearest integer value. [...]
+
+ Args:
+ result_type: the gl type to be obtained
+ value_type: the GL type of the state variable
+ value: the name of the state variable
+
+ Returns:
+ String that converts the state variable to desired GL type according to GL
+ rules.
+ """
+
+ if result_type == 'GLint':
+ if value_type == 'GLfloat':
+ return 'static_cast<GLint>(round(%s))' % value
+ return 'static_cast<%s>(%s)' % (result_type, value)
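+# e.g. GetGLGetTypeConversion('GLint', 'GLfloat', 'value') returns
+# 'static_cast<GLint>(round(value))'; any other combination falls back to a
+# plain static_cast of the value.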
+
+class CWriter(object):
+ """Writes to a file formatting it for Google's style guidelines."""
+
+ def __init__(self, filename):
+ self.filename = filename
+ self.content = []
+
+ def Write(self, string):
+ """Writes a string to a file spliting if it's > 80 characters."""
+ lines = string.splitlines()
+ num_lines = len(lines)
+ for ii in range(0, num_lines):
+ self.content.append(lines[ii])
+ if ii < (num_lines - 1) or string[-1] == '\n':
+ self.content.append('\n')
+
+ def Close(self):
+ """Close the file."""
+ content = "".join(self.content)
+ write_file = True
+ if os.path.exists(self.filename):
+      old_file = open(self.filename, "rb")
+      old_content = old_file.read()
+      old_file.close()
+ if content == old_content:
+ write_file = False
+ if write_file:
+ file = open(self.filename, "wb")
+ file.write(content)
+ file.close()
+
+
+class CHeaderWriter(CWriter):
+ """Writes a C Header file."""
+
+ _non_alnum_re = re.compile(r'[^a-zA-Z0-9]')
+
+ def __init__(self, filename, file_comment = None):
+ CWriter.__init__(self, filename)
+
+ base = os.path.abspath(filename)
+ while os.path.basename(base) != 'src':
+ new_base = os.path.dirname(base)
+ assert new_base != base # Prevent infinite loop.
+ base = new_base
+
+ hpath = os.path.relpath(filename, base)
+ self.guard = self._non_alnum_re.sub('_', hpath).upper() + '_'
+
+ self.Write(_LICENSE)
+ self.Write(_DO_NOT_EDIT_WARNING)
+ if not file_comment == None:
+ self.Write(file_comment)
+ self.Write("#ifndef %s\n" % self.guard)
+ self.Write("#define %s\n\n" % self.guard)
+
+ def Close(self):
+ self.Write("#endif // %s\n\n" % self.guard)
+ CWriter.Close(self)
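+# Illustrative use (assumes the output path lives under a 'src' checkout, which
+# the include-guard computation above requires):
+#   writer = CHeaderWriter('src/gpu/command_buffer/foo_autogen.h')
+#   writer.Write('// declarations...\n')
+#   writer.Close()  # appends the closing #endif; only rewrites on change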
+
+class TypeHandler(object):
+ """This class emits code for a particular type of function."""
+
+ _remove_expected_call_re = re.compile(r' EXPECT_CALL.*?;\n', re.S)
+
+ def __init__(self):
+ pass
+
+ def InitFunction(self, func):
+ """Add or adjust anything type specific for this function."""
+ if func.GetInfo('needs_size') and not func.name.endswith('Bucket'):
+ func.AddCmdArg(DataSizeArgument('data_size'))
+
+ def NeedsDataTransferFunction(self, func):
+ """Overriden from TypeHandler."""
+ return func.num_pointer_args >= 1
+
+ def WriteStruct(self, func, file):
+ """Writes a structure that matches the arguments to a function."""
+ comment = func.GetInfo('cmd_comment')
+ if not comment == None:
+ file.Write(comment)
+ file.Write("struct %s {\n" % func.name)
+ file.Write(" typedef %s ValueType;\n" % func.name)
+ file.Write(" static const CommandId kCmdId = k%s;\n" % func.name)
+ func.WriteCmdArgFlag(file)
+ func.WriteCmdFlag(file)
+ file.Write("\n")
+ result = func.GetInfo('result')
+ if not result == None:
+ if len(result) == 1:
+ file.Write(" typedef %s Result;\n\n" % result[0])
+ else:
+ file.Write(" struct Result {\n")
+ for line in result:
+ file.Write(" %s;\n" % line)
+ file.Write(" };\n\n")
+
+ func.WriteCmdComputeSize(file)
+ func.WriteCmdSetHeader(file)
+ func.WriteCmdInit(file)
+ func.WriteCmdSet(file)
+
+ file.Write(" gpu::CommandHeader header;\n")
+ args = func.GetCmdArgs()
+ for arg in args:
+ file.Write(" %s %s;\n" % (arg.cmd_type, arg.name))
+
+ consts = func.GetCmdConstants()
+ for const in consts:
+ file.Write(" static const %s %s = %s;\n" %
+ (const.cmd_type, const.name, const.GetConstantValue()))
+
+ file.Write("};\n")
+ file.Write("\n")
+
+ size = len(args) * _SIZE_OF_UINT32 + _SIZE_OF_COMMAND_HEADER
+ file.Write("COMPILE_ASSERT(sizeof(%s) == %d,\n" % (func.name, size))
+ file.Write(" Sizeof_%s_is_not_%d);\n" % (func.name, size))
+ file.Write("COMPILE_ASSERT(offsetof(%s, header) == 0,\n" % func.name)
+ file.Write(" OffsetOf_%s_header_not_0);\n" % func.name)
+ offset = _SIZE_OF_COMMAND_HEADER
+ for arg in args:
+ file.Write("COMPILE_ASSERT(offsetof(%s, %s) == %d,\n" %
+ (func.name, arg.name, offset))
+ file.Write(" OffsetOf_%s_%s_not_%d);\n" %
+ (func.name, arg.name, offset))
+ offset += _SIZE_OF_UINT32
+ if not result == None and len(result) > 1:
+      offset = 0
+ for line in result:
+ parts = line.split()
+ name = parts[-1]
+ check = """
+COMPILE_ASSERT(offsetof(%(cmd_name)s::Result, %(field_name)s) == %(offset)d,
+ OffsetOf_%(cmd_name)s_Result_%(field_name)s_not_%(offset)d);
+"""
+ file.Write((check.strip() + "\n") % {
+ 'cmd_name': func.name,
+ 'field_name': name,
+ 'offset': offset,
+ })
+ offset += _SIZE_OF_UINT32
+ file.Write("\n")
+
+ def WriteHandlerImplementation(self, func, file):
+ """Writes the handler implementation for this command."""
+ file.Write(" %s(%s);\n" %
+ (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
+
+ def WriteCmdSizeTest(self, func, file):
+ """Writes the size test for a command."""
+ file.Write(" EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);\n")
+
+ def WriteFormatTest(self, func, file):
+ """Writes a format test for a command."""
+ file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
+ file.Write(" cmds::%s& cmd = *GetBufferAs<cmds::%s>();\n" %
+ (func.name, func.name))
+ file.Write(" void* next_cmd = cmd.Set(\n")
+ file.Write(" &cmd")
+ args = func.GetCmdArgs()
+ for value, arg in enumerate(args):
+ file.Write(",\n static_cast<%s>(%d)" % (arg.type, value + 11))
+ file.Write(");\n")
+ file.Write(" EXPECT_EQ(static_cast<uint32_t>(cmds::%s::kCmdId),\n" %
+ func.name)
+ file.Write(" cmd.header.command);\n")
+ func.type_handler.WriteCmdSizeTest(func, file)
+ for value, arg in enumerate(args):
+ file.Write(" EXPECT_EQ(static_cast<%s>(%d), cmd.%s);\n" %
+ (arg.type, value + 11, arg.name))
+ file.Write(" CheckBytesWrittenMatchesExpectedSize(\n")
+ file.Write(" next_cmd, sizeof(cmd));\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteImmediateFormatTest(self, func, file):
+ """Writes a format test for an immediate version of a command."""
+ pass
+
+ def WriteBucketFormatTest(self, func, file):
+ """Writes a format test for a bucket version of a command."""
+ pass
+
+ def WriteGetDataSizeCode(self, func, file):
+ """Writes the code to set data_size used in validation"""
+ pass
+
+ def WriteImmediateCmdSizeTest(self, func, file):
+ """Writes a size test for an immediate version of a command."""
+ file.Write(" // TODO(gman): Compute correct size.\n")
+ file.Write(" EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);\n")
+
+  def WriteImmediateHandlerImplementation(self, func, file):
+ """Writes the handler impl for the immediate version of a command."""
+ file.Write(" %s(%s);\n" %
+ (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
+
+  def WriteBucketHandlerImplementation(self, func, file):
+ """Writes the handler impl for the bucket version of a command."""
+ file.Write(" %s(%s);\n" %
+ (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
+
+ def WriteServiceHandlerFunctionHeader(self, func, file):
+ """Writes function header for service implementation handlers."""
+ file.Write("""error::Error GLES2DecoderImpl::Handle%(name)s(
+ uint32_t immediate_data_size, const void* cmd_data) {
+ const gles2::cmds::%(name)s& c =
+ *static_cast<const gles2::cmds::%(name)s*>(cmd_data);
+ (void)c;
+ """ % {'name': func.name})
+
+ def WriteServiceImplementation(self, func, file):
+ """Writes the service implementation for a command."""
+ self.WriteServiceHandlerFunctionHeader(func, file)
+ self.WriteHandlerExtensionCheck(func, file)
+    self.WriteHandlerDeferReadWrite(func, file)
+ if len(func.GetOriginalArgs()) > 0:
+ last_arg = func.GetLastOriginalArg()
+ all_but_last_arg = func.GetOriginalArgs()[:-1]
+ for arg in all_but_last_arg:
+ arg.WriteGetCode(file)
+ self.WriteGetDataSizeCode(func, file)
+ last_arg.WriteGetCode(file)
+ func.WriteHandlerValidation(file)
+ func.WriteHandlerImplementation(file)
+ file.Write(" return error::kNoError;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteImmediateServiceImplementation(self, func, file):
+ """Writes the service implementation for an immediate version of command."""
+ self.WriteServiceHandlerFunctionHeader(func, file)
+ self.WriteHandlerExtensionCheck(func, file)
+    self.WriteHandlerDeferReadWrite(func, file)
+ last_arg = func.GetLastOriginalArg()
+ all_but_last_arg = func.GetOriginalArgs()[:-1]
+ for arg in all_but_last_arg:
+ arg.WriteGetCode(file)
+ self.WriteGetDataSizeCode(func, file)
+ last_arg.WriteGetCode(file)
+ func.WriteHandlerValidation(file)
+ func.WriteHandlerImplementation(file)
+ file.Write(" return error::kNoError;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteBucketServiceImplementation(self, func, file):
+ """Writes the service implementation for a bucket version of command."""
+ self.WriteServiceHandlerFunctionHeader(func, file)
+ self.WriteHandlerExtensionCheck(func, file)
+    self.WriteHandlerDeferReadWrite(func, file)
+ last_arg = func.GetLastOriginalArg()
+ all_but_last_arg = func.GetOriginalArgs()[:-1]
+ for arg in all_but_last_arg:
+ arg.WriteGetCode(file)
+ self.WriteGetDataSizeCode(func, file)
+ last_arg.WriteGetCode(file)
+ func.WriteHandlerValidation(file)
+ func.WriteHandlerImplementation(file)
+ file.Write(" return error::kNoError;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteHandlerExtensionCheck(self, func, file):
+ if func.GetInfo('extension_flag'):
+ file.Write(" if (!features().%s) {\n" % func.GetInfo('extension_flag'))
+ file.Write(" LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, \"gl%s\","
+ " \"function not available\");\n" % func.original_name)
+ file.Write(" return error::kNoError;")
+ file.Write(" }\n\n")
+
+ def WriteHandlerDeferReadWrite(self, func, file):
+ """Writes the code to handle deferring reads or writes."""
+ defer_draws = func.GetInfo('defer_draws')
+ defer_reads = func.GetInfo('defer_reads')
+ if defer_draws or defer_reads:
+ file.Write(" error::Error error;\n")
+ if defer_draws:
+ file.Write(" error = WillAccessBoundFramebufferForDraw();\n")
+ file.Write(" if (error != error::kNoError)\n")
+ file.Write(" return error;\n")
+ if defer_reads:
+ file.Write(" error = WillAccessBoundFramebufferForRead();\n")
+ file.Write(" if (error != error::kNoError)\n")
+ file.Write(" return error;\n")
+
+ def WriteValidUnitTest(self, func, file, test, *extras):
+ """Writes a valid unit test for the service implementation."""
+ if func.GetInfo('expectation') == False:
+ test = self._remove_expected_call_re.sub('', test)
+ name = func.name
+ arg_strings = [
+ arg.GetValidArg(func) \
+ for arg in func.GetOriginalArgs() if not arg.IsConstant()
+ ]
+ gl_arg_strings = [
+ arg.GetValidGLArg(func) \
+ for arg in func.GetOriginalArgs()
+ ]
+ gl_func_name = func.GetGLTestFunctionName()
+ vars = {
+ 'name':name,
+ 'gl_func_name': gl_func_name,
+ 'args': ", ".join(arg_strings),
+ 'gl_args': ", ".join(gl_arg_strings),
+ }
+ for extra in extras:
+ vars.update(extra)
+ old_test = ""
+ while (old_test != test):
+ old_test = test
+ test = test % vars
+ file.Write(test % vars)
+
+ def WriteInvalidUnitTest(self, func, file, test, *extras):
+ """Writes an invalid unit test for the service implementation."""
+ for invalid_arg_index, invalid_arg in enumerate(func.GetOriginalArgs()):
+ # Service implementation does not test constants, as they are not part of
+ # the call in the service side.
+ if invalid_arg.IsConstant():
+ continue
+
+ num_invalid_values = invalid_arg.GetNumInvalidValues(func)
+ for value_index in range(0, num_invalid_values):
+ arg_strings = []
+ parse_result = "kNoError"
+ gl_error = None
+ for arg in func.GetOriginalArgs():
+ if arg.IsConstant():
+ continue
+ if invalid_arg is arg:
+ (arg_string, parse_result, gl_error) = arg.GetInvalidArg(
+ value_index)
+ else:
+ arg_string = arg.GetValidArg(func)
+ arg_strings.append(arg_string)
+ gl_arg_strings = []
+ for arg in func.GetOriginalArgs():
+ gl_arg_strings.append("_")
+ gl_func_name = func.GetGLTestFunctionName()
+ gl_error_test = ''
+ if not gl_error == None:
+ gl_error_test = '\n EXPECT_EQ(%s, GetGLError());' % gl_error
+
+ vars = {
+ 'name': func.name,
+ 'arg_index': invalid_arg_index,
+ 'value_index': value_index,
+ 'gl_func_name': gl_func_name,
+ 'args': ", ".join(arg_strings),
+ 'all_but_last_args': ", ".join(arg_strings[:-1]),
+ 'gl_args': ", ".join(gl_arg_strings),
+ 'parse_result': parse_result,
+ 'gl_error_test': gl_error_test,
+ }
+ for extra in extras:
+ vars.update(extra)
+ file.Write(test % vars)
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Writes the service unit test for a command."""
+
+ if func.name == 'Enable':
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ SetupExpectationsForEnableDisable(%(gl_args)s, true);
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ elif func.name == 'Disable':
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ SetupExpectationsForEnableDisable(%(gl_args)s, false);
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ else:
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ self.WriteValidUnitTest(func, file, valid_test, *extras)
+
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
+}
+"""
+ self.WriteInvalidUnitTest(func, file, invalid_test, *extras)
+
+ def WriteImmediateServiceUnitTest(self, func, file, *extras):
+ """Writes the service unit test for an immediate command."""
+ file.Write("// TODO(gman): %s\n" % func.name)
+
+ def WriteImmediateValidationCode(self, func, file):
+ """Writes the validation code for an immediate version of a command."""
+ pass
+
+ def WriteBucketServiceUnitTest(self, func, file, *extras):
+ """Writes the service unit test for a bucket command."""
+ file.Write("// TODO(gman): %s\n" % func.name)
+
+ def WriteBucketValidationCode(self, func, file):
+ """Writes the validation code for a bucket version of a command."""
+ file.Write("// TODO(gman): %s\n" % func.name)
+
+ def WriteGLES2ImplementationDeclaration(self, func, file):
+ """Writes the GLES2 Implemention declaration."""
+ impl_decl = func.GetInfo('impl_decl')
+ if impl_decl == None or impl_decl == True:
+ file.Write("virtual %s %s(%s) OVERRIDE;\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write("\n")
+
+ def WriteGLES2CLibImplementation(self, func, file):
+ file.Write("%s GLES2%s(%s) {\n" %
+ (func.return_type, func.name,
+ func.MakeTypedOriginalArgString("")))
+ result_string = "return "
+ if func.return_type == "void":
+ result_string = ""
+ file.Write(" %sgles2::GetGLContext()->%s(%s);\n" %
+ (result_string, func.original_name,
+ func.MakeOriginalArgString("")))
+ file.Write("}\n")
+
+ def WriteGLES2Header(self, func, file):
+ """Writes a re-write macro for GLES"""
+ file.Write("#define gl%s GLES2_GET_FUN(%s)\n" %(func.name, func.name))
+
+ def WriteClientGLCallLog(self, func, file):
+ """Writes a logging macro for the client side code."""
+ comma = ""
+ if len(func.GetOriginalArgs()):
+ comma = " << "
+ file.Write(
+ ' GPU_CLIENT_LOG("[" << GetLogPrefix() << "] gl%s("%s%s << ")");\n' %
+ (func.original_name, comma, func.MakeLogArgString()))
+
+ def WriteClientGLReturnLog(self, func, file):
+ """Writes the return value logging code."""
+ if func.return_type != "void":
+ file.Write(' GPU_CLIENT_LOG("return:" << result)\n')
+
+ def WriteGLES2ImplementationHeader(self, func, file):
+ """Writes the GLES2 Implemention."""
+ self.WriteGLES2ImplementationDeclaration(func, file)
+
+ def WriteGLES2TraceImplementationHeader(self, func, file):
+ """Writes the GLES2 Trace Implemention header."""
+ file.Write("virtual %s %s(%s) OVERRIDE;\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+
+ def WriteGLES2TraceImplementation(self, func, file):
+ """Writes the GLES2 Trace Implemention."""
+ file.Write("%s GLES2TraceImplementation::%s(%s) {\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ result_string = "return "
+ if func.return_type == "void":
+ result_string = ""
+ file.Write(' TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::%s");\n' %
+ func.name)
+ file.Write(" %sgl_->%s(%s);\n" %
+ (result_string, func.name, func.MakeOriginalArgString("")))
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteGLES2Implementation(self, func, file):
+ """Writes the GLES2 Implemention."""
+ impl_func = func.GetInfo('impl_func')
+ impl_decl = func.GetInfo('impl_decl')
+ gen_cmd = func.GetInfo('gen_cmd')
+ if (func.can_auto_generate and
+ (impl_func == None or impl_func == True) and
+ (impl_decl == None or impl_decl == True) and
+ (gen_cmd == None or gen_cmd == True)):
+ file.Write("%s GLES2Implementation::%s(%s) {\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
+ self.WriteClientGLCallLog(func, file)
+ func.WriteDestinationInitalizationValidation(file)
+ for arg in func.GetOriginalArgs():
+ arg.WriteClientSideValidationCode(file, func)
+ file.Write(" helper_->%s(%s);\n" %
+ (func.name, func.MakeHelperArgString("")))
+ file.Write(" CheckGLError();\n")
+ self.WriteClientGLReturnLog(func, file)
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteGLES2InterfaceHeader(self, func, file):
+ """Writes the GLES2 Interface."""
+ file.Write("virtual %s %s(%s) = 0;\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+
+ def WriteGLES2InterfaceStub(self, func, file):
+ """Writes the GLES2 Interface stub declaration."""
+ file.Write("virtual %s %s(%s) OVERRIDE;\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+
+ def WriteGLES2InterfaceStubImpl(self, func, file):
+ """Writes the GLES2 Interface stub declaration."""
+ args = func.GetOriginalArgs()
+ arg_string = ", ".join(
+ ["%s /* %s */" % (arg.type, arg.name) for arg in args])
+ file.Write("%s GLES2InterfaceStub::%s(%s) {\n" %
+ (func.return_type, func.original_name, arg_string))
+ if func.return_type != "void":
+ file.Write(" return 0;\n")
+ file.Write("}\n")
+
+ def WriteGLES2ImplementationUnitTest(self, func, file):
+ """Writes the GLES2 Implemention unit test."""
+ client_test = func.GetInfo('client_test')
+ if (func.can_auto_generate and
+ (client_test == None or client_test == True)):
+ code = """
+TEST_F(GLES2ImplementationTest, %(name)s) {
+ struct Cmds {
+ cmds::%(name)s cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(%(cmd_args)s);
+
+ gl_->%(name)s(%(args)s);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+"""
+ cmd_arg_strings = [
+ arg.GetValidClientSideCmdArg(func) for arg in func.GetCmdArgs()
+ ]
+
+ gl_arg_strings = [
+ arg.GetValidClientSideArg(func) for arg in func.GetOriginalArgs()
+ ]
+
+ file.Write(code % {
+ 'name': func.name,
+ 'args': ", ".join(gl_arg_strings),
+ 'cmd_args': ", ".join(cmd_arg_strings),
+ })
+
+ # Test constants for invalid values, as they are not tested by the
+ # service.
+ constants = [arg for arg in func.GetOriginalArgs() if arg.IsConstant()]
+ if constants:
+ code = """
+TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
+ gl_->%(name)s(%(args)s);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(%(gl_error)s, CheckError());
+}
+"""
+ for invalid_arg in constants:
+ gl_arg_strings = []
+ invalid = invalid_arg.GetInvalidArg(func)
+ for arg in func.GetOriginalArgs():
+ if arg is invalid_arg:
+ gl_arg_strings.append(invalid[0])
+ else:
+ gl_arg_strings.append(arg.GetValidClientSideArg(func))
+
+ file.Write(code % {
+ 'name': func.name,
+ 'invalid_index': func.GetOriginalArgs().index(invalid_arg),
+ 'args': ", ".join(gl_arg_strings),
+ 'gl_error': invalid[2],
+ })
+ else:
+ if client_test != False:
+ file.Write("// TODO: Implement unit test for %s\n" % func.name)
+
+ def WriteDestinationInitalizationValidation(self, func, file):
+ """Writes the client side destintion initialization validation."""
+ for arg in func.GetOriginalArgs():
+ arg.WriteDestinationInitalizationValidation(file, func)
+
+ def WriteTraceEvent(self, func, file):
+ file.Write(' TRACE_EVENT0("gpu", "GLES2Implementation::%s");\n' %
+ func.original_name)
+
+ def WriteImmediateCmdComputeSize(self, func, file):
+ """Writes the size computation code for the immediate version of a cmd."""
+ file.Write(" static uint32_t ComputeSize(uint32_t size_in_bytes) {\n")
+ file.Write(" return static_cast<uint32_t>(\n")
+ file.Write(" sizeof(ValueType) + // NOLINT\n")
+ file.Write(" RoundSizeToMultipleOfEntries(size_in_bytes));\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSetHeader(self, func, file):
+ """Writes the SetHeader function for the immediate version of a cmd."""
+ file.Write(" void SetHeader(uint32_t size_in_bytes) {\n")
+ file.Write(" header.SetCmdByTotalSize<ValueType>(size_in_bytes);\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdInit(self, func, file):
+ """Writes the Init function for the immediate version of a command."""
+ raise NotImplementedError(func.name)
+
+ def WriteImmediateCmdSet(self, func, file):
+ """Writes the Set function for the immediate version of a command."""
+ raise NotImplementedError(func.name)
+
+ def WriteCmdHelper(self, func, file):
+ """Writes the cmd helper definition for a cmd."""
+ code = """ void %(name)s(%(typed_args)s) {
+ gles2::cmds::%(name)s* c = GetCmdSpace<gles2::cmds::%(name)s>();
+ if (c) {
+ c->Init(%(args)s);
+ }
+ }
+
+"""
+ file.Write(code % {
+ "name": func.name,
+ "typed_args": func.MakeTypedCmdArgString(""),
+ "args": func.MakeCmdArgString(""),
+ })
+
+ def WriteImmediateCmdHelper(self, func, file):
+ """Writes the cmd helper definition for the immediate version of a cmd."""
+ code = """ void %(name)s(%(typed_args)s) {
+ const uint32_t s = 0; // TODO(gman): compute correct size
+ gles2::cmds::%(name)s* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::%(name)s>(s);
+ if (c) {
+ c->Init(%(args)s);
+ }
+ }
+
+"""
+ file.Write(code % {
+ "name": func.name,
+ "typed_args": func.MakeTypedCmdArgString(""),
+ "args": func.MakeCmdArgString(""),
+ })
+
+
+class StateSetHandler(TypeHandler):
+ """Handler for commands that simply set state."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def WriteHandlerImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ state_name = func.GetInfo('state')
+ state = _STATES[state_name]
+ states = state['states']
+ args = func.GetOriginalArgs()
+ for ndx,item in enumerate(states):
+ code = []
+ if 'range_checks' in item:
+ for range_check in item['range_checks']:
+ code.append("%s %s" % (args[ndx].name, range_check['check']))
+ if 'nan_check' in item:
+ # Drivers might generate an INVALID_VALUE error when a value is set
+ # to NaN. This is allowed behavior under GLES 3.0 section 2.1.1 or
+ # OpenGL 4.5 section 2.3.4.1 - providing NaN allows undefined results.
+ # Make this behavior consistent within Chromium, and avoid leaking GL
+ # errors by generating the error in the command buffer instead of
+ # letting the GL driver generate it.
+ code.append("base::IsNaN(%s)" % args[ndx].name)
+ if len(code):
+ file.Write(" if (%s) {\n" % " ||\n ".join(code))
+ file.Write(
+ ' LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,'
+ ' "%s", "%s out of range");\n' %
+ (func.name, args[ndx].name))
+ file.Write(" return error::kNoError;\n")
+ file.Write(" }\n")
+ code = []
+ for ndx,item in enumerate(states):
+ code.append("state_.%s != %s" % (item['name'], args[ndx].name))
+ file.Write(" if (%s) {\n" % " ||\n ".join(code))
+ for ndx,item in enumerate(states):
+ file.Write(" state_.%s = %s;\n" % (item['name'], args[ndx].name))
+ if 'state_flag' in state:
+ file.Write(" %s = true;\n" % state['state_flag'])
+ if not func.GetInfo("no_gl"):
+ for ndx,item in enumerate(states):
+ if item.get('cached', False):
+ file.Write(" state_.%s = %s;\n" %
+ (CachedStateName(item), args[ndx].name))
+ file.Write(" %s(%s);\n" %
+ (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
+ file.Write(" }\n")
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ TypeHandler.WriteServiceUnitTest(self, func, file, *extras)
+ state_name = func.GetInfo('state')
+ state = _STATES[state_name]
+ states = state['states']
+ for ndx,item in enumerate(states):
+ if 'range_checks' in item:
+ for check_ndx, range_check in enumerate(item['range_checks']):
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidValue%(ndx)d_%(check_ndx)d) {
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+"""
+ name = func.name
+ arg_strings = [
+ arg.GetValidArg(func) \
+ for arg in func.GetOriginalArgs() if not arg.IsConstant()
+ ]
+
+ arg_strings[ndx] = range_check['test_value']
+ vars = {
+ 'name': name,
+ 'ndx': ndx,
+ 'check_ndx': check_ndx,
+ 'args': ", ".join(arg_strings),
+ }
+ for extra in extras:
+ vars.update(extra)
+ file.Write(valid_test % vars)
+ if 'nan_check' in item:
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sNaNValue%(ndx)d) {
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+"""
+ name = func.name
+ arg_strings = [
+ arg.GetValidArg(func) \
+ for arg in func.GetOriginalArgs() if not arg.IsConstant()
+ ]
+
+ arg_strings[ndx] = 'nanf("")'
+ vars = {
+ 'name': name,
+ 'ndx': ndx,
+ 'args': ", ".join(arg_strings),
+ }
+ for extra in extras:
+ vars.update(extra)
+ file.Write(valid_test % vars)
+
+
+class StateSetRGBAlphaHandler(TypeHandler):
+ """Handler for commands that simply set state that have rgb/alpha."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def WriteHandlerImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ state_name = func.GetInfo('state')
+ state = _STATES[state_name]
+ states = state['states']
+ args = func.GetOriginalArgs()
+ num_args = len(args)
+ code = []
+ for ndx,item in enumerate(states):
+ code.append("state_.%s != %s" % (item['name'], args[ndx % num_args].name))
+ file.Write(" if (%s) {\n" % " ||\n ".join(code))
+ for ndx, item in enumerate(states):
+ file.Write(" state_.%s = %s;\n" %
+ (item['name'], args[ndx % num_args].name))
+ if 'state_flag' in state:
+ file.Write(" %s = true;\n" % state['state_flag'])
+ if not func.GetInfo("no_gl"):
+ file.Write(" %s(%s);\n" %
+ (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
+ file.Write(" }\n")
+
+
+class StateSetFrontBackSeparateHandler(TypeHandler):
+ """Handler for commands that simply set state that have front/back."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def WriteHandlerImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ state_name = func.GetInfo('state')
+ state = _STATES[state_name]
+ states = state['states']
+ args = func.GetOriginalArgs()
+ face = args[0].name
+ num_args = len(args)
+ file.Write(" bool changed = false;\n")
+ for group_ndx, group in enumerate(Grouper(num_args - 1, states)):
+ file.Write(" if (%s == %s || %s == GL_FRONT_AND_BACK) {\n" %
+ (face, ('GL_FRONT', 'GL_BACK')[group_ndx], face))
+ code = []
+ for ndx, item in enumerate(group):
+ code.append("state_.%s != %s" % (item['name'], args[ndx + 1].name))
+ file.Write(" changed |= %s;\n" % " ||\n ".join(code))
+ file.Write(" }\n")
+ file.Write(" if (changed) {\n")
+ for group_ndx, group in enumerate(Grouper(num_args - 1, states)):
+ file.Write(" if (%s == %s || %s == GL_FRONT_AND_BACK) {\n" %
+ (face, ('GL_FRONT', 'GL_BACK')[group_ndx], face))
+ for ndx, item in enumerate(group):
+ file.Write(" state_.%s = %s;\n" %
+ (item['name'], args[ndx + 1].name))
+ file.Write(" }\n")
+ if 'state_flag' in state:
+ file.Write(" %s = true;\n" % state['state_flag'])
+ if not func.GetInfo("no_gl"):
+ file.Write(" %s(%s);\n" %
+ (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
+ file.Write(" }\n")
+
+
+class StateSetFrontBackHandler(TypeHandler):
+ """Handler for commands that simply set state that set both front/back."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def WriteHandlerImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ state_name = func.GetInfo('state')
+ state = _STATES[state_name]
+ states = state['states']
+ args = func.GetOriginalArgs()
+ num_args = len(args)
+ code = []
+ for group_ndx, group in enumerate(Grouper(num_args, states)):
+ for ndx, item in enumerate(group):
+ code.append("state_.%s != %s" % (item['name'], args[ndx].name))
+ file.Write(" if (%s) {\n" % " ||\n ".join(code))
+ for group_ndx, group in enumerate(Grouper(num_args, states)):
+ for ndx, item in enumerate(group):
+ file.Write(" state_.%s = %s;\n" % (item['name'], args[ndx].name))
+ if 'state_flag' in state:
+ file.Write(" %s = true;\n" % state['state_flag'])
+ if not func.GetInfo("no_gl"):
+ file.Write(" %s(%s);\n" %
+ (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
+ file.Write(" }\n")
+
+
+class StateSetNamedParameter(TypeHandler):
+ """Handler for commands that set a state chosen with an enum parameter."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def WriteHandlerImplementation(self, func, file):
+ """Overridden from TypeHandler."""
+ state_name = func.GetInfo('state')
+ state = _STATES[state_name]
+ states = state['states']
+ args = func.GetOriginalArgs()
+ num_args = len(args)
+ assert num_args == 2
+ file.Write(" switch (%s) {\n" % args[0].name)
+ for state in states:
+ file.Write(" case %s:\n" % state['enum'])
+ file.Write(" if (state_.%s != %s) {\n" %
+ (state['name'], args[1].name))
+ file.Write(" state_.%s = %s;\n" % (state['name'], args[1].name))
+ if not func.GetInfo("no_gl"):
+ file.Write(" %s(%s);\n" %
+ (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
+ file.Write(" }\n")
+ file.Write(" break;\n")
+ file.Write(" default:\n")
+ file.Write(" NOTREACHED();\n")
+ file.Write(" }\n")
+
+
+class CustomHandler(TypeHandler):
+ """Handler for commands that are auto-generated but require minor tweaks."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def WriteServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteImmediateServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteBucketServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): %s\n\n" % func.name)
+
+ def WriteImmediateServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): %s\n\n" % func.name)
+
+ def WriteImmediateCmdGetTotalSize(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(
+ " uint32_t total_size = 0; // TODO(gman): get correct size.\n")
+
+ def WriteImmediateCmdInit(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" void Init(%s) {\n" % func.MakeTypedCmdArgString("_"))
+ self.WriteImmediateCmdGetTotalSize(func, file)
+ file.Write(" SetHeader(total_size);\n")
+ args = func.GetCmdArgs()
+ for arg in args:
+ file.Write(" %s = _%s;\n" % (arg.name, arg.name))
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSet(self, func, file):
+ """Overrriden from TypeHandler."""
+ copy_args = func.MakeCmdArgString("_", False)
+ file.Write(" void* Set(void* cmd%s) {\n" %
+ func.MakeTypedCmdArgString("_", True))
+ self.WriteImmediateCmdGetTotalSize(func, file)
+ file.Write(" static_cast<ValueType*>(cmd)->Init(%s);\n" % copy_args)
+ file.Write(" return NextImmediateCmdAddressTotalSize<ValueType>("
+ "cmd, total_size);\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+
+class TodoHandler(CustomHandler):
+ """Handle for commands that are not yet implemented."""
+
+ def NeedsDataTransferFunction(self, func):
+ """Overriden from TypeHandler."""
+ return False
+
+ def WriteImmediateFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteGLES2ImplementationUnitTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("%s GLES2Implementation::%s(%s) {\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write(" // TODO: for now this is a no-op\n")
+ file.Write(
+ " SetGLError("
+ "GL_INVALID_OPERATION, \"gl%s\", \"not implemented\");\n" %
+ func.name)
+ if func.return_type != "void":
+ file.Write(" return 0;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ self.WriteServiceHandlerFunctionHeader(func, file)
+ file.Write(" // TODO: for now this is a no-op\n")
+ file.Write(
+ " LOCAL_SET_GL_ERROR("
+ "GL_INVALID_OPERATION, \"gl%s\", \"not implemented\");\n" %
+ func.name)
+ file.Write(" return error::kNoError;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+
+class HandWrittenHandler(CustomHandler):
+ """Handler for comands where everything must be written by hand."""
+
+ def InitFunction(self, func):
+ """Add or adjust anything type specific for this function."""
+ CustomHandler.InitFunction(self, func)
+ func.can_auto_generate = False
+
+ def NeedsDataTransferFunction(self, func):
+ """Overriden from TypeHandler."""
+ # If specified explicitly, force the data transfer method.
+ if func.GetInfo('data_transfer_methods'):
+ return True
+ return False
+
+ def WriteStruct(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteDocs(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): %s\n\n" % func.name)
+
+ def WriteImmediateServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): %s\n\n" % func.name)
+
+ def WriteBucketServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): %s\n\n" % func.name)
+
+ def WriteServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteImmediateServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteBucketServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteImmediateCmdHelper(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteBucketCmdHelper(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteCmdHelper(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): Write test for %s\n" % func.name)
+
+ def WriteImmediateFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): Write test for %s\n" % func.name)
+
+ def WriteBucketFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): Write test for %s\n" % func.name)
+
+
+
+class ManualHandler(CustomHandler):
+ """Handler for commands who's handlers must be written by hand."""
+
+ def __init__(self):
+ CustomHandler.__init__(self)
+
+ def InitFunction(self, func):
+ """Overrriden from TypeHandler."""
+ if (func.name == 'CompressedTexImage2DBucket'):
+ func.cmd_args = func.cmd_args[:-1]
+ func.AddCmdArg(Argument('bucket_id', 'GLuint'))
+ else:
+ CustomHandler.InitFunction(self, func)
+
+ def WriteServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteBucketServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): %s\n\n" % func.name)
+
+ def WriteImmediateServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): %s\n\n" % func.name)
+
+ def WriteImmediateServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteImmediateFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): Implement test for %s\n" % func.name)
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ if func.GetInfo('impl_func'):
+ super(ManualHandler, self).WriteGLES2Implementation(func, file)
+
+ def WriteGLES2ImplementationHeader(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("virtual %s %s(%s) OVERRIDE;\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write("\n")
+
+ def WriteImmediateCmdGetTotalSize(self, func, file):
+ """Overrriden from TypeHandler."""
+ # TODO(gman): Move this data to _FUNCTION_INFO?
+ CustomHandler.WriteImmediateCmdGetTotalSize(self, func, file)
+
+
+class DataHandler(TypeHandler):
+ """Handler for glBufferData, glBufferSubData, glTexImage2D, glTexSubImage2D,
+  glCompressedTexImage2D, glCompressedTexSubImage2D."""
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def InitFunction(self, func):
+ """Overrriden from TypeHandler."""
+ if func.name == 'CompressedTexSubImage2DBucket':
+ func.cmd_args = func.cmd_args[:-1]
+ func.AddCmdArg(Argument('bucket_id', 'GLuint'))
+
+ def WriteGetDataSizeCode(self, func, file):
+ """Overrriden from TypeHandler."""
+ # TODO(gman): Move this data to _FUNCTION_INFO?
+ name = func.name
+ if name.endswith("Immediate"):
+ name = name[0:-9]
+ if name == 'BufferData' or name == 'BufferSubData':
+ file.Write(" uint32_t data_size = size;\n")
+ elif (name == 'CompressedTexImage2D' or
+ name == 'CompressedTexSubImage2D'):
+ file.Write(" uint32_t data_size = imageSize;\n")
+ elif (name == 'CompressedTexSubImage2DBucket'):
+ file.Write(" Bucket* bucket = GetBucket(c.bucket_id);\n")
+ file.Write(" uint32_t data_size = bucket->size();\n")
+ file.Write(" GLsizei imageSize = data_size;\n")
+ elif name == 'TexImage2D' or name == 'TexSubImage2D':
+ code = """ uint32_t data_size;
+ if (!GLES2Util::ComputeImageDataSize(
+ width, height, format, type, unpack_alignment_, &data_size)) {
+ return error::kOutOfBounds;
+ }
+"""
+ file.Write(code)
+ else:
+ file.Write(
+ "// uint32_t data_size = 0; // TODO(gman): get correct size!\n")
+
+ def WriteImmediateCmdGetTotalSize(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteImmediateCmdSizeTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" EXPECT_EQ(sizeof(cmd), total_size);\n")
+
+ def WriteImmediateCmdInit(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" void Init(%s) {\n" % func.MakeTypedCmdArgString("_"))
+ self.WriteImmediateCmdGetTotalSize(func, file)
+ file.Write(" SetHeader(total_size);\n")
+ args = func.GetCmdArgs()
+ for arg in args:
+ file.Write(" %s = _%s;\n" % (arg.name, arg.name))
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSet(self, func, file):
+ """Overrriden from TypeHandler."""
+ copy_args = func.MakeCmdArgString("_", False)
+ file.Write(" void* Set(void* cmd%s) {\n" %
+ func.MakeTypedCmdArgString("_", True))
+ self.WriteImmediateCmdGetTotalSize(func, file)
+ file.Write(" static_cast<ValueType*>(cmd)->Init(%s);\n" % copy_args)
+ file.Write(" return NextImmediateCmdAddressTotalSize<ValueType>("
+ "cmd, total_size);\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ # TODO(gman): Remove this exception.
+ file.Write("// TODO(gman): Implement test for %s\n" % func.name)
+ return
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): %s\n\n" % func.name)
+
+ def WriteImmediateServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ file.Write("// TODO(gman): %s\n\n" % func.name)
+
+ def WriteBucketServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ if not func.name == 'CompressedTexSubImage2DBucket':
+ TypeHandler.WriteBucketServiceImplemenation(self, func, file)
+
+
+class BindHandler(TypeHandler):
+ """Handler for glBind___ type functions."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+
+ if len(func.GetOriginalArgs()) == 1:
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ if func.GetInfo("gen_func"):
+ valid_test += """
+TEST_P(%(test_name)s, %(name)sValidArgsNewId) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(kNewServiceId));
+ EXPECT_CALL(*gl_, %(gl_gen_func_name)s(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(Get%(resource_type)s(kNewClientId) != NULL);
+}
+"""
+ self.WriteValidUnitTest(func, file, valid_test, {
+ 'resource_type': func.GetOriginalArgs()[0].resource_type,
+ 'gl_gen_func_name': func.GetInfo("gen_func"),
+ }, *extras)
+ else:
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ if func.GetInfo("gen_func"):
+ valid_test += """
+TEST_P(%(test_name)s, %(name)sValidArgsNewId) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(first_gl_arg)s, kNewServiceId));
+ EXPECT_CALL(*gl_, %(gl_gen_func_name)s(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(first_arg)s, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(Get%(resource_type)s(kNewClientId) != NULL);
+}
+"""
+ self.WriteValidUnitTest(func, file, valid_test, {
+ 'first_arg': func.GetOriginalArgs()[0].GetValidArg(func),
+ 'first_gl_arg': func.GetOriginalArgs()[0].GetValidGLArg(func),
+ 'resource_type': func.GetOriginalArgs()[1].resource_type,
+ 'gl_gen_func_name': func.GetInfo("gen_func"),
+ }, *extras)
+
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
+}
+"""
+ self.WriteInvalidUnitTest(func, file, invalid_test, *extras)
+
+ def WriteGLES2Implementation(self, func, file):
+ """Writes the GLES2 Implemention."""
+
+ impl_func = func.GetInfo('impl_func')
+ impl_decl = func.GetInfo('impl_decl')
+
+ if (func.can_auto_generate and
+ (impl_func == None or impl_func == True) and
+ (impl_decl == None or impl_decl == True)):
+
+ file.Write("%s GLES2Implementation::%s(%s) {\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
+ func.WriteDestinationInitalizationValidation(file)
+ self.WriteClientGLCallLog(func, file)
+ for arg in func.GetOriginalArgs():
+ arg.WriteClientSideValidationCode(file, func)
+
+ code = """ if (Is%(type)sReservedId(%(id)s)) {
+ SetGLError(GL_INVALID_OPERATION, "%(name)s\", \"%(id)s reserved id");
+ return;
+ }
+ if (%(name)sHelper(%(arg_string)s)) {
+ helper_->%(name)s(%(arg_string)s);
+ }
+ CheckGLError();
+}
+
+"""
+ name_arg = None
+ if len(func.GetOriginalArgs()) == 1:
+ # Bind functions that have no target (like BindVertexArrayOES)
+ name_arg = func.GetOriginalArgs()[0]
+ else:
+ # Bind functions that have both a target and a name (like BindTexture)
+ name_arg = func.GetOriginalArgs()[1]
+
+ file.Write(code % {
+ 'name': func.name,
+ 'arg_string': func.MakeOriginalArgString(""),
+ 'id': name_arg.name,
+ 'type': name_arg.resource_type,
+ 'lc_type': name_arg.resource_type.lower(),
+ })
+
+ def WriteGLES2ImplementationUnitTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ client_test = func.GetInfo('client_test')
+ if client_test == False:
+ return
+ code = """
+TEST_F(GLES2ImplementationTest, %(name)s) {
+ struct Cmds {
+ cmds::%(name)s cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(%(cmd_args)s);
+
+ gl_->%(name)s(%(args)s);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ClearCommands();
+ gl_->%(name)s(%(args)s);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+"""
+ cmd_arg_strings = [
+ arg.GetValidClientSideCmdArg(func) for arg in func.GetCmdArgs()
+ ]
+ gl_arg_strings = [
+ arg.GetValidClientSideArg(func) for arg in func.GetOriginalArgs()
+ ]
+
+ file.Write(code % {
+ 'name': func.name,
+ 'args': ", ".join(gl_arg_strings),
+ 'cmd_args': ", ".join(cmd_arg_strings),
+ })
+
+
+class GENnHandler(TypeHandler):
+ """Handler for glGen___ type functions."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def InitFunction(self, func):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteGetDataSizeCode(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+"""
+ file.Write(code)
+
+  def WriteHandlerImplementation(self, func, file):
+    """Overridden from TypeHandler."""
+ file.Write(" if (!%sHelper(n, %s)) {\n"
+ " return error::kInvalidArguments;\n"
+ " }\n" %
+ (func.name, func.GetLastOriginalArg().name))
+
+ def WriteImmediateHandlerImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" if (!%sHelper(n, %s)) {\n"
+ " return error::kInvalidArguments;\n"
+ " }\n" %
+ (func.original_name, func.GetLastOriginalArg().name))
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ log_code = (""" GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << %s[i]);
+ }
+ });""" % func.GetOriginalArgs()[1].name)
+ args = {
+ 'log_code': log_code,
+ 'return_type': func.return_type,
+ 'name': func.original_name,
+ 'typed_args': func.MakeTypedOriginalArgString(""),
+ 'args': func.MakeOriginalArgString(""),
+ 'resource_types': func.GetInfo('resource_types'),
+ 'count_name': func.GetOriginalArgs()[0].name,
+ }
+ file.Write(
+ "%(return_type)s GLES2Implementation::%(name)s(%(typed_args)s) {\n" %
+ args)
+ func.WriteDestinationInitalizationValidation(file)
+ self.WriteClientGLCallLog(func, file)
+ for arg in func.GetOriginalArgs():
+ arg.WriteClientSideValidationCode(file, func)
+ not_shared = func.GetInfo('not_shared')
+ if not_shared:
+ alloc_code = (
+
+""" IdAllocator* id_allocator = GetIdAllocator(id_namespaces::k%s);
+ for (GLsizei ii = 0; ii < n; ++ii)
+ %s[ii] = id_allocator->AllocateID();""" %
+ (func.GetInfo('resource_types'), func.GetOriginalArgs()[1].name))
+ else:
+ alloc_code = (""" GetIdHandler(id_namespaces::k%(resource_types)s)->
+ MakeIds(this, 0, %(args)s);""" % args)
+ args['alloc_code'] = alloc_code
+
+ code = """ GPU_CLIENT_SINGLE_THREAD_CHECK();
+%(alloc_code)s
+ %(name)sHelper(%(args)s);
+ helper_->%(name)sImmediate(%(args)s);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+%(log_code)s
+ CheckGLError();
+}
+
+"""
+ file.Write(code % args)
+
+ def WriteGLES2ImplementationUnitTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """
+TEST_F(GLES2ImplementationTest, %(name)s) {
+ GLuint ids[2] = { 0, };
+ struct Cmds {
+ cmds::%(name)sImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = k%(types)sStartId;
+ expected.data[1] = k%(types)sStartId + 1;
+ gl_->%(name)s(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(k%(types)sStartId, ids[0]);
+ EXPECT_EQ(k%(types)sStartId + 1, ids[1]);
+}
+"""
+ file.Write(code % {
+ 'name': func.name,
+ 'types': func.GetInfo('resource_types'),
+ })
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ GetSharedMemoryAs<GLuint*>()[0] = kNewClientId;
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(Get%(resource_name)s(kNewClientId) != NULL);
+}
+"""
+ self.WriteValidUnitTest(func, file, valid_test, {
+ 'resource_name': func.GetInfo('resource_type'),
+ }, *extras)
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(_, _)).Times(0);
+ GetSharedMemoryAs<GLuint*>()[0] = client_%(resource_name)s_id_;
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
+}
+"""
+ self.WriteValidUnitTest(func, file, invalid_test, {
+ 'resource_name': func.GetInfo('resource_type').lower(),
+ }, *extras)
+
+ def WriteImmediateServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ cmds::%(name)s* cmd = GetImmediateAs<cmds::%(name)s>();
+ GLuint temp = kNewClientId;
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(Get%(resource_name)s(kNewClientId) != NULL);
+}
+"""
+ self.WriteValidUnitTest(func, file, valid_test, {
+ 'resource_name': func.GetInfo('resource_type'),
+ }, *extras)
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(_, _)).Times(0);
+ cmds::%(name)s* cmd = GetImmediateAs<cmds::%(name)s>();
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmd->Init(1, &client_%(resource_name)s_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_%(resource_name)s_id_)));
+}
+"""
+ self.WriteValidUnitTest(func, file, invalid_test, {
+ 'resource_name': func.GetInfo('resource_type').lower(),
+ }, *extras)
+
+ def WriteImmediateCmdComputeSize(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" static uint32_t ComputeDataSize(GLsizei n) {\n")
+ file.Write(
+ " return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT\n")
+ file.Write(" }\n")
+ file.Write("\n")
+ file.Write(" static uint32_t ComputeSize(GLsizei n) {\n")
+ file.Write(" return static_cast<uint32_t>(\n")
+ file.Write(" sizeof(ValueType) + ComputeDataSize(n)); // NOLINT\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSetHeader(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" void SetHeader(GLsizei n) {\n")
+ file.Write(" header.SetCmdByTotalSize<ValueType>(ComputeSize(n));\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdInit(self, func, file):
+ """Overrriden from TypeHandler."""
+ last_arg = func.GetLastOriginalArg()
+ file.Write(" void Init(%s, %s _%s) {\n" %
+ (func.MakeTypedCmdArgString("_"),
+ last_arg.type, last_arg.name))
+ file.Write(" SetHeader(_n);\n")
+ args = func.GetCmdArgs()
+ for arg in args:
+ file.Write(" %s = _%s;\n" % (arg.name, arg.name))
+ file.Write(" memcpy(ImmediateDataAddress(this),\n")
+ file.Write(" _%s, ComputeDataSize(_n));\n" % last_arg.name)
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSet(self, func, file):
+ """Overrriden from TypeHandler."""
+ last_arg = func.GetLastOriginalArg()
+ copy_args = func.MakeCmdArgString("_", False)
+ file.Write(" void* Set(void* cmd%s, %s _%s) {\n" %
+ (func.MakeTypedCmdArgString("_", True),
+ last_arg.type, last_arg.name))
+ file.Write(" static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
+ (copy_args, last_arg.name))
+ file.Write(" const uint32_t size = ComputeSize(_n);\n")
+ file.Write(" return NextImmediateCmdAddressTotalSize<ValueType>("
+ "cmd, size);\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdHelper(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """ void %(name)s(%(typed_args)s) {
+ const uint32_t size = gles2::cmds::%(name)s::ComputeSize(n);
+ gles2::cmds::%(name)s* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::%(name)s>(size);
+ if (c) {
+ c->Init(%(args)s);
+ }
+ }
+
+"""
+ file.Write(code % {
+ "name": func.name,
+ "typed_args": func.MakeTypedOriginalArgString(""),
+ "args": func.MakeOriginalArgString(""),
+ })
+
+ def WriteImmediateFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
+ file.Write(" static GLuint ids[] = { 12, 23, 34, };\n")
+ file.Write(" cmds::%s& cmd = *GetBufferAs<cmds::%s>();\n" %
+ (func.name, func.name))
+ file.Write(" void* next_cmd = cmd.Set(\n")
+ file.Write(" &cmd, static_cast<GLsizei>(arraysize(ids)), ids);\n")
+ file.Write(" EXPECT_EQ(static_cast<uint32_t>(cmds::%s::kCmdId),\n" %
+ func.name)
+ file.Write(" cmd.header.command);\n")
+ file.Write(" EXPECT_EQ(sizeof(cmd) +\n")
+ file.Write(" RoundSizeToMultipleOfEntries(cmd.n * 4u),\n")
+ file.Write(" cmd.header.size * 4u);\n")
+ file.Write(" EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);\n");
+ file.Write(" CheckBytesWrittenMatchesExpectedSize(\n")
+ file.Write(" next_cmd, sizeof(cmd) +\n")
+ file.Write(" RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));\n")
+ file.Write(" // TODO(gman): Check that ids were inserted;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+
+class CreateHandler(TypeHandler):
+ """Handler for glCreate___ type functions."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def InitFunction(self, func):
+ """Overrriden from TypeHandler."""
+ func.AddCmdArg(Argument("client_id", 'uint32_t'))
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s))
+ .WillOnce(Return(kNewServiceId));
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s%(comma)skNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(Get%(resource_type)s(kNewClientId) != NULL);
+}
+"""
+ comma = ""
+ if len(func.GetOriginalArgs()):
+ comma =", "
+ self.WriteValidUnitTest(func, file, valid_test, {
+ 'comma': comma,
+ 'resource_type': func.name[6:],
+ }, *extras)
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s%(comma)skNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));%(gl_error_test)s
+}
+"""
+ self.WriteInvalidUnitTest(func, file, invalid_test, {
+ 'comma': comma,
+ }, *extras)
+
+  def WriteHandlerImplementation(self, func, file):
+    """Overridden from TypeHandler."""
+ file.Write(" uint32_t client_id = c.client_id;\n")
+ file.Write(" if (!%sHelper(%s)) {\n" %
+ (func.name, func.MakeCmdArgString("")))
+ file.Write(" return error::kInvalidArguments;\n")
+ file.Write(" }\n")
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("%s GLES2Implementation::%s(%s) {\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
+ func.WriteDestinationInitalizationValidation(file)
+ self.WriteClientGLCallLog(func, file)
+ for arg in func.GetOriginalArgs():
+ arg.WriteClientSideValidationCode(file, func)
+ file.Write(" GLuint client_id;\n")
+ file.Write(
+ " GetIdHandler(id_namespaces::kProgramsAndShaders)->\n")
+ file.Write(" MakeIds(this, 0, 1, &client_id);\n")
+ file.Write(" helper_->%s(%s);\n" %
+ (func.name, func.MakeCmdArgString("")))
+ file.Write(' GPU_CLIENT_LOG("returned " << client_id);\n')
+ file.Write(" CheckGLError();\n")
+ file.Write(" return client_id;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+
+class DeleteHandler(TypeHandler):
+ """Handler for glDelete___ single resource type functions."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def WriteServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("%s GLES2Implementation::%s(%s) {\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
+ func.WriteDestinationInitalizationValidation(file)
+ self.WriteClientGLCallLog(func, file)
+ for arg in func.GetOriginalArgs():
+ arg.WriteClientSideValidationCode(file, func)
+ file.Write(
+ " GPU_CLIENT_DCHECK(%s != 0);\n" % func.GetOriginalArgs()[-1].name)
+ file.Write(" %sHelper(%s);\n" %
+ (func.original_name, func.GetOriginalArgs()[-1].name))
+ file.Write(" CheckGLError();\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+
+class DELnHandler(TypeHandler):
+ """Handler for glDelete___ type functions."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def WriteGetDataSizeCode(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+"""
+ file.Write(code)
+
+ def WriteGLES2ImplementationUnitTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """
+TEST_F(GLES2ImplementationTest, %(name)s) {
+ GLuint ids[2] = { k%(types)sStartId, k%(types)sStartId + 1 };
+ struct Cmds {
+ cmds::%(name)sImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = k%(types)sStartId;
+ expected.data[1] = k%(types)sStartId + 1;
+ gl_->%(name)s(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+"""
+ file.Write(code % {
+ 'name': func.name,
+ 'types': func.GetInfo('resource_types'),
+ })
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(
+ *gl_,
+ %(gl_func_name)s(1, Pointee(kService%(upper_resource_name)sId)))
+ .Times(1);
+ GetSharedMemoryAs<GLuint*>()[0] = client_%(resource_name)s_id_;
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(
+ Get%(upper_resource_name)s(client_%(resource_name)s_id_) == NULL);
+}
+"""
+ self.WriteValidUnitTest(func, file, valid_test, {
+ 'resource_name': func.GetInfo('resource_type').lower(),
+ 'upper_resource_name': func.GetInfo('resource_type'),
+ }, *extras)
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs) {
+ GetSharedMemoryAs<GLuint*>()[0] = kInvalidClientId;
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+"""
+ self.WriteValidUnitTest(func, file, invalid_test, *extras)
+
+ def WriteImmediateServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(
+ *gl_,
+ %(gl_func_name)s(1, Pointee(kService%(upper_resource_name)sId)))
+ .Times(1);
+ cmds::%(name)s& cmd = *GetImmediateAs<cmds::%(name)s>();
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmd.Init(1, &client_%(resource_name)s_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_%(resource_name)s_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(
+ Get%(upper_resource_name)s(client_%(resource_name)s_id_) == NULL);
+}
+"""
+ self.WriteValidUnitTest(func, file, valid_test, {
+ 'resource_name': func.GetInfo('resource_type').lower(),
+ 'upper_resource_name': func.GetInfo('resource_type'),
+ }, *extras)
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs) {
+ cmds::%(name)s& cmd = *GetImmediateAs<cmds::%(name)s>();
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+"""
+ self.WriteValidUnitTest(func, file, invalid_test, *extras)
+
+  def WriteHandlerImplementation(self, func, file):
+    """Overridden from TypeHandler."""
+ file.Write(" %sHelper(n, %s);\n" %
+ (func.name, func.GetLastOriginalArg().name))
+
+  def WriteImmediateHandlerImplementation(self, func, file):
+    """Overridden from TypeHandler."""
+ file.Write(" %sHelper(n, %s);\n" %
+ (func.original_name, func.GetLastOriginalArg().name))
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ impl_decl = func.GetInfo('impl_decl')
+ if impl_decl == None or impl_decl == True:
+ args = {
+ 'return_type': func.return_type,
+ 'name': func.original_name,
+ 'typed_args': func.MakeTypedOriginalArgString(""),
+ 'args': func.MakeOriginalArgString(""),
+ 'resource_type': func.GetInfo('resource_type').lower(),
+ 'count_name': func.GetOriginalArgs()[0].name,
+ }
+ file.Write(
+ "%(return_type)s GLES2Implementation::%(name)s(%(typed_args)s) {\n" %
+ args)
+ file.Write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
+ func.WriteDestinationInitalizationValidation(file)
+ self.WriteClientGLCallLog(func, file)
+ file.Write(""" GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << %s[i]);
+ }
+ });
+""" % func.GetOriginalArgs()[1].name)
+ file.Write(""" GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(%s[i] != 0);
+ }
+ });
+""" % func.GetOriginalArgs()[1].name)
+ for arg in func.GetOriginalArgs():
+ arg.WriteClientSideValidationCode(file, func)
+ code = """ %(name)sHelper(%(args)s);
+ CheckGLError();
+}
+
+"""
+ file.Write(code % args)
+
+ def WriteImmediateCmdComputeSize(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" static uint32_t ComputeDataSize(GLsizei n) {\n")
+ file.Write(
+ " return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT\n")
+ file.Write(" }\n")
+ file.Write("\n")
+ file.Write(" static uint32_t ComputeSize(GLsizei n) {\n")
+ file.Write(" return static_cast<uint32_t>(\n")
+ file.Write(" sizeof(ValueType) + ComputeDataSize(n)); // NOLINT\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSetHeader(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" void SetHeader(GLsizei n) {\n")
+ file.Write(" header.SetCmdByTotalSize<ValueType>(ComputeSize(n));\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdInit(self, func, file):
+ """Overrriden from TypeHandler."""
+ last_arg = func.GetLastOriginalArg()
+ file.Write(" void Init(%s, %s _%s) {\n" %
+ (func.MakeTypedCmdArgString("_"),
+ last_arg.type, last_arg.name))
+ file.Write(" SetHeader(_n);\n")
+ args = func.GetCmdArgs()
+ for arg in args:
+ file.Write(" %s = _%s;\n" % (arg.name, arg.name))
+ file.Write(" memcpy(ImmediateDataAddress(this),\n")
+ file.Write(" _%s, ComputeDataSize(_n));\n" % last_arg.name)
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSet(self, func, file):
+ """Overrriden from TypeHandler."""
+ last_arg = func.GetLastOriginalArg()
+ copy_args = func.MakeCmdArgString("_", False)
+ file.Write(" void* Set(void* cmd%s, %s _%s) {\n" %
+ (func.MakeTypedCmdArgString("_", True),
+ last_arg.type, last_arg.name))
+ file.Write(" static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
+ (copy_args, last_arg.name))
+ file.Write(" const uint32_t size = ComputeSize(_n);\n")
+ file.Write(" return NextImmediateCmdAddressTotalSize<ValueType>("
+ "cmd, size);\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdHelper(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """ void %(name)s(%(typed_args)s) {
+ const uint32_t size = gles2::cmds::%(name)s::ComputeSize(n);
+ gles2::cmds::%(name)s* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::%(name)s>(size);
+ if (c) {
+ c->Init(%(args)s);
+ }
+ }
+
+"""
+ file.Write(code % {
+ "name": func.name,
+ "typed_args": func.MakeTypedOriginalArgString(""),
+ "args": func.MakeOriginalArgString(""),
+ })
+
+ def WriteImmediateFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
+ file.Write(" static GLuint ids[] = { 12, 23, 34, };\n")
+ file.Write(" cmds::%s& cmd = *GetBufferAs<cmds::%s>();\n" %
+ (func.name, func.name))
+ file.Write(" void* next_cmd = cmd.Set(\n")
+ file.Write(" &cmd, static_cast<GLsizei>(arraysize(ids)), ids);\n")
+ file.Write(" EXPECT_EQ(static_cast<uint32_t>(cmds::%s::kCmdId),\n" %
+ func.name)
+ file.Write(" cmd.header.command);\n")
+ file.Write(" EXPECT_EQ(sizeof(cmd) +\n")
+ file.Write(" RoundSizeToMultipleOfEntries(cmd.n * 4u),\n")
+ file.Write(" cmd.header.size * 4u);\n")
+ file.Write(" EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);\n");
+ file.Write(" CheckBytesWrittenMatchesExpectedSize(\n")
+ file.Write(" next_cmd, sizeof(cmd) +\n")
+ file.Write(" RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));\n")
+ file.Write(" // TODO(gman): Check that ids were inserted;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+
+class GETnHandler(TypeHandler):
+ """Handler for GETn for glGetBooleanv, glGetFloatv, ... type functions."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def NeedsDataTransferFunction(self, func):
+ """Overriden from TypeHandler."""
+ return False
+
+ def WriteServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ self.WriteServiceHandlerFunctionHeader(func, file)
+ last_arg = func.GetLastOriginalArg()
+
+ all_but_last_args = func.GetOriginalArgs()[:-1]
+ for arg in all_but_last_args:
+ arg.WriteGetCode(file)
+
+ code = """ typedef cmds::%(func_name)s::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ %(last_arg_type)s params = result ? result->GetData() : NULL;
+"""
+ file.Write(code % {
+ 'last_arg_type': last_arg.type,
+ 'func_name': func.name,
+ })
+ func.WriteHandlerValidation(file)
+ code = """ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+"""
+ shadowed = func.GetInfo('shadowed')
+ if not shadowed:
+ file.Write(' LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("%s");\n' % func.name)
+ file.Write(code)
+ func.WriteHandlerImplementation(file)
+ if shadowed:
+ code = """ result->SetNumResults(num_values);
+ return error::kNoError;
+}
+"""
+ else:
+ code = """ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "%(func_name)s", "");
+ }
+ return error::kNoError;
+}
+
+"""
+ file.Write(code % {'func_name': func.name})
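+    # Generated handler flow: read the non-pointer args, ask
+    # GetNumValuesReturnedForGLGet() how many values the pname yields, map the
+    # client's result shared memory, require the client to have zeroed
+    # result->size, perform the GL call, then record the number of results
+    # (after checking glGetError() unless the state is shadowed).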
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ impl_decl = func.GetInfo('impl_decl')
+ if impl_decl == None or impl_decl == True:
+ file.Write("%s GLES2Implementation::%s(%s) {\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
+ func.WriteDestinationInitalizationValidation(file)
+ self.WriteClientGLCallLog(func, file)
+ for arg in func.GetOriginalArgs():
+ arg.WriteClientSideValidationCode(file, func)
+ all_but_last_args = func.GetOriginalArgs()[:-1]
+ arg_string = (
+ ", ".join(["%s" % arg.name for arg in all_but_last_args]))
+ all_arg_string = (
+ ", ".join([
+ "%s" % arg.name
+ for arg in func.GetOriginalArgs() if not arg.IsConstant()]))
+ self.WriteTraceEvent(func, file)
+ code = """ if (%(func_name)sHelper(%(all_arg_string)s)) {
+ return;
+ }
+ typedef cmds::%(func_name)s::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->%(func_name)s(%(arg_string)s,
+ GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+"""
+ file.Write(code % {
+ 'func_name': func.name,
+ 'arg_string': arg_string,
+ 'all_arg_string': all_arg_string,
+ })
+
+ def WriteGLES2ImplementationUnitTest(self, func, file):
+ """Writes the GLES2 Implemention unit test."""
+ code = """
+TEST_F(GLES2ImplementationTest, %(name)s) {
+ struct Cmds {
+ cmds::%(name)s cmd;
+ };
+ typedef cmds::%(name)s::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(%(cmd_args)s, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->%(name)s(%(args)s, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+"""
+ first_cmd_arg = func.GetCmdArgs()[0].GetValidNonCachedClientSideCmdArg(func)
+ if not first_cmd_arg:
+ return
+
+ first_gl_arg = func.GetCmdArgs()[0].GetValidNonCachedClientSideArg(func)
+ cmd_arg_strings = [first_cmd_arg]
+ for arg in func.GetCmdArgs()[1:-2]:
+ cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func))
+ gl_arg_strings = [first_gl_arg]
+ for arg in func.GetOriginalArgs()[1:-1]:
+ gl_arg_strings.append(arg.GetValidClientSideArg(func))
+
+ file.Write(code % {
+ 'name': func.name,
+ 'args': ", ".join(gl_arg_strings),
+ 'cmd_args': ", ".join(cmd_arg_strings),
+ })
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ typedef cmds::%(name)s::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(local_gl_args)s));
+ result->size = 0;
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ %(valid_pname)s),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ gl_arg_strings = []
+ valid_pname = ''
+ for arg in func.GetOriginalArgs()[:-1]:
+ arg_value = arg.GetValidGLArg(func)
+ gl_arg_strings.append(arg_value)
+ if arg.name == 'pname':
+ valid_pname = arg_value
+ if func.GetInfo('gl_test_func') == 'glGetIntegerv':
+ gl_arg_strings.append("_")
+ else:
+ gl_arg_strings.append("result->GetData()")
+
+ self.WriteValidUnitTest(func, file, valid_test, {
+ 'local_gl_args': ", ".join(gl_arg_strings),
+ 'valid_pname': valid_pname,
+ }, *extras)
+
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s::Result* result =
+ static_cast<cmds::%(name)s::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);%(gl_error_test)s
+}
+"""
+ self.WriteInvalidUnitTest(func, file, invalid_test, *extras)
+
+class ArrayArgTypeHandler(TypeHandler):
+ """Base class for type handlers that handle args that are arrays"""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def GetArrayType(self, func):
+ """Returns the type of the element in the element array being PUT to."""
+ for arg in func.GetOriginalArgs():
+ if arg.IsPointer():
+ element_type = arg.GetPointedType()
+ return element_type
+
+ # Special case: array type handler is used for a function that is forwarded
+ # to the actual array type implementation
+ element_type = func.GetOriginalArgs()[-1].type
+ assert all(arg.type == element_type \
+ for arg in func.GetOriginalArgs()[-self.GetArrayCount(func):])
+ return element_type
+
+ def GetArrayCount(self, func):
+ """Returns the count of the elements in the array being PUT to."""
+ return func.GetInfo('count')
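+    # Illustrative example (the actual values come from the function table
+    # elsewhere in this file): a glUniform4fv-style entry with a
+    # 'const GLfloat*' pointer arg and 'count': 4 yields
+    # GetArrayType() == 'GLfloat' and GetArrayCount() == 4.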
+
+class PUTHandler(ArrayArgTypeHandler):
+ """Handler for glTexParameter_v, glVertexAttrib_v functions."""
+
+ def __init__(self):
+ ArrayArgTypeHandler.__init__(self)
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Writes the service unit test for a command."""
+ expected_call = "EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));"
+ if func.GetInfo("first_element_only"):
+ gl_arg_strings = [
+ arg.GetValidGLArg(func) for arg in func.GetOriginalArgs()
+ ]
+ gl_arg_strings[-1] = "*" + gl_arg_strings[-1]
+ expected_call = ("EXPECT_CALL(*gl_, %%(gl_func_name)s(%s));" %
+ ", ".join(gl_arg_strings))
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ GetSharedMemoryAs<%(data_type)s*>()[0] = %(data_value)s;
+ %(expected_call)s
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ extra = {
+ 'data_type': self.GetArrayType(func),
+ 'data_value': func.GetInfo('data_value') or '0',
+ 'expected_call': expected_call,
+ }
+ self.WriteValidUnitTest(func, file, valid_test, extra, *extras)
+
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ GetSharedMemoryAs<%(data_type)s*>()[0] = %(data_value)s;
+ EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
+}
+"""
+ self.WriteInvalidUnitTest(func, file, invalid_test, extra, *extras)
+
+ def WriteImmediateServiceUnitTest(self, func, file, *extras):
+ """Writes the service unit test for a command."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ cmds::%(name)s& cmd = *GetImmediateAs<cmds::%(name)s>();
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ %(data_type)s temp[%(data_count)s] = { %(data_value)s, };
+ cmd.Init(%(gl_args)s, &temp[0]);
+ EXPECT_CALL(
+ *gl_,
+ %(gl_func_name)s(%(gl_args)s, %(data_ref)sreinterpret_cast<
+ %(data_type)s*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ gl_arg_strings = [
+ arg.GetValidGLArg(func) for arg in func.GetOriginalArgs()[0:-1]
+ ]
+ gl_any_strings = ["_"] * len(gl_arg_strings)
+
+ extra = {
+ 'data_ref': ("*" if func.GetInfo('first_element_only') else ""),
+ 'data_type': self.GetArrayType(func),
+ 'data_count': self.GetArrayCount(func),
+ 'data_value': func.GetInfo('data_value') or '0',
+ 'gl_args': ", ".join(gl_arg_strings),
+ 'gl_any_args': ", ".join(gl_any_strings),
+ }
+ self.WriteValidUnitTest(func, file, valid_test, extra, *extras)
+
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
+ cmds::%(name)s& cmd = *GetImmediateAs<cmds::%(name)s>();
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_any_args)s, _)).Times(0);
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ %(data_type)s temp[%(data_count)s] = { %(data_value)s, };
+ cmd.Init(%(all_but_last_args)s, &temp[0]);
+ EXPECT_EQ(error::%(parse_result)s,
+ ExecuteImmediateCmd(cmd, sizeof(temp)));%(gl_error_test)s
+}
+"""
+ self.WriteInvalidUnitTest(func, file, invalid_test, extra, *extras)
+
+ def WriteGetDataSizeCode(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(%s), %d, &data_size)) {
+ return error::kOutOfBounds;
+ }
+"""
+ file.Write(code % (self.GetArrayType(func), self.GetArrayCount(func)))
+ if func.IsImmediate():
+ file.Write(" if (data_size > immediate_data_size) {\n")
+ file.Write(" return error::kOutOfBounds;\n")
+ file.Write(" }\n")
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ impl_func = func.GetInfo('impl_func')
+    if impl_func != None and impl_func != True:
+      return
+ file.Write("%s GLES2Implementation::%s(%s) {\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
+ func.WriteDestinationInitalizationValidation(file)
+ self.WriteClientGLCallLog(func, file)
+ last_arg_name = func.GetLastOriginalArg().name
+ values_str = ' << ", " << '.join(
+ ["%s[%d]" % (last_arg_name, ndx) \
+ for ndx in range(0, self.GetArrayCount(func))])
+ file.Write(' GPU_CLIENT_LOG("values: " << %s);\n' % values_str)
+ for arg in func.GetOriginalArgs():
+ arg.WriteClientSideValidationCode(file, func)
+ file.Write(" helper_->%sImmediate(%s);\n" %
+ (func.name, func.MakeOriginalArgString("")))
+ file.Write(" CheckGLError();\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteGLES2ImplementationUnitTest(self, func, file):
+ """Writes the GLES2 Implemention unit test."""
+ client_test = func.GetInfo('client_test')
+    if client_test != None and client_test != True:
+      return
+ code = """
+TEST_F(GLES2ImplementationTest, %(name)s) {
+ %(type)s data[%(count)d] = {0};
+ struct Cmds {
+ cmds::%(name)sImmediate cmd;
+ %(type)s data[%(count)d];
+ };
+
+ for (int jj = 0; jj < %(count)d; ++jj) {
+ data[jj] = static_cast<%(type)s>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(%(cmd_args)s, &data[0]);
+ gl_->%(name)s(%(args)s, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+"""
+ cmd_arg_strings = [
+ arg.GetValidClientSideCmdArg(func) for arg in func.GetCmdArgs()[0:-2]
+ ]
+ gl_arg_strings = [
+ arg.GetValidClientSideArg(func) for arg in func.GetOriginalArgs()[0:-1]
+ ]
+
+ file.Write(code % {
+ 'name': func.name,
+ 'type': self.GetArrayType(func),
+ 'count': self.GetArrayCount(func),
+ 'args': ", ".join(gl_arg_strings),
+ 'cmd_args': ", ".join(cmd_arg_strings),
+ })
+
+ def WriteImmediateCmdComputeSize(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" static uint32_t ComputeDataSize() {\n")
+ file.Write(" return static_cast<uint32_t>(\n")
+ file.Write(" sizeof(%s) * %d); // NOLINT\n" %
+ (self.GetArrayType(func), self.GetArrayCount(func)))
+ file.Write(" }\n")
+ file.Write("\n")
+ file.Write(" static uint32_t ComputeSize() {\n")
+ file.Write(" return static_cast<uint32_t>(\n")
+ file.Write(
+ " sizeof(ValueType) + ComputeDataSize()); // NOLINT\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSetHeader(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" void SetHeader() {\n")
+ file.Write(
+ " header.SetCmdByTotalSize<ValueType>(ComputeSize());\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdInit(self, func, file):
+ """Overrriden from TypeHandler."""
+ last_arg = func.GetLastOriginalArg()
+ file.Write(" void Init(%s, %s _%s) {\n" %
+ (func.MakeTypedCmdArgString("_"),
+ last_arg.type, last_arg.name))
+ file.Write(" SetHeader();\n")
+ args = func.GetCmdArgs()
+ for arg in args:
+ file.Write(" %s = _%s;\n" % (arg.name, arg.name))
+ file.Write(" memcpy(ImmediateDataAddress(this),\n")
+ file.Write(" _%s, ComputeDataSize());\n" % last_arg.name)
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSet(self, func, file):
+ """Overrriden from TypeHandler."""
+ last_arg = func.GetLastOriginalArg()
+ copy_args = func.MakeCmdArgString("_", False)
+ file.Write(" void* Set(void* cmd%s, %s _%s) {\n" %
+ (func.MakeTypedCmdArgString("_", True),
+ last_arg.type, last_arg.name))
+ file.Write(" static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
+ (copy_args, last_arg.name))
+ file.Write(" const uint32_t size = ComputeSize();\n")
+ file.Write(" return NextImmediateCmdAddressTotalSize<ValueType>("
+ "cmd, size);\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdHelper(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """ void %(name)s(%(typed_args)s) {
+ const uint32_t size = gles2::cmds::%(name)s::ComputeSize();
+ gles2::cmds::%(name)s* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::%(name)s>(size);
+ if (c) {
+ c->Init(%(args)s);
+ }
+ }
+
+"""
+ file.Write(code % {
+ "name": func.name,
+ "typed_args": func.MakeTypedOriginalArgString(""),
+ "args": func.MakeOriginalArgString(""),
+ })
+
+ def WriteImmediateFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
+ file.Write(" const int kSomeBaseValueToTestWith = 51;\n")
+ file.Write(" static %s data[] = {\n" % self.GetArrayType(func))
+ for v in range(0, self.GetArrayCount(func)):
+ file.Write(" static_cast<%s>(kSomeBaseValueToTestWith + %d),\n" %
+ (self.GetArrayType(func), v))
+ file.Write(" };\n")
+ file.Write(" cmds::%s& cmd = *GetBufferAs<cmds::%s>();\n" %
+ (func.name, func.name))
+ file.Write(" void* next_cmd = cmd.Set(\n")
+ file.Write(" &cmd")
+ args = func.GetCmdArgs()
+ for value, arg in enumerate(args):
+ file.Write(",\n static_cast<%s>(%d)" % (arg.type, value + 11))
+ file.Write(",\n data);\n")
+ args = func.GetCmdArgs()
+ file.Write(" EXPECT_EQ(static_cast<uint32_t>(cmds::%s::kCmdId),\n"
+ % func.name)
+ file.Write(" cmd.header.command);\n")
+ file.Write(" EXPECT_EQ(sizeof(cmd) +\n")
+ file.Write(" RoundSizeToMultipleOfEntries(sizeof(data)),\n")
+ file.Write(" cmd.header.size * 4u);\n")
+ for value, arg in enumerate(args):
+ file.Write(" EXPECT_EQ(static_cast<%s>(%d), cmd.%s);\n" %
+ (arg.type, value + 11, arg.name))
+ file.Write(" CheckBytesWrittenMatchesExpectedSize(\n")
+ file.Write(" next_cmd, sizeof(cmd) +\n")
+ file.Write(" RoundSizeToMultipleOfEntries(sizeof(data)));\n")
+ file.Write(" // TODO(gman): Check that data was inserted;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+
+class PUTnHandler(ArrayArgTypeHandler):
+ """Handler for PUTn 'glUniform__v' type functions."""
+
+ def __init__(self):
+ ArrayArgTypeHandler.__init__(self)
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overridden from TypeHandler."""
+ ArrayArgTypeHandler.WriteServiceUnitTest(self, func, file, *extras)
+
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgsCountTooLarge) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ gl_arg_strings = []
+ arg_strings = []
+ for count, arg in enumerate(func.GetOriginalArgs()):
+ # hardcoded to match unit tests.
+ if count == 0:
+ # the location of the second element of the 2nd uniform.
+ # defined in GLES2DecoderBase::SetupShaderForUniform
+ gl_arg_strings.append("3")
+ arg_strings.append("ProgramManager::MakeFakeLocation(1, 1)")
+ elif count == 1:
+ # the number of elements that gl will be called with.
+ gl_arg_strings.append("3")
+ # the number of elements requested in the command.
+ arg_strings.append("5")
+ else:
+ gl_arg_strings.append(arg.GetValidGLArg(func))
+ if not arg.IsConstant():
+ arg_strings.append(arg.GetValidArg(func))
+ extra = {
+ 'gl_args': ", ".join(gl_arg_strings),
+ 'args': ", ".join(arg_strings),
+ }
+ self.WriteValidUnitTest(func, file, valid_test, extra, *extras)
+
+ def WriteImmediateServiceUnitTest(self, func, file, *extras):
+ """Overridden from TypeHandler."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ cmds::%(name)s& cmd = *GetImmediateAs<cmds::%(name)s>();
+ EXPECT_CALL(
+ *gl_,
+ %(gl_func_name)s(%(gl_args)s,
+ reinterpret_cast<%(data_type)s*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ %(data_type)s temp[%(data_count)s * 2] = { 0, };
+ cmd.Init(%(args)s, &temp[0]);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ gl_arg_strings = []
+ gl_any_strings = []
+ arg_strings = []
+ for arg in func.GetOriginalArgs()[0:-1]:
+ gl_arg_strings.append(arg.GetValidGLArg(func))
+ gl_any_strings.append("_")
+ if not arg.IsConstant():
+ arg_strings.append(arg.GetValidArg(func))
+ extra = {
+ 'data_type': self.GetArrayType(func),
+ 'data_count': self.GetArrayCount(func),
+ 'args': ", ".join(arg_strings),
+ 'gl_args': ", ".join(gl_arg_strings),
+ 'gl_any_args': ", ".join(gl_any_strings),
+ }
+ self.WriteValidUnitTest(func, file, valid_test, extra, *extras)
+
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
+ cmds::%(name)s& cmd = *GetImmediateAs<cmds::%(name)s>();
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_any_args)s, _)).Times(0);
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ %(data_type)s temp[%(data_count)s * 2] = { 0, };
+ cmd.Init(%(all_but_last_args)s, &temp[0]);
+ EXPECT_EQ(error::%(parse_result)s,
+ ExecuteImmediateCmd(cmd, sizeof(temp)));%(gl_error_test)s
+}
+"""
+ self.WriteInvalidUnitTest(func, file, invalid_test, extra, *extras)
+
+ def WriteGetDataSizeCode(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(%s), %d, &data_size)) {
+ return error::kOutOfBounds;
+ }
+"""
+ file.Write(code % (self.GetArrayType(func), self.GetArrayCount(func)))
+ if func.IsImmediate():
+ file.Write(" if (data_size > immediate_data_size) {\n")
+ file.Write(" return error::kOutOfBounds;\n")
+ file.Write(" }\n")
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write("%s GLES2Implementation::%s(%s) {\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
+ func.WriteDestinationInitalizationValidation(file)
+ self.WriteClientGLCallLog(func, file)
+ last_arg_name = func.GetLastOriginalArg().name
+ file.Write(""" GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+""")
+ values_str = ' << ", " << '.join(
+ ["%s[%d + i * %d]" % (
+ last_arg_name, ndx, self.GetArrayCount(func)) for ndx in range(
+ 0, self.GetArrayCount(func))])
+ file.Write(' GPU_CLIENT_LOG(" " << i << ": " << %s);\n' % values_str)
+ file.Write(" }\n });\n")
+ for arg in func.GetOriginalArgs():
+ arg.WriteClientSideValidationCode(file, func)
+ file.Write(" helper_->%sImmediate(%s);\n" %
+ (func.name, func.MakeInitString("")))
+ file.Write(" CheckGLError();\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteGLES2ImplementationUnitTest(self, func, file):
+ """Writes the GLES2 Implemention unit test."""
+ code = """
+TEST_F(GLES2ImplementationTest, %(name)s) {
+ %(type)s data[%(count_param)d][%(count)d] = {{0}};
+ struct Cmds {
+ cmds::%(name)sImmediate cmd;
+ %(type)s data[%(count_param)d][%(count)d];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < %(count_param)d; ++ii) {
+ for (int jj = 0; jj < %(count)d; ++jj) {
+ data[ii][jj] = static_cast<%(type)s>(ii * %(count)d + jj);
+ }
+ }
+ expected.cmd.Init(%(cmd_args)s, &data[0][0]);
+ gl_->%(name)s(%(args)s, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+"""
+ cmd_arg_strings = [
+ arg.GetValidClientSideCmdArg(func) for arg in func.GetCmdArgs()[0:-2]
+ ]
+ gl_arg_strings = []
+ count_param = 0
+ for arg in func.GetOriginalArgs()[0:-1]:
+ valid_value = arg.GetValidClientSideArg(func)
+ gl_arg_strings.append(valid_value)
+ if arg.name == "count":
+ count_param = int(valid_value)
+ file.Write(code % {
+ 'name': func.name,
+ 'type': self.GetArrayType(func),
+ 'count': self.GetArrayCount(func),
+ 'args': ", ".join(gl_arg_strings),
+ 'cmd_args': ", ".join(cmd_arg_strings),
+ 'count_param': count_param,
+ })
+
+ # Test constants for invalid values, as they are not tested by the
+ # service.
+ constants = [
+ arg for arg in func.GetOriginalArgs()[0:-1] if arg.IsConstant()
+ ]
+ if not constants:
+ return
+
+ code = """
+TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
+ %(type)s data[%(count_param)d][%(count)d] = {{0}};
+ for (int ii = 0; ii < %(count_param)d; ++ii) {
+ for (int jj = 0; jj < %(count)d; ++jj) {
+ data[ii][jj] = static_cast<%(type)s>(ii * %(count)d + jj);
+ }
+ }
+ gl_->%(name)s(%(args)s, &data[0][0]);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(%(gl_error)s, CheckError());
+}
+"""
+ for invalid_arg in constants:
+ gl_arg_strings = []
+ invalid = invalid_arg.GetInvalidArg(func)
+ for arg in func.GetOriginalArgs()[0:-1]:
+ if arg is invalid_arg:
+ gl_arg_strings.append(invalid[0])
+ else:
+ valid_value = arg.GetValidClientSideArg(func)
+ gl_arg_strings.append(valid_value)
+ if arg.name == "count":
+ count_param = int(valid_value)
+
+ file.Write(code % {
+ 'name': func.name,
+ 'invalid_index': func.GetOriginalArgs().index(invalid_arg),
+ 'type': self.GetArrayType(func),
+ 'count': self.GetArrayCount(func),
+ 'args': ", ".join(gl_arg_strings),
+ 'gl_error': invalid[2],
+ 'count_param': count_param,
+ })
+
+
+ def WriteImmediateCmdComputeSize(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" static uint32_t ComputeDataSize(GLsizei count) {\n")
+ file.Write(" return static_cast<uint32_t>(\n")
+ file.Write(" sizeof(%s) * %d * count); // NOLINT\n" %
+ (self.GetArrayType(func), self.GetArrayCount(func)))
+ file.Write(" }\n")
+ file.Write("\n")
+ file.Write(" static uint32_t ComputeSize(GLsizei count) {\n")
+ file.Write(" return static_cast<uint32_t>(\n")
+ file.Write(
+ " sizeof(ValueType) + ComputeDataSize(count)); // NOLINT\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSetHeader(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" void SetHeader(GLsizei count) {\n")
+ file.Write(
+ " header.SetCmdByTotalSize<ValueType>(ComputeSize(count));\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdInit(self, func, file):
+ """Overrriden from TypeHandler."""
+ last_arg = func.GetLastOriginalArg()
+ file.Write(" void Init(%s, %s _%s) {\n" %
+ (func.MakeTypedCmdArgString("_"),
+ last_arg.type, last_arg.name))
+ file.Write(" SetHeader(_count);\n")
+ args = func.GetCmdArgs()
+ for arg in args:
+ file.Write(" %s = _%s;\n" % (arg.name, arg.name))
+ file.Write(" memcpy(ImmediateDataAddress(this),\n")
+ file.Write(" _%s, ComputeDataSize(_count));\n" % last_arg.name)
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdSet(self, func, file):
+ """Overrriden from TypeHandler."""
+ last_arg = func.GetLastOriginalArg()
+ copy_args = func.MakeCmdArgString("_", False)
+ file.Write(" void* Set(void* cmd%s, %s _%s) {\n" %
+ (func.MakeTypedCmdArgString("_", True),
+ last_arg.type, last_arg.name))
+ file.Write(" static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
+ (copy_args, last_arg.name))
+ file.Write(" const uint32_t size = ComputeSize(_count);\n")
+ file.Write(" return NextImmediateCmdAddressTotalSize<ValueType>("
+ "cmd, size);\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdHelper(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """ void %(name)s(%(typed_args)s) {
+ const uint32_t size = gles2::cmds::%(name)s::ComputeSize(count);
+ gles2::cmds::%(name)s* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::%(name)s>(size);
+ if (c) {
+ c->Init(%(args)s);
+ }
+ }
+
+"""
+ file.Write(code % {
+ "name": func.name,
+ "typed_args": func.MakeTypedInitString(""),
+ "args": func.MakeInitString("")
+ })
+
+ def WriteImmediateFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ args = func.GetCmdArgs()
+ count_param = 0
+ for arg in args:
+ if arg.name == "count":
+ count_param = int(arg.GetValidClientSideCmdArg(func))
+ file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
+ file.Write(" const int kSomeBaseValueToTestWith = 51;\n")
+ file.Write(" static %s data[] = {\n" % self.GetArrayType(func))
+ for v in range(0, self.GetArrayCount(func) * count_param):
+ file.Write(" static_cast<%s>(kSomeBaseValueToTestWith + %d),\n" %
+ (self.GetArrayType(func), v))
+ file.Write(" };\n")
+ file.Write(" cmds::%s& cmd = *GetBufferAs<cmds::%s>();\n" %
+ (func.name, func.name))
+ file.Write(" const GLsizei kNumElements = %d;\n" % count_param)
+ file.Write(" const size_t kExpectedCmdSize =\n")
+ file.Write(" sizeof(cmd) + kNumElements * sizeof(%s) * %d;\n" %
+ (self.GetArrayType(func), self.GetArrayCount(func)))
+ file.Write(" void* next_cmd = cmd.Set(\n")
+ file.Write(" &cmd")
+ for value, arg in enumerate(args):
+ file.Write(",\n static_cast<%s>(%d)" % (arg.type, value + 1))
+ file.Write(",\n data);\n")
+ file.Write(" EXPECT_EQ(static_cast<uint32_t>(cmds::%s::kCmdId),\n" %
+ func.name)
+ file.Write(" cmd.header.command);\n")
+ file.Write(" EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);\n")
+ for value, arg in enumerate(args):
+ file.Write(" EXPECT_EQ(static_cast<%s>(%d), cmd.%s);\n" %
+ (arg.type, value + 1, arg.name))
+ file.Write(" CheckBytesWrittenMatchesExpectedSize(\n")
+ file.Write(" next_cmd, sizeof(cmd) +\n")
+ file.Write(" RoundSizeToMultipleOfEntries(sizeof(data)));\n")
+ file.Write(" // TODO(gman): Check that data was inserted;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+
+class PUTXnHandler(ArrayArgTypeHandler):
+ """Handler for glUniform?f functions."""
+ def __init__(self):
+ ArrayArgTypeHandler.__init__(self)
+
+ def WriteHandlerImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """ %(type)s temp[%(count)s] = { %(values)s};
+ Do%(name)sv(%(location)s, 1, &temp[0]);
+"""
+ values = ""
+ args = func.GetOriginalArgs()
+ count = int(self.GetArrayCount(func))
+ num_args = len(args)
+ for ii in range(count):
+ values += "%s, " % args[len(args) - count + ii].name
+
+ file.Write(code % {
+ 'name': func.name,
+ 'count': self.GetArrayCount(func),
+ 'type': self.GetArrayType(func),
+ 'location': args[0].name,
+ 'args': func.MakeOriginalArgString(""),
+ 'values': values,
+ })
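+    # Illustrative expansion (assumes a Uniform2f-style entry whose last two
+    # args are named x and y); the generated handler body becomes roughly:
+    #   GLfloat temp[2] = { x, y, };
+    #   DoUniform2fv(location, 1, &temp[0]);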
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(*gl_, %(name)sv(%(local_args)s));
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ args = func.GetOriginalArgs()
+ local_args = "%s, 1, _" % args[0].GetValidGLArg(func)
+ self.WriteValidUnitTest(func, file, valid_test, {
+ 'name': func.name,
+ 'count': self.GetArrayCount(func),
+ 'local_args': local_args,
+ }, *extras)
+
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
+  EXPECT_CALL(*gl_, %(name)sv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
+}
+"""
+ self.WriteInvalidUnitTest(func, file, invalid_test, {
+ 'name': func.GetInfo('name'),
+ 'count': self.GetArrayCount(func),
+ })
+
+
+class GLcharHandler(CustomHandler):
+ """Handler for functions that pass a single string ."""
+
+ def __init__(self):
+ CustomHandler.__init__(self)
+
+ def WriteImmediateCmdComputeSize(self, func, file):
+ """Overrriden from TypeHandler."""
+ file.Write(" static uint32_t ComputeSize(uint32_t data_size) {\n")
+ file.Write(" return static_cast<uint32_t>(\n")
+ file.Write(" sizeof(ValueType) + data_size); // NOLINT\n")
+ file.Write(" }\n")
+
+ def WriteImmediateCmdSetHeader(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """
+ void SetHeader(uint32_t data_size) {
+ header.SetCmdBySize<ValueType>(data_size);
+ }
+"""
+ file.Write(code)
+
+ def WriteImmediateCmdInit(self, func, file):
+ """Overrriden from TypeHandler."""
+ last_arg = func.GetLastOriginalArg()
+ args = func.GetCmdArgs()
+ set_code = []
+ for arg in args:
+ set_code.append(" %s = _%s;" % (arg.name, arg.name))
+ code = """
+ void Init(%(typed_args)s, uint32_t _data_size) {
+ SetHeader(_data_size);
+%(set_code)s
+ memcpy(ImmediateDataAddress(this), _%(last_arg)s, _data_size);
+ }
+
+"""
+ file.Write(code % {
+ "typed_args": func.MakeTypedArgString("_"),
+ "set_code": "\n".join(set_code),
+ "last_arg": last_arg.name
+ })
+
+ def WriteImmediateCmdSet(self, func, file):
+ """Overrriden from TypeHandler."""
+ last_arg = func.GetLastOriginalArg()
+ file.Write(" void* Set(void* cmd%s, uint32_t _data_size) {\n" %
+ func.MakeTypedCmdArgString("_", True))
+ file.Write(" static_cast<ValueType*>(cmd)->Init(%s, _data_size);\n" %
+ func.MakeCmdArgString("_"))
+ file.Write(" return NextImmediateCmdAddress<ValueType>("
+ "cmd, _data_size);\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteImmediateCmdHelper(self, func, file):
+ """Overrriden from TypeHandler."""
+ code = """ void %(name)s(%(typed_args)s) {
+ const uint32_t data_size = strlen(name);
+ gles2::cmds::%(name)s* c =
+ GetImmediateCmdSpace<gles2::cmds::%(name)s>(data_size);
+ if (c) {
+ c->Init(%(args)s, data_size);
+ }
+ }
+
+"""
+ file.Write(code % {
+ "name": func.name,
+ "typed_args": func.MakeTypedOriginalArgString(""),
+ "args": func.MakeOriginalArgString(""),
+ })
+
+
+ def WriteImmediateFormatTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ init_code = []
+ check_code = []
+ all_but_last_arg = func.GetCmdArgs()[:-1]
+ for value, arg in enumerate(all_but_last_arg):
+ init_code.append(" static_cast<%s>(%d)," % (arg.type, value + 11))
+ for value, arg in enumerate(all_but_last_arg):
+ check_code.append(" EXPECT_EQ(static_cast<%s>(%d), cmd.%s);" %
+ (arg.type, value + 11, arg.name))
+ code = """
+TEST_F(GLES2FormatTest, %(func_name)s) {
+ cmds::%(func_name)s& cmd = *GetBufferAs<cmds::%(func_name)s>();
+ static const char* const test_str = \"test string\";
+ void* next_cmd = cmd.Set(
+ &cmd,
+%(init_code)s
+ test_str,
+ strlen(test_str));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::%(func_name)s::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) +
+ RoundSizeToMultipleOfEntries(strlen(test_str)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<char*>(next_cmd),
+ reinterpret_cast<char*>(&cmd) + sizeof(cmd) +
+ RoundSizeToMultipleOfEntries(strlen(test_str)));
+%(check_code)s
+ EXPECT_EQ(static_cast<uint32_t>(strlen(test_str)), cmd.data_size);
+ EXPECT_EQ(0, memcmp(test_str, ImmediateDataAddress(&cmd), strlen(test_str)));
+ CheckBytesWritten(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(strlen(test_str)),
+ sizeof(cmd) + strlen(test_str));
+}
+
+"""
+ file.Write(code % {
+ 'func_name': func.name,
+ 'init_code': "\n".join(init_code),
+ 'check_code': "\n".join(check_code),
+ })
+
+
+class GLcharNHandler(CustomHandler):
+ """Handler for functions that pass a single string with an optional len."""
+
+ def __init__(self):
+ CustomHandler.__init__(self)
+
+ def InitFunction(self, func):
+ """Overrriden from TypeHandler."""
+ func.cmd_args = []
+ func.AddCmdArg(Argument('bucket_id', 'GLuint'))
+
+ def NeedsDataTransferFunction(self, func):
+ """Overriden from TypeHandler."""
+ return False
+
+ def AddBucketFunction(self, generator, func):
+ """Overrriden from TypeHandler."""
+ pass
+
+ def WriteServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ self.WriteServiceHandlerFunctionHeader(func, file)
+ file.Write("""
+ GLuint bucket_id = static_cast<GLuint>(c.%(bucket_id)s);
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string str;
+ if (!bucket->GetAsString(&str)) {
+ return error::kInvalidArguments;
+ }
+ %(gl_func_name)s(0, str.c_str());
+ return error::kNoError;
+}
+
+""" % {
+ 'name': func.name,
+ 'gl_func_name': func.GetGLFunctionName(),
+ 'bucket_id': func.cmd_args[0].name,
+ })
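+    # The generated handler never sees a raw string pointer: the client is
+    # expected to have written the string into the bucket named by bucket_id,
+    # and the service reads it back with GetAsString() before calling GL.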
+
+
+class IsHandler(TypeHandler):
+ """Handler for glIs____ type and glGetError functions."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def InitFunction(self, func):
+ """Overrriden from TypeHandler."""
+ func.AddCmdArg(Argument("result_shm_id", 'uint32_t'))
+ func.AddCmdArg(Argument("result_shm_offset", 'uint32_t'))
+ if func.GetInfo('result') == None:
+ func.AddInfo('result', ['uint32_t'])
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s%(comma)sshared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ comma = ""
+ if len(func.GetOriginalArgs()):
+ comma =", "
+ self.WriteValidUnitTest(func, file, valid_test, {
+ 'comma': comma,
+ }, *extras)
+
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s%(comma)sshared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
+}
+"""
+ self.WriteInvalidUnitTest(func, file, invalid_test, {
+ 'comma': comma,
+ }, *extras)
+
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgsBadSharedMemoryId) {
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
+ SpecializedSetup<cmds::%(name)s, 0>(false);
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s%(comma)skInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(%(args)s%(comma)sshared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+"""
+ self.WriteValidUnitTest(func, file, invalid_test, {
+ 'comma': comma,
+ }, *extras)
+
+ def WriteServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ self.WriteServiceHandlerFunctionHeader(func, file)
+ args = func.GetOriginalArgs()
+ for arg in args:
+ arg.WriteGetCode(file)
+
+ code = """ typedef cmds::%(func_name)s::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+"""
+ file.Write(code % {'func_name': func.name})
+ func.WriteHandlerValidation(file)
+ file.Write(" *result_dst = %s(%s);\n" %
+ (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
+ file.Write(" return error::kNoError;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ impl_func = func.GetInfo('impl_func')
+ if impl_func == None or impl_func == True:
+ error_value = func.GetInfo("error_value") or "GL_FALSE"
+ file.Write("%s GLES2Implementation::%s(%s) {\n" %
+ (func.return_type, func.original_name,
+ func.MakeTypedOriginalArgString("")))
+ file.Write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
+ self.WriteTraceEvent(func, file)
+ func.WriteDestinationInitalizationValidation(file)
+ self.WriteClientGLCallLog(func, file)
+ file.Write(" typedef cmds::%s::Result Result;\n" % func.name)
+ file.Write(" Result* result = GetResultAs<Result*>();\n")
+ file.Write(" if (!result) {\n")
+ file.Write(" return %s;\n" % error_value)
+ file.Write(" }\n")
+ file.Write(" *result = 0;\n")
+ arg_string = func.MakeOriginalArgString("")
+ comma = ""
+ if len(arg_string) > 0:
+ comma = ", "
+ file.Write(
+ " helper_->%s(%s%sGetResultShmId(), GetResultShmOffset());\n" %
+ (func.name, arg_string, comma))
+ file.Write(" WaitForCmd();\n")
+ file.Write(" %s result_value = *result;\n" % func.return_type)
+ file.Write(' GPU_CLIENT_LOG("returned " << result_value);\n')
+ file.Write(" CheckGLError();\n")
+ file.Write(" return result_value;\n")
+ file.Write("}\n")
+ file.Write("\n")
+
+ def WriteGLES2ImplementationUnitTest(self, func, file):
+ """Overrriden from TypeHandler."""
+ client_test = func.GetInfo('client_test')
+ if client_test == None or client_test == True:
+ code = """
+TEST_F(GLES2ImplementationTest, %(name)s) {
+ struct Cmds {
+ cmds::%(name)s cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::%(name)s::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->%(name)s(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+"""
+ file.Write(code % {
+ 'name': func.name,
+ })
+
+
+class STRnHandler(TypeHandler):
+ """Handler for GetProgramInfoLog, GetShaderInfoLog, GetShaderSource, and
+ GetTranslatedShaderSourceANGLE."""
+
+ def __init__(self):
+ TypeHandler.__init__(self)
+
+ def InitFunction(self, func):
+ """Overrriden from TypeHandler."""
+ # remove all but the first cmd args.
+ cmd_args = func.GetCmdArgs()
+ func.ClearCmdArgs()
+ func.AddCmdArg(cmd_args[0])
+ # add on a bucket id.
+ func.AddCmdArg(Argument('bucket_id', 'uint32_t'))
+
+ def WriteGLES2Implementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ code_1 = """%(return_type)s GLES2Implementation::%(func_name)s(%(args)s) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+"""
+ code_2 = """ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] gl%(func_name)s" << "("
+ << %(arg0)s << ", "
+ << %(arg1)s << ", "
+ << static_cast<void*>(%(arg2)s) << ", "
+ << static_cast<void*>(%(arg3)s) << ")");
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->%(func_name)s(%(id_name)s, kResultBucketId);
+ std::string str;
+ GLsizei max_size = 0;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ if (bufsize > 0) {
+ max_size =
+ std::min(static_cast<size_t>(%(bufsize_name)s) - 1, str.size());
+ memcpy(%(dest_name)s, str.c_str(), max_size);
+ %(dest_name)s[max_size] = '\\0';
+ GPU_CLIENT_LOG("------\\n" << %(dest_name)s << "\\n------");
+ }
+ }
+ if (%(length_name)s != NULL) {
+ *%(length_name)s = max_size;
+ }
+ CheckGLError();
+}
+"""
+ args = func.GetOriginalArgs()
+ str_args = {
+ 'return_type': func.return_type,
+ 'func_name': func.original_name,
+ 'args': func.MakeTypedOriginalArgString(""),
+ 'id_name': args[0].name,
+ 'bufsize_name': args[1].name,
+ 'length_name': args[2].name,
+ 'dest_name': args[3].name,
+ 'arg0': args[0].name,
+ 'arg1': args[1].name,
+ 'arg2': args[2].name,
+ 'arg3': args[3].name,
+ }
+ file.Write(code_1 % str_args)
+ func.WriteDestinationInitalizationValidation(file)
+ file.Write(code_2 % str_args)
+
+ def WriteServiceUnitTest(self, func, file, *extras):
+ """Overrriden from TypeHandler."""
+ valid_test = """
+TEST_P(%(test_name)s, %(name)sValidArgs) {
+ const char* kInfo = "hello";
+ const uint32_t kBucketId = 123;
+ SpecializedSetup<cmds::%(name)s, 0>(true);
+%(expect_len_code)s
+ EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s))
+ .WillOnce(DoAll(SetArgumentPointee<2>(strlen(kInfo)),
+ SetArrayArgument<3>(kInfo, kInfo + strlen(kInfo) + 1)));
+ cmds::%(name)s cmd;
+ cmd.Init(%(args)s);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(strlen(kInfo) + 1, bucket->size());
+ EXPECT_EQ(0, memcmp(bucket->GetData(0, bucket->size()), kInfo,
+ bucket->size()));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+"""
+ args = func.GetOriginalArgs()
+ id_name = args[0].GetValidGLArg(func)
+ get_len_func = func.GetInfo('get_len_func')
+ get_len_enum = func.GetInfo('get_len_enum')
+ sub = {
+ 'id_name': id_name,
+ 'get_len_func': get_len_func,
+ 'get_len_enum': get_len_enum,
+ 'gl_args': '%s, strlen(kInfo) + 1, _, _' %
+ args[0].GetValidGLArg(func),
+ 'args': '%s, kBucketId' % args[0].GetValidArg(func),
+ 'expect_len_code': '',
+ }
+ if get_len_func and get_len_func[0:2] == 'gl':
+ sub['expect_len_code'] = (
+ " EXPECT_CALL(*gl_, %s(%s, %s, _))\n"
+ " .WillOnce(SetArgumentPointee<2>(strlen(kInfo) + 1));") % (
+ get_len_func[2:], id_name, get_len_enum)
+ self.WriteValidUnitTest(func, file, valid_test, sub, *extras)
+
+ invalid_test = """
+TEST_P(%(test_name)s, %(name)sInvalidArgs) {
+ const uint32_t kBucketId = 123;
+ EXPECT_CALL(*gl_, %(gl_func_name)s(_, _, _, _))
+ .Times(0);
+ cmds::%(name)s cmd;
+ cmd.Init(kInvalidClientId, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+"""
+ self.WriteValidUnitTest(func, file, invalid_test, *extras)
+
+ def WriteServiceImplementation(self, func, file):
+ """Overrriden from TypeHandler."""
+ pass
+
+class NamedType(object):
+ """A class that represents a type of an argument in a client function.
+
+ A type of an argument that is to be passed through in the command buffer
+  command. Currently used only for the arguments that are specifically named in
+ the 'cmd_buffer_functions.txt' file, mostly enums.
+ """
+
+ def __init__(self, info):
+ assert not 'is_complete' in info or info['is_complete'] == True
+ self.info = info
+ self.valid = info['valid']
+ if 'invalid' in info:
+ self.invalid = info['invalid']
+ else:
+ self.invalid = []
+
+ def GetType(self):
+ return self.info['type']
+
+ def GetInvalidValues(self):
+ return self.invalid
+
+ def GetValidValues(self):
+ return self.valid
+
+ def IsConstant(self):
+ if not 'is_complete' in self.info:
+ return False
+
+ return len(self.GetValidValues()) == 1
+
+ def GetConstantValue(self):
+ return self.GetValidValues()[0]
+
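+# Illustrative sketch only: the info dict wrapped by NamedType comes from
+# _NAMED_TYPE_INFO and, for a hypothetical enum type, might look like
+#
+#   'TextureTarget': {
+#     'type': 'GLenum',
+#     'valid': ['GL_TEXTURE_2D', 'GL_TEXTURE_CUBE_MAP'],
+#     'invalid': ['GL_TEXTURE_3D'],
+#   }
+#
+# for which GetValidValues() returns the 'valid' list and IsConstant() is
+# False because 'is_complete' is absent.
+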
+class Argument(object):
+ """A class that represents a function argument."""
+
+ cmd_type_map_ = {
+ 'GLenum': 'uint32_t',
+ 'GLint': 'int32_t',
+ 'GLintptr': 'int32_t',
+ 'GLsizei': 'int32_t',
+ 'GLsizeiptr': 'int32_t',
+ 'GLfloat': 'float',
+ 'GLclampf': 'float',
+ }
+ need_validation_ = ['GLsizei*', 'GLboolean*', 'GLenum*', 'GLint*']
+
+ def __init__(self, name, type):
+ self.name = name
+ self.optional = type.endswith("Optional*")
+ if self.optional:
+ type = type[:-9] + "*"
+ self.type = type
+
+ if type in self.cmd_type_map_:
+ self.cmd_type = self.cmd_type_map_[type]
+ else:
+ self.cmd_type = 'uint32_t'
+
+ def IsPointer(self):
+ """Returns true if argument is a pointer."""
+ return False
+
+ def IsConstant(self):
+ """Returns true if the argument has only one valid value."""
+ return False
+
+ def AddCmdArgs(self, args):
+ """Adds command arguments for this argument to the given list."""
+ if not self.IsConstant():
+ return args.append(self)
+
+ def AddInitArgs(self, args):
+ """Adds init arguments for this argument to the given list."""
+ if not self.IsConstant():
+ return args.append(self)
+
+ def GetValidArg(self, func):
+ """Gets a valid value for this argument."""
+ valid_arg = func.GetValidArg(self)
+ if valid_arg != None:
+ return valid_arg
+
+ index = func.GetOriginalArgs().index(self)
+ return str(index + 1)
+
+ def GetValidClientSideArg(self, func):
+ """Gets a valid value for this argument."""
+ valid_arg = func.GetValidArg(self)
+ if valid_arg != None:
+ return valid_arg
+
+ index = func.GetOriginalArgs().index(self)
+ return str(index + 1)
+
+ def GetValidClientSideCmdArg(self, func):
+ """Gets a valid value for this argument."""
+ valid_arg = func.GetValidArg(self)
+ if valid_arg != None:
+ return valid_arg
+ try:
+ index = func.GetOriginalArgs().index(self)
+ return str(index + 1)
+ except ValueError:
+ pass
+ index = func.GetCmdArgs().index(self)
+ return str(index + 1)
+
+ def GetValidGLArg(self, func):
+ """Gets a valid GL value for this argument."""
+ return self.GetValidArg(func)
+
+ def GetValidNonCachedClientSideArg(self, func):
+ """Returns a valid value for this argument in a GL call.
+ Using the value will produce a command buffer service invocation.
+ Returns None if there is no such value."""
+ return '123'
+
+ def GetValidNonCachedClientSideCmdArg(self, func):
+ """Returns a valid value for this argument in a command buffer command.
+ Calling the GL function with the value returned by
+ GetValidNonCachedClientSideArg will result in a command buffer command
+ that contains the value returned by this function. """
+ return '123'
+
+ def GetNumInvalidValues(self, func):
+ """returns the number of invalid values to be tested."""
+ return 0
+
+ def GetInvalidArg(self, index):
+ """returns an invalid value and expected parse result by index."""
+ return ("---ERROR0---", "---ERROR2---", None)
+
+ def GetLogArg(self):
+ """Get argument appropriate for LOG macro."""
+ if self.type == 'GLboolean':
+ return 'GLES2Util::GetStringBool(%s)' % self.name
+ if self.type == 'GLenum':
+ return 'GLES2Util::GetStringEnum(%s)' % self.name
+ return self.name
+
+ def WriteGetCode(self, file):
+ """Writes the code to get an argument from a command structure."""
+ file.Write(" %s %s = static_cast<%s>(c.%s);\n" %
+ (self.type, self.name, self.type, self.name))
+
+ def WriteValidationCode(self, file, func):
+ """Writes the validation code for an argument."""
+ pass
+
+ def WriteClientSideValidationCode(self, file, func):
+ """Writes the validation code for an argument."""
+ pass
+
+ def WriteDestinationInitalizationValidation(self, file, func):
+ """Writes the client side destintion initialization validation."""
+ pass
+
+ def WriteDestinationInitalizationValidatationIfNeeded(self, file, func):
+ """Writes the client side destintion initialization validation if needed."""
+ parts = self.type.split(" ")
+ if len(parts) > 1:
+ return
+ if parts[0] in self.need_validation_:
+ file.Write(
+ " GPU_CLIENT_VALIDATE_DESTINATION_%sINITALIZATION(%s, %s);\n" %
+ ("OPTIONAL_" if self.optional else "", self.type[:-1], self.name))
+
+
+ def WriteGetAddress(self, file):
+ """Writes the code to get the address this argument refers to."""
+ pass
+
+ def GetImmediateVersion(self):
+ """Gets the immediate version of this argument."""
+ return self
+
+ def GetBucketVersion(self):
+ """Gets the bucket version of this argument."""
+ return self
+
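+# Minimal usage sketch for Argument (hypothetical parameters, illustration
+# only):
+#
+#   Argument('count', 'GLsizei').cmd_type      # 'int32_t', via cmd_type_map_
+#   Argument('data', 'const void*').cmd_type   # 'uint32_t', the default
+#   arg = Argument('length', 'GLsizeiOptional*')
+#   arg.optional, arg.type                     # (True, 'GLsizei*')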
+
+class BoolArgument(Argument):
+ """class for GLboolean"""
+
+ def __init__(self, name, type):
+ Argument.__init__(self, name, 'GLboolean')
+
+ def GetValidArg(self, func):
+ """Gets a valid value for this argument."""
+ return 'true'
+
+ def GetValidClientSideArg(self, func):
+ """Gets a valid value for this argument."""
+ return 'true'
+
+ def GetValidClientSideCmdArg(self, func):
+ """Gets a valid value for this argument."""
+ return 'true'
+
+ def GetValidGLArg(self, func):
+ """Gets a valid GL value for this argument."""
+ return 'true'
+
+
+class UniformLocationArgument(Argument):
+ """class for uniform locations."""
+
+ def __init__(self, name):
+ Argument.__init__(self, name, "GLint")
+
+ def WriteGetCode(self, file):
+ """Writes the code to get an argument from a command structure."""
+ code = """ %s %s = static_cast<%s>(c.%s);
+"""
+ file.Write(code % (self.type, self.name, self.type, self.name))
+
+class DataSizeArgument(Argument):
+ """class for data_size which Bucket commands do not need."""
+
+ def __init__(self, name):
+ Argument.__init__(self, name, "uint32_t")
+
+ def GetBucketVersion(self):
+ return None
+
+
+class SizeArgument(Argument):
+ """class for GLsizei and GLsizeiptr."""
+
+ def __init__(self, name, type):
+ Argument.__init__(self, name, type)
+
+ def GetNumInvalidValues(self, func):
+ """overridden from Argument."""
+ if func.IsImmediate():
+ return 0
+ return 1
+
+ def GetInvalidArg(self, index):
+ """overridden from Argument."""
+ return ("-1", "kNoError", "GL_INVALID_VALUE")
+
+ def WriteValidationCode(self, file, func):
+ """overridden from Argument."""
+ file.Write(" if (%s < 0) {\n" % self.name)
+ file.Write(
+ " LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, \"gl%s\", \"%s < 0\");\n" %
+ (func.original_name, self.name))
+ file.Write(" return error::kNoError;\n")
+ file.Write(" }\n")
+
+ def WriteClientSideValidationCode(self, file, func):
+ """overridden from Argument."""
+ file.Write(" if (%s < 0) {\n" % self.name)
+ file.Write(
+ " SetGLError(GL_INVALID_VALUE, \"gl%s\", \"%s < 0\");\n" %
+ (func.original_name, self.name))
+ file.Write(" return;\n")
+ file.Write(" }\n")
+
+
+class SizeNotNegativeArgument(SizeArgument):
+ """class for GLsizeiNotNegative. It's NEVER allowed to be negative"""
+
+ def __init__(self, name, type, gl_type):
+ SizeArgument.__init__(self, name, gl_type)
+
+ def GetInvalidArg(self, index):
+ """overridden from SizeArgument."""
+ return ("-1", "kOutOfBounds", "GL_NO_ERROR")
+
+ def WriteValidationCode(self, file, func):
+ """overridden from SizeArgument."""
+ pass
+
+
+class EnumBaseArgument(Argument):
+ """Base class for EnumArgument, IntArgument and ValidatedBoolArgument"""
+
+ def __init__(self, name, gl_type, type, gl_error):
+ Argument.__init__(self, name, gl_type)
+
+ self.local_type = type
+ self.gl_error = gl_error
+ name = type[len(gl_type):]
+ self.type_name = name
+ self.named_type = NamedType(_NAMED_TYPE_INFO[name])
+
+ def IsConstant(self):
+ return self.named_type.IsConstant()
+
+ def GetConstantValue(self):
+ return self.named_type.GetConstantValue()
+
+ def WriteValidationCode(self, file, func):
+ if self.named_type.IsConstant():
+ return
+ file.Write(" if (!validators_->%s.IsValid(%s)) {\n" %
+ (ToUnderscore(self.type_name), self.name))
+ if self.gl_error == "GL_INVALID_ENUM":
+ file.Write(
+ " LOCAL_SET_GL_ERROR_INVALID_ENUM(\"gl%s\", %s, \"%s\");\n" %
+ (func.original_name, self.name, self.name))
+ else:
+ file.Write(
+ " LOCAL_SET_GL_ERROR(%s, \"gl%s\", \"%s %s\");\n" %
+ (self.gl_error, func.original_name, self.name, self.gl_error))
+ file.Write(" return error::kNoError;\n")
+ file.Write(" }\n")
+
+ def WriteClientSideValidationCode(self, file, func):
+ if not self.named_type.IsConstant():
+ return
+ file.Write(" if (%s != %s) {" % (self.name,
+ self.GetConstantValue()))
+ file.Write(
+ " SetGLError(%s, \"gl%s\", \"%s %s\");\n" %
+ (self.gl_error, func.original_name, self.name, self.gl_error))
+ if func.return_type == "void":
+ file.Write(" return;\n")
+ else:
+ file.Write(" return %s;\n" % func.GetErrorReturnString())
+ file.Write(" }\n")
+
+ def GetValidArg(self, func):
+ valid_arg = func.GetValidArg(self)
+ if valid_arg != None:
+ return valid_arg
+ valid = self.named_type.GetValidValues()
+ if valid:
+ num_valid = len(valid)
+ return valid[0]
+
+ index = func.GetOriginalArgs().index(self)
+ return str(index + 1)
+
+ def GetValidClientSideArg(self, func):
+ """Gets a valid value for this argument."""
+ return self.GetValidArg(func)
+
+ def GetValidClientSideCmdArg(self, func):
+ """Gets a valid value for this argument."""
+ valid_arg = func.GetValidArg(self)
+ if valid_arg != None:
+ return valid_arg
+
+ valid = self.named_type.GetValidValues()
+ if valid:
+ num_valid = len(valid)
+ return valid[0]
+
+ try:
+ index = func.GetOriginalArgs().index(self)
+ return str(index + 1)
+ except ValueError:
+ pass
+ index = func.GetCmdArgs().index(self)
+ return str(index + 1)
+
+ def GetValidGLArg(self, func):
+ """Gets a valid value for this argument."""
+ return self.GetValidArg(func)
+
+ def GetNumInvalidValues(self, func):
+ """returns the number of invalid values to be tested."""
+ return len(self.named_type.GetInvalidValues())
+
+ def GetInvalidArg(self, index):
+ """returns an invalid value by index."""
+ invalid = self.named_type.GetInvalidValues()
+ if invalid:
+ num_invalid = len(invalid)
+ if index >= num_invalid:
+ index = num_invalid - 1
+ return (invalid[index], "kNoError", self.gl_error)
+ return ("---ERROR1---", "kNoError", self.gl_error)
+
+
+class EnumArgument(EnumBaseArgument):
+ """A class that represents a GLenum argument"""
+
+ def __init__(self, name, type):
+ EnumBaseArgument.__init__(self, name, "GLenum", type, "GL_INVALID_ENUM")
+
+ def GetLogArg(self):
+ """Overridden from Argument."""
+ return ("GLES2Util::GetString%s(%s)" %
+ (self.type_name, self.name))
+
+
+class IntArgument(EnumBaseArgument):
+ """A class for a GLint argument that can only except specific values.
+
+ For example glTexImage2D takes a GLint for its internalformat
+ argument instead of a GLenum.
+ """
+
+ def __init__(self, name, type):
+ EnumBaseArgument.__init__(self, name, "GLint", type, "GL_INVALID_VALUE")
+
+
+class ValidatedBoolArgument(EnumBaseArgument):
+ """A class for a GLboolean argument that can only except specific values.
+
+  For example glUniformMatrix takes a GLboolean for its transpose argument but
+ must be false.
+ """
+
+ def __init__(self, name, type):
+ EnumBaseArgument.__init__(self, name, "GLboolean", type, "GL_INVALID_VALUE")
+
+ def GetLogArg(self):
+ """Overridden from Argument."""
+ return 'GLES2Util::GetStringBool(%s)' % self.name
+
+
+class ImmediatePointerArgument(Argument):
+ """A class that represents an immediate argument to a function.
+
+ An immediate argument is one where the data follows the command.
+ """
+
+ def __init__(self, name, type):
+ Argument.__init__(self, name, type)
+
+ def IsPointer(self):
+ return True
+
+ def GetPointedType(self):
+ match = re.match('(const\s+)?(?P<element_type>[\w]+)\s*\*', self.type)
+ assert match
+ return match.groupdict()['element_type']
+
+ def AddCmdArgs(self, args):
+ """Overridden from Argument."""
+ pass
+
+ def WriteGetCode(self, file):
+ """Overridden from Argument."""
+ file.Write(
+ " %s %s = GetImmediateDataAs<%s>(\n" %
+ (self.type, self.name, self.type))
+ file.Write(" c, data_size, immediate_data_size);\n")
+
+ def WriteValidationCode(self, file, func):
+ """Overridden from Argument."""
+ file.Write(" if (%s == NULL) {\n" % self.name)
+ file.Write(" return error::kOutOfBounds;\n")
+ file.Write(" }\n")
+
+ def GetImmediateVersion(self):
+ """Overridden from Argument."""
+ return None
+
+ def WriteDestinationInitalizationValidation(self, file, func):
+ """Overridden from Argument."""
+ self.WriteDestinationInitalizationValidatationIfNeeded(file, func)
+
+ def GetLogArg(self):
+ """Overridden from Argument."""
+ return "static_cast<const void*>(%s)" % self.name
+
+
+class BucketPointerArgument(Argument):
+ """A class that represents an bucket argument to a function."""
+
+ def __init__(self, name, type):
+ Argument.__init__(self, name, type)
+
+ def AddCmdArgs(self, args):
+ """Overridden from Argument."""
+ pass
+
+ def WriteGetCode(self, file):
+ """Overridden from Argument."""
+ file.Write(
+ " %s %s = bucket->GetData(0, data_size);\n" %
+ (self.type, self.name))
+
+ def WriteValidationCode(self, file, func):
+ """Overridden from Argument."""
+ pass
+
+ def GetImmediateVersion(self):
+ """Overridden from Argument."""
+ return None
+
+ def WriteDestinationInitalizationValidation(self, file, func):
+ """Overridden from Argument."""
+ self.WriteDestinationInitalizationValidatationIfNeeded(file, func)
+
+ def GetLogArg(self):
+ """Overridden from Argument."""
+ return "static_cast<const void*>(%s)" % self.name
+
+
+class PointerArgument(Argument):
+ """A class that represents a pointer argument to a function."""
+
+ def __init__(self, name, type):
+ Argument.__init__(self, name, type)
+
+ def IsPointer(self):
+ """Returns true if argument is a pointer."""
+ return True
+
+ def GetPointedType(self):
+ match = re.match('(const\s+)?(?P<element_type>[\w]+)\s*\*', self.type)
+ assert match
+ return match.groupdict()['element_type']
+
+ def GetValidArg(self, func):
+ """Overridden from Argument."""
+ return "shared_memory_id_, shared_memory_offset_"
+
+ def GetValidGLArg(self, func):
+ """Overridden from Argument."""
+ return "reinterpret_cast<%s>(shared_memory_address_)" % self.type
+
+ def GetNumInvalidValues(self, func):
+ """Overridden from Argument."""
+ return 2
+
+ def GetInvalidArg(self, index):
+ """Overridden from Argument."""
+ if index == 0:
+ return ("kInvalidSharedMemoryId, 0", "kOutOfBounds", None)
+ else:
+ return ("shared_memory_id_, kInvalidSharedMemoryOffset",
+ "kOutOfBounds", None)
+
+ def GetLogArg(self):
+ """Overridden from Argument."""
+ return "static_cast<const void*>(%s)" % self.name
+
+ def AddCmdArgs(self, args):
+ """Overridden from Argument."""
+ args.append(Argument("%s_shm_id" % self.name, 'uint32_t'))
+ args.append(Argument("%s_shm_offset" % self.name, 'uint32_t'))
+
+ def WriteGetCode(self, file):
+ """Overridden from Argument."""
+ file.Write(
+ " %s %s = GetSharedMemoryAs<%s>(\n" %
+ (self.type, self.name, self.type))
+ file.Write(
+ " c.%s_shm_id, c.%s_shm_offset, data_size);\n" %
+ (self.name, self.name))
+
+ def WriteGetAddress(self, file):
+ """Overridden from Argument."""
+ file.Write(
+ " %s %s = GetSharedMemoryAs<%s>(\n" %
+ (self.type, self.name, self.type))
+ file.Write(
+ " %s_shm_id, %s_shm_offset, %s_size);\n" %
+ (self.name, self.name, self.name))
+
+ def WriteValidationCode(self, file, func):
+ """Overridden from Argument."""
+ file.Write(" if (%s == NULL) {\n" % self.name)
+ file.Write(" return error::kOutOfBounds;\n")
+ file.Write(" }\n")
+
+ def GetImmediateVersion(self):
+ """Overridden from Argument."""
+ return ImmediatePointerArgument(self.name, self.type)
+
+ def GetBucketVersion(self):
+ """Overridden from Argument."""
+ if self.type == "const char*":
+ return InputStringBucketArgument(self.name, self.type)
+ return BucketPointerArgument(self.name, self.type)
+
+ def WriteDestinationInitalizationValidation(self, file, func):
+ """Overridden from Argument."""
+ self.WriteDestinationInitalizationValidatationIfNeeded(file, func)
+
+
+class InputStringBucketArgument(Argument):
+ """An string input argument where the string is passed in a bucket."""
+
+ def __init__(self, name, type):
+ Argument.__init__(self, name + "_bucket_id", "uint32_t")
+
+ def WriteGetCode(self, file):
+ """Overridden from Argument."""
+ code = """
+ Bucket* %(name)s_bucket = GetBucket(c.%(name)s);
+ if (!%(name)s_bucket) {
+ return error::kInvalidArguments;
+ }
+ std::string %(name)s_str;
+ if (!%(name)s_bucket->GetAsString(&%(name)s_str)) {
+ return error::kInvalidArguments;
+ }
+ const char* %(name)s = %(name)s_str.c_str();
+"""
+ file.Write(code % {
+ 'name': self.name,
+ })
+
+ def GetValidArg(self, func):
+ return "kNameBucketId"
+
+ def GetValidGLArg(self, func):
+ return "_"
+
+
+class ResourceIdArgument(Argument):
+ """A class that represents a resource id argument to a function."""
+
+ def __init__(self, name, type):
+ match = re.match("(GLid\w+)", type)
+ self.resource_type = match.group(1)[4:]
+ type = type.replace(match.group(1), "GLuint")
+ Argument.__init__(self, name, type)
+
+ def WriteGetCode(self, file):
+ """Overridden from Argument."""
+ file.Write(" %s %s = c.%s;\n" % (self.type, self.name, self.name))
+
+ def GetValidArg(self, func):
+ return "client_%s_id_" % self.resource_type.lower()
+
+ def GetValidGLArg(self, func):
+ return "kService%sId" % self.resource_type
+
+
+class ResourceIdBindArgument(Argument):
+ """Represents a resource id argument to a bind function."""
+
+ def __init__(self, name, type):
+ match = re.match("(GLidBind\w+)", type)
+ self.resource_type = match.group(1)[8:]
+ type = type.replace(match.group(1), "GLuint")
+ Argument.__init__(self, name, type)
+
+ def WriteGetCode(self, file):
+ """Overridden from Argument."""
+ code = """ %(type)s %(name)s = c.%(name)s;
+"""
+ file.Write(code % {'type': self.type, 'name': self.name})
+
+ def GetValidArg(self, func):
+ return "client_%s_id_" % self.resource_type.lower()
+
+ def GetValidGLArg(self, func):
+ return "kService%sId" % self.resource_type
+
+
+class ResourceIdZeroArgument(Argument):
+ """Represents a resource id argument to a function that can be zero."""
+
+ def __init__(self, name, type):
+ match = re.match("(GLidZero\w+)", type)
+ self.resource_type = match.group(1)[8:]
+ type = type.replace(match.group(1), "GLuint")
+ Argument.__init__(self, name, type)
+
+ def WriteGetCode(self, file):
+ """Overridden from Argument."""
+ file.Write(" %s %s = c.%s;\n" % (self.type, self.name, self.name))
+
+ def GetValidArg(self, func):
+ return "client_%s_id_" % self.resource_type.lower()
+
+ def GetValidGLArg(self, func):
+ return "kService%sId" % self.resource_type
+
+ def GetNumInvalidValues(self, func):
+ """returns the number of invalid values to be tested."""
+ return 1
+
+ def GetInvalidArg(self, index):
+ """returns an invalid value by index."""
+ return ("kInvalidClientId", "kNoError", "GL_INVALID_VALUE")
+
+
+class Function(object):
+ """A class that represents a function."""
+
+ type_handlers = {
+ '': TypeHandler(),
+ 'Bind': BindHandler(),
+ 'Create': CreateHandler(),
+ 'Custom': CustomHandler(),
+ 'Data': DataHandler(),
+ 'Delete': DeleteHandler(),
+ 'DELn': DELnHandler(),
+ 'GENn': GENnHandler(),
+ 'GETn': GETnHandler(),
+ 'GLchar': GLcharHandler(),
+ 'GLcharN': GLcharNHandler(),
+ 'HandWritten': HandWrittenHandler(),
+ 'Is': IsHandler(),
+ 'Manual': ManualHandler(),
+ 'PUT': PUTHandler(),
+ 'PUTn': PUTnHandler(),
+ 'PUTXn': PUTXnHandler(),
+ 'StateSet': StateSetHandler(),
+ 'StateSetRGBAlpha': StateSetRGBAlphaHandler(),
+ 'StateSetFrontBack': StateSetFrontBackHandler(),
+ 'StateSetFrontBackSeparate': StateSetFrontBackSeparateHandler(),
+ 'StateSetNamedParameter': StateSetNamedParameter(),
+ 'STRn': STRnHandler(),
+ 'Todo': TodoHandler(),
+ }
+
+ def __init__(self, name, info):
+ self.name = name
+ self.original_name = info['original_name']
+
+ self.original_args = self.ParseArgs(info['original_args'])
+
+ if 'cmd_args' in info:
+ self.args_for_cmds = self.ParseArgs(info['cmd_args'])
+ else:
+ self.args_for_cmds = self.original_args[:]
+
+ self.return_type = info['return_type']
+ if self.return_type != 'void':
+ self.return_arg = CreateArg(info['return_type'] + " result")
+ else:
+ self.return_arg = None
+
+ self.num_pointer_args = sum(
+ [1 for arg in self.args_for_cmds if arg.IsPointer()])
+ self.info = info
+ self.type_handler = self.type_handlers[info['type']]
+ self.can_auto_generate = (self.num_pointer_args == 0 and
+ info['return_type'] == "void")
+ self.InitFunction()
+
+ def ParseArgs(self, arg_string):
+ """Parses a function arg string."""
+ args = []
+ parts = arg_string.split(',')
+ for arg_string in parts:
+ arg = CreateArg(arg_string)
+ if arg:
+ args.append(arg)
+ return args
+
+ def IsType(self, type_name):
+ """Returns true if function is a certain type."""
+ return self.info['type'] == type_name
+
+ def InitFunction(self):
+ """Creates command args and calls the init function for the type handler.
+
+ Creates argument lists for command buffer commands, eg. self.cmd_args and
+ self.init_args.
+ Calls the type function initialization.
+ Override to create different kind of command buffer command argument lists.
+ """
+ self.cmd_args = []
+ for arg in self.args_for_cmds:
+ arg.AddCmdArgs(self.cmd_args)
+
+ self.init_args = []
+ for arg in self.args_for_cmds:
+ arg.AddInitArgs(self.init_args)
+
+ if self.return_arg:
+ self.init_args.append(self.return_arg)
+
+ self.type_handler.InitFunction(self)
+
+ def IsImmediate(self):
+ """Returns whether the function is immediate data function or not."""
+ return False
+
+ def GetInfo(self, name, default = None):
+ """Returns a value from the function info for this function."""
+ if name in self.info:
+ return self.info[name]
+ return default
+
+ def GetValidArg(self, arg):
+ """Gets a valid argument value for the parameter arg from the function info
+ if one exists."""
+ try:
+ index = self.GetOriginalArgs().index(arg)
+ except ValueError:
+ return None
+
+ valid_args = self.GetInfo('valid_args')
+ if valid_args and str(index) in valid_args:
+ return valid_args[str(index)]
+ return None
+
+ def AddInfo(self, name, value):
+ """Adds an info."""
+ self.info[name] = value
+
+ def IsExtension(self):
+ return self.GetInfo('extension') or self.GetInfo('extension_flag')
+
+ def IsCoreGLFunction(self):
+ return (not self.IsExtension() and
+ not self.GetInfo('pepper_interface'))
+
+ def InPepperInterface(self, interface):
+ ext = self.GetInfo('pepper_interface')
+ if not interface.GetName():
+ return self.IsCoreGLFunction()
+ return ext == interface.GetName()
+
+ def InAnyPepperExtension(self):
+ return self.IsCoreGLFunction() or self.GetInfo('pepper_interface')
+
+ def GetErrorReturnString(self):
+ if self.GetInfo("error_return"):
+ return self.GetInfo("error_return")
+ elif self.return_type == "GLboolean":
+ return "GL_FALSE"
+ elif "*" in self.return_type:
+ return "NULL"
+ return "0"
+
+ def GetGLFunctionName(self):
+ """Gets the function to call to execute GL for this command."""
+ if self.GetInfo('decoder_func'):
+ return self.GetInfo('decoder_func')
+ return "gl%s" % self.original_name
+
+ def GetGLTestFunctionName(self):
+ gl_func_name = self.GetInfo('gl_test_func')
+ if gl_func_name == None:
+ gl_func_name = self.GetGLFunctionName()
+ if gl_func_name.startswith("gl"):
+ gl_func_name = gl_func_name[2:]
+ else:
+ gl_func_name = self.original_name
+ return gl_func_name
+
+ def GetDataTransferMethods(self):
+ return self.GetInfo('data_transfer_methods',
+ ['immediate' if self.num_pointer_args == 1 else 'shm'])
+
+ def AddCmdArg(self, arg):
+ """Adds a cmd argument to this function."""
+ self.cmd_args.append(arg)
+
+ def GetCmdArgs(self):
+ """Gets the command args for this function."""
+ return self.cmd_args
+
+ def ClearCmdArgs(self):
+ """Clears the command args for this function."""
+ self.cmd_args = []
+
+ def GetCmdConstants(self):
+ """Gets the constants for this function."""
+ return [arg for arg in self.args_for_cmds if arg.IsConstant()]
+
+ def GetInitArgs(self):
+ """Gets the init args for this function."""
+ return self.init_args
+
+ def GetOriginalArgs(self):
+ """Gets the original arguments to this function."""
+ return self.original_args
+
+ def GetLastOriginalArg(self):
+ """Gets the last original argument to this function."""
+ return self.original_args[len(self.original_args) - 1]
+
+ def __MaybePrependComma(self, arg_string, add_comma):
+ """Adds a comma if arg_string is not empty and add_comma is true."""
+ comma = ""
+ if add_comma and len(arg_string):
+ comma = ", "
+ return "%s%s" % (comma, arg_string)
+
+ def MakeTypedOriginalArgString(self, prefix, add_comma = False):
+ """Gets a list of arguments as they are in GL."""
+ args = self.GetOriginalArgs()
+ arg_string = ", ".join(
+ ["%s %s%s" % (arg.type, prefix, arg.name) for arg in args])
+ return self.__MaybePrependComma(arg_string, add_comma)
+
+ def MakeOriginalArgString(self, prefix, add_comma = False, separator = ", "):
+ """Gets the list of arguments as they are in GL."""
+ args = self.GetOriginalArgs()
+ arg_string = separator.join(
+ ["%s%s" % (prefix, arg.name) for arg in args])
+ return self.__MaybePrependComma(arg_string, add_comma)
+
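+  # Illustrative example for the arg-string helpers above (hypothetical
+  # function with original args "GLenum target, GLsizei count"):
+  #   MakeTypedOriginalArgString("")   -> "GLenum target, GLsizei count"
+  #   MakeOriginalArgString("_")       -> "_target, _count"
+  #   MakeOriginalArgString("", True)  -> ", target, count"
+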
+ def MakeTypedHelperArgString(self, prefix, add_comma = False):
+ """Gets a list of typed GL arguments after removing unneeded arguments."""
+ args = self.GetOriginalArgs()
+ arg_string = ", ".join(
+ ["%s %s%s" % (
+ arg.type,
+ prefix,
+ arg.name,
+ ) for arg in args if not arg.IsConstant()])
+ return self.__MaybePrependComma(arg_string, add_comma)
+
+ def MakeHelperArgString(self, prefix, add_comma = False, separator = ", "):
+ """Gets a list of GL arguments after removing unneeded arguments."""
+ args = self.GetOriginalArgs()
+ arg_string = separator.join(
+ ["%s%s" % (prefix, arg.name)
+ for arg in args if not arg.IsConstant()])
+ return self.__MaybePrependComma(arg_string, add_comma)
+
+ def MakeTypedPepperArgString(self, prefix):
+ """Gets a list of arguments as they need to be for Pepper."""
+ if self.GetInfo("pepper_args"):
+ return self.GetInfo("pepper_args")
+ else:
+ return self.MakeTypedOriginalArgString(prefix, False)
+
+ def MapCTypeToPepperIdlType(self, ctype, is_for_return_type=False):
+ """Converts a C type name to the corresponding Pepper IDL type."""
+ idltype = {
+ 'char*': '[out] str_t',
+ 'const GLchar* const*': '[out] cstr_t',
+ 'const char*': 'cstr_t',
+ 'const void*': 'mem_t',
+ 'void*': '[out] mem_t',
+ 'void**': '[out] mem_ptr_t',
+ }.get(ctype, ctype)
+ # We use "GLxxx_ptr_t" for "GLxxx*".
+ matched = re.match(r'(const )?(GL\w+)\*$', ctype)
+ if matched:
+ idltype = matched.group(2) + '_ptr_t'
+ if not matched.group(1):
+ idltype = '[out] ' + idltype
+ # If an in/out specifier is not specified yet, prepend [in].
+ if idltype[0] != '[':
+ idltype = '[in] ' + idltype
+ # Strip the in/out specifier for a return type.
+ if is_for_return_type:
+ idltype = re.sub(r'\[\w+\] ', '', idltype)
+ return idltype
+
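+  # Worked examples for MapCTypeToPepperIdlType (assumed inputs, illustration
+  # only):
+  #   'const GLfloat*' -> '[in] GLfloat_ptr_t'
+  #   'GLfloat*'       -> '[out] GLfloat_ptr_t'
+  #   'GLenum'         -> '[in] GLenum', or 'GLenum' when is_for_return_type
+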
+ def MakeTypedPepperIdlArgStrings(self):
+ """Gets a list of arguments as they need to be for Pepper IDL."""
+ args = self.GetOriginalArgs()
+ return ["%s %s" % (self.MapCTypeToPepperIdlType(arg.type), arg.name)
+ for arg in args]
+
+ def GetPepperName(self):
+ if self.GetInfo("pepper_name"):
+ return self.GetInfo("pepper_name")
+ return self.name
+
+ def MakeTypedCmdArgString(self, prefix, add_comma = False):
+ """Gets a typed list of arguments as they need to be for command buffers."""
+ args = self.GetCmdArgs()
+ arg_string = ", ".join(
+ ["%s %s%s" % (arg.type, prefix, arg.name) for arg in args])
+ return self.__MaybePrependComma(arg_string, add_comma)
+
+ def MakeCmdArgString(self, prefix, add_comma = False):
+ """Gets the list of arguments as they need to be for command buffers."""
+ args = self.GetCmdArgs()
+ arg_string = ", ".join(
+ ["%s%s" % (prefix, arg.name) for arg in args])
+ return self.__MaybePrependComma(arg_string, add_comma)
+
+ def MakeTypedInitString(self, prefix, add_comma = False):
+ """Gets a typed list of arguments as they need to be for cmd Init/Set."""
+ args = self.GetInitArgs()
+ arg_string = ", ".join(
+ ["%s %s%s" % (arg.type, prefix, arg.name) for arg in args])
+ return self.__MaybePrependComma(arg_string, add_comma)
+
+ def MakeInitString(self, prefix, add_comma = False):
+ """Gets the list of arguments as they need to be for cmd Init/Set."""
+ args = self.GetInitArgs()
+ arg_string = ", ".join(
+ ["%s%s" % (prefix, arg.name) for arg in args])
+ return self.__MaybePrependComma(arg_string, add_comma)
+
+ def MakeLogArgString(self):
+ """Makes a string of the arguments for the LOG macros"""
+ args = self.GetOriginalArgs()
+ return ' << ", " << '.join([arg.GetLogArg() for arg in args])
+
+ def WriteCommandDescription(self, file):
+ """Writes a description of the command."""
+ file.Write("//! Command that corresponds to gl%s.\n" % self.original_name)
+
+ def WriteHandlerValidation(self, file):
+ """Writes validation code for the function."""
+ for arg in self.GetOriginalArgs():
+ arg.WriteValidationCode(file, self)
+ self.WriteValidationCode(file)
+
+ def WriteHandlerImplementation(self, file):
+ """Writes the handler implementation for this command."""
+ self.type_handler.WriteHandlerImplementation(self, file)
+
+ def WriteValidationCode(self, file):
+ """Writes the validation code for a command."""
+ pass
+
+ def WriteCmdFlag(self, file):
+ """Writes the cmd cmd_flags constant."""
+ flags = []
+ # By default trace only at the highest level 3.
+ trace_level = int(self.GetInfo('trace_level', default = 3))
+ if trace_level not in xrange(0, 4):
+ raise KeyError("Unhandled trace_level: %d" % trace_level)
+
+ flags.append('CMD_FLAG_SET_TRACE_LEVEL(%d)' % trace_level)
+
+ if len(flags) > 0:
+ cmd_flags = ' | '.join(flags)
+ else:
+ cmd_flags = 0
+
+ file.Write(" static const uint8 cmd_flags = %s;\n" % cmd_flags)
+
+
+ def WriteCmdArgFlag(self, file):
+ """Writes the cmd kArgFlags constant."""
+ file.Write(" static const cmd::ArgFlags kArgFlags = cmd::kFixed;\n")
+
+ def WriteCmdComputeSize(self, file):
+ """Writes the ComputeSize function for the command."""
+ file.Write(" static uint32_t ComputeSize() {\n")
+ file.Write(
+ " return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteCmdSetHeader(self, file):
+ """Writes the cmd's SetHeader function."""
+ file.Write(" void SetHeader() {\n")
+ file.Write(" header.SetCmd<ValueType>();\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteCmdInit(self, file):
+ """Writes the cmd's Init function."""
+ file.Write(" void Init(%s) {\n" % self.MakeTypedCmdArgString("_"))
+ file.Write(" SetHeader();\n")
+ args = self.GetCmdArgs()
+ for arg in args:
+ file.Write(" %s = _%s;\n" % (arg.name, arg.name))
+ file.Write(" }\n")
+ file.Write("\n")
+
+ def WriteCmdSet(self, file):
+ """Writes the cmd's Set function."""
+ copy_args = self.MakeCmdArgString("_", False)
+ file.Write(" void* Set(void* cmd%s) {\n" %
+ self.MakeTypedCmdArgString("_", True))
+ file.Write(" static_cast<ValueType*>(cmd)->Init(%s);\n" % copy_args)
+ file.Write(" return NextCmdAddress<ValueType>(cmd);\n")
+ file.Write(" }\n")
+ file.Write("\n")
+
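+  # Rough sketch of the C++ that the Init/Set templates above emit for a
+  # hypothetical command with a single GLenum cmd arg named "target":
+  #
+  #   void Init(GLenum _target) {
+  #     SetHeader();
+  #     target = _target;
+  #   }
+  #
+  #   void* Set(void* cmd, GLenum _target) {
+  #     static_cast<ValueType*>(cmd)->Init(_target);
+  #     return NextCmdAddress<ValueType>(cmd);
+  #   }
+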
+ def WriteStruct(self, file):
+ self.type_handler.WriteStruct(self, file)
+
+ def WriteDocs(self, file):
+ self.type_handler.WriteDocs(self, file)
+
+ def WriteCmdHelper(self, file):
+ """Writes the cmd's helper."""
+ self.type_handler.WriteCmdHelper(self, file)
+
+ def WriteServiceImplementation(self, file):
+ """Writes the service implementation for a command."""
+ self.type_handler.WriteServiceImplementation(self, file)
+
+ def WriteServiceUnitTest(self, file, *extras):
+ """Writes the service implementation for a command."""
+ self.type_handler.WriteServiceUnitTest(self, file, *extras)
+
+ def WriteGLES2CLibImplementation(self, file):
+ """Writes the GLES2 C Lib Implemention."""
+ self.type_handler.WriteGLES2CLibImplementation(self, file)
+
+ def WriteGLES2InterfaceHeader(self, file):
+ """Writes the GLES2 Interface declaration."""
+ self.type_handler.WriteGLES2InterfaceHeader(self, file)
+
+ def WriteGLES2InterfaceStub(self, file):
+ """Writes the GLES2 Interface Stub declaration."""
+ self.type_handler.WriteGLES2InterfaceStub(self, file)
+
+ def WriteGLES2InterfaceStubImpl(self, file):
+ """Writes the GLES2 Interface Stub declaration."""
+ self.type_handler.WriteGLES2InterfaceStubImpl(self, file)
+
+ def WriteGLES2ImplementationHeader(self, file):
+ """Writes the GLES2 Implemention declaration."""
+ self.type_handler.WriteGLES2ImplementationHeader(self, file)
+
+ def WriteGLES2Implementation(self, file):
+ """Writes the GLES2 Implemention definition."""
+ self.type_handler.WriteGLES2Implementation(self, file)
+
+ def WriteGLES2TraceImplementationHeader(self, file):
+ """Writes the GLES2 Trace Implemention declaration."""
+ self.type_handler.WriteGLES2TraceImplementationHeader(self, file)
+
+ def WriteGLES2TraceImplementation(self, file):
+ """Writes the GLES2 Trace Implemention definition."""
+ self.type_handler.WriteGLES2TraceImplementation(self, file)
+
+ def WriteGLES2Header(self, file):
+ """Writes the GLES2 Implemention unit test."""
+ self.type_handler.WriteGLES2Header(self, file)
+
+ def WriteGLES2ImplementationUnitTest(self, file):
+ """Writes the GLES2 Implemention unit test."""
+ self.type_handler.WriteGLES2ImplementationUnitTest(self, file)
+
+ def WriteDestinationInitalizationValidation(self, file):
+ """Writes the client side destintion initialization validation."""
+ self.type_handler.WriteDestinationInitalizationValidation(self, file)
+
+ def WriteFormatTest(self, file):
+ """Writes the cmd's format test."""
+ self.type_handler.WriteFormatTest(self, file)
+
+
+class PepperInterface(object):
+ """A class that represents a function."""
+
+ def __init__(self, info):
+ self.name = info["name"]
+ self.dev = info["dev"]
+
+ def GetName(self):
+ return self.name
+
+ def GetInterfaceName(self):
+ upperint = ""
+ dev = ""
+ if self.name:
+ upperint = "_" + self.name.upper()
+ if self.dev:
+ dev = "_DEV"
+ return "PPB_OPENGLES2%s%s_INTERFACE" % (upperint, dev)
+
+ def GetInterfaceString(self):
+ dev = ""
+ if self.dev:
+ dev = "(Dev)"
+ return "PPB_OpenGLES2%s%s" % (self.name, dev)
+
+ def GetStructName(self):
+ dev = ""
+ if self.dev:
+ dev = "_Dev"
+ return "PPB_OpenGLES2%s%s" % (self.name, dev)
+
+
+class ImmediateFunction(Function):
+ """A class that represnets an immediate function command."""
+
+ def __init__(self, func):
+ Function.__init__(
+ self,
+ "%sImmediate" % func.name,
+ func.info)
+
+ def InitFunction(self):
+ # Override args in original_args and args_for_cmds with immediate versions
+ # of the args.
+
+ new_original_args = []
+ for arg in self.original_args:
+ new_arg = arg.GetImmediateVersion()
+ if new_arg:
+ new_original_args.append(new_arg)
+ self.original_args = new_original_args
+
+ new_args_for_cmds = []
+ for arg in self.args_for_cmds:
+ new_arg = arg.GetImmediateVersion()
+ if new_arg:
+ new_args_for_cmds.append(new_arg)
+
+ self.args_for_cmds = new_args_for_cmds
+
+ Function.InitFunction(self)
+
+ def IsImmediate(self):
+ return True
+
+ def WriteCommandDescription(self, file):
+ """Overridden from Function"""
+ file.Write("//! Immediate version of command that corresponds to gl%s.\n" %
+ self.original_name)
+
+ def WriteServiceImplementation(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteImmediateServiceImplementation(self, file)
+
+ def WriteHandlerImplementation(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteImmediateHandlerImplementation(self, file)
+
+ def WriteServiceUnitTest(self, file, *extras):
+ """Writes the service implementation for a command."""
+ self.type_handler.WriteImmediateServiceUnitTest(self, file, *extras)
+
+ def WriteValidationCode(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteImmediateValidationCode(self, file)
+
+ def WriteCmdArgFlag(self, file):
+ """Overridden from Function"""
+ file.Write(" static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;\n")
+
+ def WriteCmdComputeSize(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteImmediateCmdComputeSize(self, file)
+
+ def WriteCmdSetHeader(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteImmediateCmdSetHeader(self, file)
+
+ def WriteCmdInit(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteImmediateCmdInit(self, file)
+
+ def WriteCmdSet(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteImmediateCmdSet(self, file)
+
+ def WriteCmdHelper(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteImmediateCmdHelper(self, file)
+
+ def WriteFormatTest(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteImmediateFormatTest(self, file)
+
+
+class BucketFunction(Function):
+ """A class that represnets a bucket version of a function command."""
+
+ def __init__(self, func):
+ Function.__init__(
+ self,
+ "%sBucket" % func.name,
+ func.info)
+
+ def InitFunction(self):
+ # Override args in original_args and args_for_cmds with bucket versions
+ # of the args.
+
+ new_original_args = []
+ for arg in self.original_args:
+ new_arg = arg.GetBucketVersion()
+ if new_arg:
+ new_original_args.append(new_arg)
+ self.original_args = new_original_args
+
+ new_args_for_cmds = []
+ for arg in self.args_for_cmds:
+ new_arg = arg.GetBucketVersion()
+ if new_arg:
+ new_args_for_cmds.append(new_arg)
+
+ self.args_for_cmds = new_args_for_cmds
+
+ Function.InitFunction(self)
+
+ def WriteCommandDescription(self, file):
+ """Overridden from Function"""
+ file.Write("//! Bucket version of command that corresponds to gl%s.\n" %
+ self.original_name)
+
+ def WriteServiceImplementation(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteBucketServiceImplementation(self, file)
+
+ def WriteHandlerImplementation(self, file):
+ """Overridden from Function"""
+ self.type_handler.WriteBucketHandlerImplementation(self, file)
+
+ def WriteServiceUnitTest(self, file, *extras):
+ """Writes the service implementation for a command."""
+ self.type_handler.WriteBucketServiceUnitTest(self, file, *extras)
+
+
+def CreateArg(arg_string):
+ """Creates an Argument."""
+ arg_parts = arg_string.split()
+ if len(arg_parts) == 1 and arg_parts[0] == 'void':
+ return None
+ # Is this a pointer argument?
+ elif arg_string.find('*') >= 0:
+ return PointerArgument(
+ arg_parts[-1],
+ " ".join(arg_parts[0:-1]))
+ # Is this a resource argument? Must come after pointer check.
+ elif arg_parts[0].startswith('GLidBind'):
+ return ResourceIdBindArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
+ elif arg_parts[0].startswith('GLidZero'):
+ return ResourceIdZeroArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
+ elif arg_parts[0].startswith('GLid'):
+ return ResourceIdArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
+ elif arg_parts[0].startswith('GLenum') and len(arg_parts[0]) > 6:
+ return EnumArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
+ elif arg_parts[0].startswith('GLboolean') and len(arg_parts[0]) > 9:
+ return ValidatedBoolArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
+ elif arg_parts[0].startswith('GLboolean'):
+ return BoolArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
+ elif arg_parts[0].startswith('GLintUniformLocation'):
+ return UniformLocationArgument(arg_parts[-1])
+ elif (arg_parts[0].startswith('GLint') and len(arg_parts[0]) > 5 and
+ not arg_parts[0].startswith('GLintptr')):
+ return IntArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
+ elif (arg_parts[0].startswith('GLsizeiNotNegative') or
+ arg_parts[0].startswith('GLintptrNotNegative')):
+ return SizeNotNegativeArgument(arg_parts[-1],
+ " ".join(arg_parts[0:-1]),
+ arg_parts[0][0:-11])
+ elif arg_parts[0].startswith('GLsize'):
+ return SizeArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
+ else:
+ return Argument(arg_parts[-1], " ".join(arg_parts[0:-1]))
+
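+# Dispatch examples for CreateArg (hypothetical declarations, illustration
+# only):
+#   'GLenumTextureTarget target' -> EnumArgument
+#   'GLidBindTexture texture'    -> ResourceIdBindArgument
+#   'GLsizei count'              -> SizeArgument
+#   'const void* data'           -> PointerArgument
+#   'void'                       -> None (no argument created)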
+
+class GLGenerator(object):
+ """A class to generate GL command buffers."""
+
+ _function_re = re.compile(r'GL_APICALL(.*?)GL_APIENTRY (.*?) \((.*?)\);')
+
+ def __init__(self, verbose):
+ self.original_functions = []
+ self.functions = []
+ self.verbose = verbose
+ self.errors = 0
+ self.pepper_interfaces = []
+ self.interface_info = {}
+ self.generated_cpp_filenames = []
+
+ for interface in _PEPPER_INTERFACES:
+ interface = PepperInterface(interface)
+ self.pepper_interfaces.append(interface)
+ self.interface_info[interface.GetName()] = interface
+
+ def AddFunction(self, func):
+ """Adds a function."""
+ self.functions.append(func)
+
+ def GetFunctionInfo(self, name):
+ """Gets a type info for the given function name."""
+ if name in _FUNCTION_INFO:
+ func_info = _FUNCTION_INFO[name].copy()
+ else:
+ func_info = {}
+
+ if not 'type' in func_info:
+ func_info['type'] = ''
+
+ return func_info
+
+ def Log(self, msg):
+ """Prints something if verbose is true."""
+ if self.verbose:
+ print msg
+
+ def Error(self, msg):
+ """Prints an error."""
+ print "Error: %s" % msg
+ self.errors += 1
+
+ def WriteLicense(self, file):
+ """Writes the license."""
+ file.Write(_LICENSE)
+
+ def WriteNamespaceOpen(self, file):
+ """Writes the code for the namespace."""
+ file.Write("namespace gpu {\n")
+ file.Write("namespace gles2 {\n")
+ file.Write("\n")
+
+ def WriteNamespaceClose(self, file):
+ """Writes the code to close the namespace."""
+ file.Write("} // namespace gles2\n")
+ file.Write("} // namespace gpu\n")
+ file.Write("\n")
+
+ def ParseGLH(self, filename):
+ """Parses the cmd_buffer_functions.txt file and extracts the functions"""
+ f = open(filename, "r")
+ functions = f.read()
+ f.close()
+ for line in functions.splitlines():
+ match = self._function_re.match(line)
+ if match:
+ func_name = match.group(2)[2:]
+ func_info = self.GetFunctionInfo(func_name)
+ if func_info['type'] == 'Noop':
+ continue
+
+ parsed_func_info = {
+ 'original_name': func_name,
+ 'original_args': match.group(3),
+ 'return_type': match.group(1).strip(),
+ }
+
+ for k in parsed_func_info.keys():
+ if not k in func_info:
+ func_info[k] = parsed_func_info[k]
+
+ f = Function(func_name, func_info)
+ self.original_functions.append(f)
+
+ #for arg in f.GetOriginalArgs():
+ # if not isinstance(arg, EnumArgument) and arg.type == 'GLenum':
+ # self.Log("%s uses bare GLenum %s." % (func_name, arg.name))
+
+ gen_cmd = f.GetInfo('gen_cmd')
+ if gen_cmd == True or gen_cmd == None:
+ if f.type_handler.NeedsDataTransferFunction(f):
+ methods = f.GetDataTransferMethods()
+ if 'immediate' in methods:
+ self.AddFunction(ImmediateFunction(f))
+ if 'bucket' in methods:
+ self.AddFunction(BucketFunction(f))
+ if 'shm' in methods:
+ self.AddFunction(f)
+ else:
+ self.AddFunction(f)
+
+ self.Log("Auto Generated Functions : %d" %
+ len([f for f in self.functions if f.can_auto_generate or
+ (not f.IsType('') and not f.IsType('Custom') and
+ not f.IsType('Todo'))]))
+
+ funcs = [f for f in self.functions if not f.can_auto_generate and
+ (f.IsType('') or f.IsType('Custom') or f.IsType('Todo'))]
+ self.Log("Non Auto Generated Functions: %d" % len(funcs))
+
+ for f in funcs:
+ self.Log(" %-10s %-20s gl%s" % (f.info['type'], f.return_type, f.name))
+
+ def WriteCommandIds(self, filename):
+ """Writes the command buffer format"""
+ file = CHeaderWriter(filename)
+ file.Write("#define GLES2_COMMAND_LIST(OP) \\\n")
+ id = 256
+ for func in self.functions:
+ file.Write(" %-60s /* %d */ \\\n" %
+ ("OP(%s)" % func.name, id))
+ id += 1
+ file.Write("\n")
+
+ file.Write("enum CommandId {\n")
+ file.Write(" kStartPoint = cmd::kLastCommonId, "
+ "// All GLES2 commands start after this.\n")
+ file.Write("#define GLES2_CMD_OP(name) k ## name,\n")
+ file.Write(" GLES2_COMMAND_LIST(GLES2_CMD_OP)\n")
+ file.Write("#undef GLES2_CMD_OP\n")
+ file.Write(" kNumCommands\n")
+ file.Write("};\n")
+ file.Write("\n")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteFormat(self, filename):
+ """Writes the command buffer format"""
+ file = CHeaderWriter(filename)
+ for func in self.functions:
+ if True:
+ #gen_cmd = func.GetInfo('gen_cmd')
+ #if gen_cmd == True or gen_cmd == None:
+ func.WriteStruct(file)
+ file.Write("\n")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteDocs(self, filename):
+ """Writes the command buffer doc version of the commands"""
+ file = CWriter(filename)
+ for func in self.functions:
+ if True:
+ #gen_cmd = func.GetInfo('gen_cmd')
+ #if gen_cmd == True or gen_cmd == None:
+ func.WriteDocs(file)
+ file.Write("\n")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteFormatTest(self, filename):
+ """Writes the command buffer format test."""
+ file = CHeaderWriter(
+ filename,
+ "// This file contains unit tests for gles2 commmands\n"
+ "// It is included by gles2_cmd_format_test.cc\n"
+ "\n")
+
+ for func in self.functions:
+ if True:
+ #gen_cmd = func.GetInfo('gen_cmd')
+ #if gen_cmd == True or gen_cmd == None:
+ func.WriteFormatTest(file)
+
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteCmdHelperHeader(self, filename):
+ """Writes the gles2 command helper."""
+ file = CHeaderWriter(filename)
+
+ for func in self.functions:
+ if True:
+ #gen_cmd = func.GetInfo('gen_cmd')
+ #if gen_cmd == True or gen_cmd == None:
+ func.WriteCmdHelper(file)
+
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteServiceContextStateHeader(self, filename):
+ """Writes the service context state header."""
+ file = CHeaderWriter(
+ filename,
+ "// It is included by context_state.h\n")
+ file.Write("struct EnableFlags {\n")
+ file.Write(" EnableFlags();\n")
+ for capability in _CAPABILITY_FLAGS:
+ file.Write(" bool %s;\n" % capability['name'])
+ file.Write(" bool cached_%s;\n" % capability['name'])
+ file.Write("};\n\n")
+
+ for state_name in sorted(_STATES.keys()):
+ state = _STATES[state_name]
+ for item in state['states']:
+ if isinstance(item['default'], list):
+ file.Write("%s %s[%d];\n" % (item['type'], item['name'],
+ len(item['default'])))
+ else:
+ file.Write("%s %s;\n" % (item['type'], item['name']))
+
+ if item.get('cached', False):
+ if isinstance(item['default'], list):
+ file.Write("%s cached_%s[%d];\n" % (item['type'], item['name'],
+ len(item['default'])))
+ else:
+ file.Write("%s cached_%s;\n" % (item['type'], item['name']))
+
+ file.Write("\n")
+
+ file.Write("""
+ inline void SetDeviceCapabilityState(GLenum cap, bool enable) {
+ switch (cap) {
+ """)
+ for capability in _CAPABILITY_FLAGS:
+ file.Write("""\
+ case GL_%s:
+ """ % capability['name'].upper())
+ file.Write("""\
+ if (enable_flags.cached_%(name)s == enable &&
+ !ignore_cached_state)
+ return;
+ enable_flags.cached_%(name)s = enable;
+ break;
+ """ % capability)
+
+ file.Write("""\
+ default:
+ NOTREACHED();
+ return;
+ }
+ if (enable)
+ glEnable(cap);
+ else
+ glDisable(cap);
+ }
+ """)
+
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
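+  # Rough shape of the header WriteServiceContextStateHeader emits, shown for
+  # the 'blend' capability only; the other _CAPABILITY_FLAGS entries follow
+  # the same pattern:
+  #
+  #   struct EnableFlags {
+  #     EnableFlags();
+  #     bool blend;
+  #     bool cached_blend;
+  #     ...
+  #   };
+  #
+  #   case GL_BLEND:
+  #     if (enable_flags.cached_blend == enable && !ignore_cached_state)
+  #       return;
+  #     enable_flags.cached_blend = enable;
+  #     break;
+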
+ def WriteClientContextStateHeader(self, filename):
+ """Writes the client context state header."""
+ file = CHeaderWriter(
+ filename,
+ "// It is included by client_context_state.h\n")
+ file.Write("struct EnableFlags {\n")
+ file.Write(" EnableFlags();\n")
+ for capability in _CAPABILITY_FLAGS:
+ file.Write(" bool %s;\n" % capability['name'])
+ file.Write("};\n\n")
+
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteContextStateGetters(self, file, class_name):
+ """Writes the state getters."""
+ for gl_type in ["GLint", "GLfloat"]:
+ file.Write("""
+bool %s::GetStateAs%s(
+ GLenum pname, %s* params, GLsizei* num_written) const {
+ switch (pname) {
+""" % (class_name, gl_type, gl_type))
+ for state_name in sorted(_STATES.keys()):
+ state = _STATES[state_name]
+ if 'enum' in state:
+ file.Write(" case %s:\n" % state['enum'])
+ file.Write(" *num_written = %d;\n" % len(state['states']))
+ file.Write(" if (params) {\n")
+ for ndx,item in enumerate(state['states']):
+ file.Write(" params[%d] = static_cast<%s>(%s);\n" %
+ (ndx, gl_type, item['name']))
+ file.Write(" }\n")
+ file.Write(" return true;\n")
+ else:
+ for item in state['states']:
+ file.Write(" case %s:\n" % item['enum'])
+ if isinstance(item['default'], list):
+ item_len = len(item['default'])
+ file.Write(" *num_written = %d;\n" % item_len)
+ file.Write(" if (params) {\n")
+ if item['type'] == gl_type:
+ file.Write(" memcpy(params, %s, sizeof(%s) * %d);\n" %
+ (item['name'], item['type'], item_len))
+ else:
+ file.Write(" for (size_t i = 0; i < %s; ++i) {\n" %
+ item_len)
+ file.Write(" params[i] = %s;\n" %
+ (GetGLGetTypeConversion(gl_type, item['type'],
+ "%s[i]" % item['name'])))
+ file.Write(" }\n");
+ else:
+ file.Write(" *num_written = 1;\n")
+ file.Write(" if (params) {\n")
+ file.Write(" params[0] = %s;\n" %
+ (GetGLGetTypeConversion(gl_type, item['type'],
+ item['name'])))
+ file.Write(" }\n")
+ file.Write(" return true;\n")
+ for capability in _CAPABILITY_FLAGS:
+ file.Write(" case GL_%s:\n" % capability['name'].upper())
+ file.Write(" *num_written = 1;\n")
+ file.Write(" if (params) {\n")
+ file.Write(
+ " params[0] = static_cast<%s>(enable_flags.%s);\n" %
+ (gl_type, capability['name']))
+ file.Write(" }\n")
+ file.Write(" return true;\n")
+ file.Write(""" default:
+ return false;
+ }
+}
+""")
+
+ def WriteServiceContextStateImpl(self, filename):
+ """Writes the context state service implementation."""
+ file = CHeaderWriter(
+ filename,
+ "// It is included by context_state.cc\n")
+ code = []
+ for capability in _CAPABILITY_FLAGS:
+ code.append("%s(%s)" %
+ (capability['name'],
+ ('false', 'true')['default' in capability]))
+ code.append("cached_%s(%s)" %
+ (capability['name'],
+ ('false', 'true')['default' in capability]))
+ file.Write("ContextState::EnableFlags::EnableFlags()\n : %s {\n}\n" %
+ ",\n ".join(code))
+ file.Write("\n")
+
+ file.Write("void ContextState::Initialize() {\n")
+ for state_name in sorted(_STATES.keys()):
+ state = _STATES[state_name]
+ for item in state['states']:
+ if isinstance(item['default'], list):
+ for ndx, value in enumerate(item['default']):
+ file.Write(" %s[%d] = %s;\n" % (item['name'], ndx, value))
+ else:
+ file.Write(" %s = %s;\n" % (item['name'], item['default']))
+ if item.get('cached', False):
+ if isinstance(item['default'], list):
+ for ndx, value in enumerate(item['default']):
+ file.Write(" cached_%s[%d] = %s;\n" % (item['name'], ndx, value))
+ else:
+ file.Write(" cached_%s = %s;\n" % (item['name'], item['default']))
+ file.Write("}\n")
+
+ file.Write("""
+void ContextState::InitCapabilities(const ContextState* prev_state) const {
+""")
+ def WriteCapabilities(test_prev):
+ for capability in _CAPABILITY_FLAGS:
+ capability_name = capability['name']
+ if test_prev:
+ file.Write(""" if (prev_state->enable_flags.cached_%s !=
+ enable_flags.cached_%s)\n""" %
+ (capability_name, capability_name))
+ file.Write(" EnableDisable(GL_%s, enable_flags.cached_%s);\n" %
+ (capability_name.upper(), capability_name))
+
+ file.Write(" if (prev_state) {")
+ WriteCapabilities(True)
+ file.Write(" } else {")
+ WriteCapabilities(False)
+ file.Write(" }")
+
+ file.Write("""}
+
+void ContextState::InitState(const ContextState *prev_state) const {
+""")
+
+ def WriteStates(test_prev):
+ # We need to sort the keys so the expectations match
+ for state_name in sorted(_STATES.keys()):
+ state = _STATES[state_name]
+ if state['type'] == 'FrontBack':
+ num_states = len(state['states'])
+ for ndx, group in enumerate(Grouper(num_states / 2, state['states'])):
+ if test_prev:
+ file.Write(" if (")
+ args = []
+ for place, item in enumerate(group):
+ item_name = CachedStateName(item)
+ args.append('%s' % item_name)
+ if test_prev:
+ if place > 0:
+ file.Write(' ||\n')
+ file.Write("(%s != prev_state->%s)" % (item_name, item_name))
+ if test_prev:
+ file.Write(")\n")
+ file.Write(
+ " gl%s(%s, %s);\n" %
+ (state['func'], ('GL_FRONT', 'GL_BACK')[ndx], ", ".join(args)))
+ elif state['type'] == 'NamedParameter':
+ for item in state['states']:
+ item_name = CachedStateName(item)
+
+ if 'extension_flag' in item:
+ file.Write(" if (feature_info_->feature_flags().%s) {\n " %
+ item['extension_flag'])
+ if test_prev:
+ if isinstance(item['default'], list):
+ file.Write(" if (memcmp(prev_state->%s, %s, "
+ "sizeof(%s) * %d)) {\n" %
+ (item_name, item_name, item['type'],
+ len(item['default'])))
+ else:
+ file.Write(" if (prev_state->%s != %s) {\n " %
+ (item_name, item_name))
+ file.Write(" gl%s(%s, %s);\n" %
+ (state['func'],
+ (item['enum_set']
+ if 'enum_set' in item else item['enum']),
+ item['name']))
+ if test_prev:
+ if 'extension_flag' in item:
+ file.Write(" ")
+ file.Write(" }")
+ if 'extension_flag' in item:
+ file.Write(" }")
+ else:
+ if 'extension_flag' in state:
+ file.Write(" if (feature_info_->feature_flags().%s)\n " %
+ state['extension_flag'])
+ if test_prev:
+ file.Write(" if (")
+ args = []
+ for place, item in enumerate(state['states']):
+ item_name = CachedStateName(item)
+ args.append('%s' % item_name)
+ if test_prev:
+ if place > 0:
+ file.Write(' ||\n')
+ file.Write("(%s != prev_state->%s)" %
+ (item_name, item_name))
+ if test_prev:
+ file.Write(" )\n")
+ file.Write(" gl%s(%s);\n" % (state['func'], ", ".join(args)))
+
+ file.Write(" if (prev_state) {")
+ WriteStates(True)
+ file.Write(" } else {")
+ WriteStates(False)
+ file.Write(" }")
+ file.Write("}\n")
+
+ file.Write("""bool ContextState::GetEnabled(GLenum cap) const {
+ switch (cap) {
+""")
+ for capability in _CAPABILITY_FLAGS:
+ file.Write(" case GL_%s:\n" % capability['name'].upper())
+ file.Write(" return enable_flags.%s;\n" % capability['name'])
+ file.Write(""" default:
+ NOTREACHED();
+ return false;
+ }
+}
+""")
+
+ self.WriteContextStateGetters(file, "ContextState")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteClientContextStateImpl(self, filename):
+ """Writes the context state client side implementation."""
+ file = CHeaderWriter(
+ filename,
+ "// It is included by client_context_state.cc\n")
+ code = []
+ for capability in _CAPABILITY_FLAGS:
+ code.append("%s(%s)" %
+ (capability['name'],
+ ('false', 'true')['default' in capability]))
+ file.Write(
+ "ClientContextState::EnableFlags::EnableFlags()\n : %s {\n}\n" %
+ ",\n ".join(code))
+ file.Write("\n")
+
+ file.Write("""
+bool ClientContextState::SetCapabilityState(
+ GLenum cap, bool enabled, bool* changed) {
+ *changed = false;
+ switch (cap) {
+""")
+ for capability in _CAPABILITY_FLAGS:
+ file.Write(" case GL_%s:\n" % capability['name'].upper())
+ file.Write(""" if (enable_flags.%(name)s != enabled) {
+ *changed = true;
+ enable_flags.%(name)s = enabled;
+ }
+ return true;
+""" % capability)
+ file.Write(""" default:
+ return false;
+ }
+}
+""")
+ file.Write("""bool ClientContextState::GetEnabled(
+ GLenum cap, bool* enabled) const {
+ switch (cap) {
+""")
+ for capability in _CAPABILITY_FLAGS:
+ file.Write(" case GL_%s:\n" % capability['name'].upper())
+ file.Write(" *enabled = enable_flags.%s;\n" % capability['name'])
+ file.Write(" return true;\n")
+ file.Write(""" default:
+ return false;
+ }
+}
+""")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteServiceImplementation(self, filename):
+ """Writes the service decorder implementation."""
+ file = CHeaderWriter(
+ filename,
+ "// It is included by gles2_cmd_decoder.cc\n")
+
+ for func in self.functions:
+ if True:
+ #gen_cmd = func.GetInfo('gen_cmd')
+ #if gen_cmd == True or gen_cmd == None:
+ func.WriteServiceImplementation(file)
+
+ file.Write("""
+bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
+ switch (cap) {
+""")
+ for capability in _CAPABILITY_FLAGS:
+ file.Write(" case GL_%s:\n" % capability['name'].upper())
+ if 'state_flag' in capability:
+ file.Write("""\
+ state_.enable_flags.%(name)s = enabled;
+ if (state_.enable_flags.cached_%(name)s != enabled
+ || state_.ignore_cached_state) {
+ %(state_flag)s = true;
+ }
+ return false;
+ """ % capability)
+ else:
+ file.Write("""\
+ state_.enable_flags.%(name)s = enabled;
+ if (state_.enable_flags.cached_%(name)s != enabled
+ || state_.ignore_cached_state) {
+ state_.enable_flags.cached_%(name)s = enabled;
+ return true;
+ }
+ return false;
+ """ % capability)
+ file.Write(""" default:
+ NOTREACHED();
+ return false;
+ }
+}
+""")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteServiceUnitTests(self, filename):
+ """Writes the service decorder unit tests."""
+ num_tests = len(self.functions)
+ FUNCTIONS_PER_FILE = 98 # hard code this so it doesn't change.
+ count = 0
+ for test_num in range(0, num_tests, FUNCTIONS_PER_FILE):
+ count += 1
+ name = filename % count
+ file = CHeaderWriter(
+ name,
+ "// It is included by gles2_cmd_decoder_unittest_%d.cc\n" % count)
+ test_name = 'GLES2DecoderTest%d' % count
+ end = test_num + FUNCTIONS_PER_FILE
+ if end > num_tests:
+ end = num_tests
+ for idx in range(test_num, end):
+ func = self.functions[idx]
+
+ # Do any filtering of the functions here, so that the functions
+ # will not move between the numbered files if filtering properties
+ # are changed.
+ if func.GetInfo('extension_flag'):
+ continue
+
+ if True:
+ #gen_cmd = func.GetInfo('gen_cmd')
+ #if gen_cmd == True or gen_cmd == None:
+ if func.GetInfo('unit_test') == False:
+ file.Write("// TODO(gman): %s\n" % func.name)
+ else:
+ func.WriteServiceUnitTest(file, {
+ 'test_name': test_name
+ })
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+ file = CHeaderWriter(
+ filename % 0,
+ "// It is included by gles2_cmd_decoder_unittest_base.cc\n")
+ file.Write(
+"""void GLES2DecoderTestBase::SetupInitCapabilitiesExpectations() {
+""")
+ for capability in _CAPABILITY_FLAGS:
+ file.Write(" ExpectEnableDisable(GL_%s, %s);\n" %
+ (capability['name'].upper(),
+ ('false', 'true')['default' in capability]))
+ file.Write("""}
+
+void GLES2DecoderTestBase::SetupInitStateExpectations() {
+""")
+
+ # We need to sort the keys so the expectations match
+ for state_name in sorted(_STATES.keys()):
+ state = _STATES[state_name]
+ if state['type'] == 'FrontBack':
+ num_states = len(state['states'])
+ for ndx, group in enumerate(Grouper(num_states / 2, state['states'])):
+ args = []
+ for item in group:
+ if 'expected' in item:
+ args.append(item['expected'])
+ else:
+ args.append(item['default'])
+ file.Write(
+ " EXPECT_CALL(*gl_, %s(%s, %s))\n" %
+ (state['func'], ('GL_FRONT', 'GL_BACK')[ndx], ", ".join(args)))
+ file.Write(" .Times(1)\n")
+ file.Write(" .RetiresOnSaturation();\n")
+ elif state['type'] == 'NamedParameter':
+ for item in state['states']:
+ if 'extension_flag' in item:
+ file.Write(" if (group_->feature_info()->feature_flags().%s) {\n" %
+ item['extension_flag'])
+ file.Write(" ")
+ expect_value = item['default']
+ if isinstance(expect_value, list):
+ # TODO: Currently we do not check array values.
+ expect_value = "_"
+
+ file.Write(
+ " EXPECT_CALL(*gl_, %s(%s, %s))\n" %
+ (state['func'],
+ (item['enum_set']
+ if 'enum_set' in item else item['enum']),
+ expect_value))
+ file.Write(" .Times(1)\n")
+ file.Write(" .RetiresOnSaturation();\n")
+ if 'extension_flag' in item:
+ file.Write(" }\n")
+ else:
+ if 'extension_flag' in state:
+ file.Write(" if (group_->feature_info()->feature_flags().%s) {\n" %
+ state['extension_flag'])
+ file.Write(" ")
+ args = []
+ for item in state['states']:
+ if 'expected' in item:
+ args.append(item['expected'])
+ else:
+ args.append(item['default'])
+ # TODO: Currently we do not check array values.
+ args = ["_" if isinstance(arg, list) else arg for arg in args]
+ file.Write(" EXPECT_CALL(*gl_, %s(%s))\n" %
+ (state['func'], ", ".join(args)))
+ file.Write(" .Times(1)\n")
+ file.Write(" .RetiresOnSaturation();\n")
+ if 'extension_flag' in state:
+ file.Write(" }\n")
+ file.Write("""}
+""")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteServiceUnitTestsForExtensions(self, filename):
+ """Writes the service decorder unit tests for functions with extension_flag.
+
+ The functions are special in that they need a specific unit test
+    base class to turn on the extension.
+ """
+ functions = [f for f in self.functions if f.GetInfo('extension_flag')]
+ file = CHeaderWriter(
+ filename,
+ "// It is included by gles2_cmd_decoder_unittest_extensions.cc\n")
+ for func in functions:
+ if True:
+ if func.GetInfo('unit_test') == False:
+ file.Write("// TODO(gman): %s\n" % func.name)
+ else:
+ extension = ToCamelCase(
+ ToGLExtensionString(func.GetInfo('extension_flag')))
+ func.WriteServiceUnitTest(file, {
+ 'test_name': 'GLES2DecoderTestWith%s' % extension
+ })
+
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2Header(self, filename):
+ """Writes the GLES2 header."""
+ file = CHeaderWriter(
+ filename,
+ "// This file contains Chromium-specific GLES2 declarations.\n\n")
+
+ for func in self.original_functions:
+ func.WriteGLES2Header(file)
+
+ file.Write("\n")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2CLibImplementation(self, filename):
+ """Writes the GLES2 c lib implementation."""
+ file = CHeaderWriter(
+ filename,
+ "// These functions emulate GLES2 over command buffers.\n")
+
+ for func in self.original_functions:
+ func.WriteGLES2CLibImplementation(file)
+
+ file.Write("""
+namespace gles2 {
+
+extern const NameToFunc g_gles2_function_table[] = {
+""")
+ for func in self.original_functions:
+ file.Write(
+ ' { "gl%s", reinterpret_cast<GLES2FunctionPointer>(gl%s), },\n' %
+ (func.name, func.name))
+ file.Write(""" { NULL, NULL, },
+};
+
+} // namespace gles2
+""")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2InterfaceHeader(self, filename):
+ """Writes the GLES2 interface header."""
+ file = CHeaderWriter(
+ filename,
+ "// This file is included by gles2_interface.h to declare the\n"
+ "// GL api functions.\n")
+ for func in self.original_functions:
+ func.WriteGLES2InterfaceHeader(file)
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2InterfaceStub(self, filename):
+ """Writes the GLES2 interface stub header."""
+ file = CHeaderWriter(
+ filename,
+ "// This file is included by gles2_interface_stub.h.\n")
+ for func in self.original_functions:
+ func.WriteGLES2InterfaceStub(file)
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2InterfaceStubImpl(self, filename):
+ """Writes the GLES2 interface header."""
+ file = CHeaderWriter(
+ filename,
+ "// This file is included by gles2_interface_stub.cc.\n")
+ for func in self.original_functions:
+ func.WriteGLES2InterfaceStubImpl(file)
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2ImplementationHeader(self, filename):
+ """Writes the GLES2 Implementation header."""
+ file = CHeaderWriter(
+ filename,
+ "// This file is included by gles2_implementation.h to declare the\n"
+ "// GL api functions.\n")
+ for func in self.original_functions:
+ func.WriteGLES2ImplementationHeader(file)
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2Implementation(self, filename):
+ """Writes the GLES2 Implementation."""
+ file = CHeaderWriter(
+ filename,
+ "// This file is included by gles2_implementation.cc to define the\n"
+ "// GL api functions.\n")
+ for func in self.original_functions:
+ func.WriteGLES2Implementation(file)
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2TraceImplementationHeader(self, filename):
+ """Writes the GLES2 Trace Implementation header."""
+ file = CHeaderWriter(
+ filename,
+ "// This file is included by gles2_trace_implementation.h\n")
+ for func in self.original_functions:
+ func.WriteGLES2TraceImplementationHeader(file)
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2TraceImplementation(self, filename):
+ """Writes the GLES2 Trace Implementation."""
+ file = CHeaderWriter(
+ filename,
+ "// This file is included by gles2_trace_implementation.cc\n")
+ for func in self.original_functions:
+ func.WriteGLES2TraceImplementation(file)
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2ImplementationUnitTests(self, filename):
+ """Writes the GLES2 helper header."""
+ file = CHeaderWriter(
+ filename,
+ "// This file is included by gles2_implementation.h to declare the\n"
+ "// GL api functions.\n")
+ for func in self.original_functions:
+ func.WriteGLES2ImplementationUnitTest(file)
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteServiceUtilsHeader(self, filename):
+ """Writes the gles2 auto generated utility header."""
+ file = CHeaderWriter(filename)
+ for name in sorted(_NAMED_TYPE_INFO.keys()):
+ named_type = NamedType(_NAMED_TYPE_INFO[name])
+ if named_type.IsConstant():
+ continue
+ file.Write("ValueValidator<%s> %s;\n" %
+ (named_type.GetType(), ToUnderscore(name)))
+ file.Write("\n")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteServiceUtilsImplementation(self, filename):
+ """Writes the gles2 auto generated utility implementation."""
+ file = CHeaderWriter(filename)
+ names = sorted(_NAMED_TYPE_INFO.keys())
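+    # The first loop below emits one value table per validated type; for a
+    # hypothetical GLenum type named 'TextureTarget' the emitted code would
+    # look roughly like:
+    #   static const GLenum valid_texture_target_table[] = {
+    #     GL_TEXTURE_2D,
+    #   };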
+ for name in names:
+ named_type = NamedType(_NAMED_TYPE_INFO[name])
+ if named_type.IsConstant():
+ continue
+ if named_type.GetValidValues():
+ file.Write("static const %s valid_%s_table[] = {\n" %
+ (named_type.GetType(), ToUnderscore(name)))
+ for value in named_type.GetValidValues():
+ file.Write(" %s,\n" % value)
+ file.Write("};\n")
+ file.Write("\n")
+ file.Write("Validators::Validators()")
+ pre = ' : '
+ for count, name in enumerate(names):
+ named_type = NamedType(_NAMED_TYPE_INFO[name])
+ if named_type.IsConstant():
+ continue
+ if named_type.GetValidValues():
+ code = """%(pre)s%(name)s(
+ valid_%(name)s_table, arraysize(valid_%(name)s_table))"""
+ else:
+ code = "%(pre)s%(name)s()"
+ file.Write(code % {
+ 'name': ToUnderscore(name),
+ 'pre': pre,
+ })
+ pre = ',\n '
+ file.Write(" {\n");
+ file.Write("}\n\n");
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteCommonUtilsHeader(self, filename):
+ """Writes the gles2 common utility header."""
+ file = CHeaderWriter(filename)
+ type_infos = sorted(_NAMED_TYPE_INFO.keys())
+ for type_info in type_infos:
+ if _NAMED_TYPE_INFO[type_info]['type'] == 'GLenum':
+ file.Write("static std::string GetString%s(uint32_t value);\n" %
+ type_info)
+ file.Write("\n")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteCommonUtilsImpl(self, filename):
+ """Writes the gles2 common utility header."""
+ enum_re = re.compile(r'\#define\s+(GL_[a-zA-Z0-9_]+)\s+([0-9A-Fa-fx]+)')
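+    # The regex matches GL enum definitions such as (illustrative):
+    #   #define GL_TEXTURE_2D                     0x0DE1
+    # capturing the name and value for the enum-to-string table below.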
+ dict = {}
+ for fname in ['third_party/khronos/GLES2/gl2.h',
+ 'third_party/khronos/GLES2/gl2ext.h',
+ 'gpu/GLES2/gl2chromium.h',
+ 'gpu/GLES2/gl2extchromium.h']:
+ lines = open(fname).readlines()
+ for line in lines:
+ m = enum_re.match(line)
+ if m:
+ name = m.group(1)
+ value = m.group(2)
+ if len(value) <= 10 and not value in dict:
+ dict[value] = name
+
+ file = CHeaderWriter(filename)
+ file.Write("static const GLES2Util::EnumToString "
+ "enum_to_string_table[] = {\n")
+ for value in dict:
+ file.Write(' { %s, "%s", },\n' % (value, dict[value]))
+ file.Write("""};
+
+const GLES2Util::EnumToString* const GLES2Util::enum_to_string_table_ =
+ enum_to_string_table;
+const size_t GLES2Util::enum_to_string_table_len_ =
+ sizeof(enum_to_string_table) / sizeof(enum_to_string_table[0]);
+
+""")
+
+ enums = sorted(_NAMED_TYPE_INFO.keys())
+ for enum in enums:
+ if _NAMED_TYPE_INFO[enum]['type'] == 'GLenum':
+ file.Write("std::string GLES2Util::GetString%s(uint32_t value) {\n" %
+ enum)
+ if len(_NAMED_TYPE_INFO[enum]['valid']) > 0:
+ file.Write(" static const EnumToString string_table[] = {\n")
+ for value in _NAMED_TYPE_INFO[enum]['valid']:
+ file.Write(' { %s, "%s" },\n' % (value, value))
+ file.Write(""" };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+""")
+ else:
+ file.Write(""" return GLES2Util::GetQualifiedEnumString(
+ NULL, 0, value);
+}
+
+""")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WritePepperGLES2Interface(self, filename, dev):
+ """Writes the Pepper OpenGLES interface definition."""
+ file = CWriter(filename)
+ file.Write(_LICENSE)
+ file.Write(_DO_NOT_EDIT_WARNING)
+
+ file.Write("label Chrome {\n")
+ file.Write(" M39 = 1.0\n")
+ file.Write("};\n\n")
+
+ if not dev:
+ # Declare GL types.
+ file.Write("[version=1.0]\n")
+ file.Write("describe {\n")
+ for gltype in ['GLbitfield', 'GLboolean', 'GLbyte', 'GLclampf',
+ 'GLclampx', 'GLenum', 'GLfixed', 'GLfloat', 'GLint',
+ 'GLintptr', 'GLshort', 'GLsizei', 'GLsizeiptr',
+ 'GLubyte', 'GLuint', 'GLushort']:
+ file.Write(" %s;\n" % gltype)
+ file.Write(" %s_ptr_t;\n" % gltype)
+ file.Write("};\n\n")
+
+ # C level typedefs.
+ file.Write("#inline c\n")
+ file.Write("#include \"ppapi/c/pp_resource.h\"\n")
+ if dev:
+ file.Write("#include \"ppapi/c/ppb_opengles2.h\"\n\n")
+ else:
+ file.Write("\n#ifndef __gl2_h_\n")
+ for (k, v) in _GL_TYPES.iteritems():
+ file.Write("typedef %s %s;\n" % (v, k))
+ file.Write("#ifdef _WIN64\n")
+ for (k, v) in _GL_TYPES_64.iteritems():
+ file.Write("typedef %s %s;\n" % (v, k))
+ file.Write("#else\n")
+ for (k, v) in _GL_TYPES_32.iteritems():
+ file.Write("typedef %s %s;\n" % (v, k))
+ file.Write("#endif // _WIN64\n")
+ file.Write("#endif // __gl2_h_\n\n")
+ file.Write("#endinl\n")
+
+ for interface in self.pepper_interfaces:
+ if interface.dev != dev:
+ continue
+    # Historically, we have provided the OpenGLES2 interfaces in the struct
+    # namespace. To avoid breaking code that uses the interface as
+    # "struct OpenGLES2", we keep it in the struct namespace.
+ file.Write('\n[macro="%s", force_struct_namespace]\n' %
+ interface.GetInterfaceName())
+ file.Write("interface %s {\n" % interface.GetStructName())
+ for func in self.original_functions:
+ if not func.InPepperInterface(interface):
+ continue
+
+ ret_type = func.MapCTypeToPepperIdlType(func.return_type,
+ is_for_return_type=True)
+ func_prefix = " %s %s(" % (ret_type, func.GetPepperName())
+ file.Write(func_prefix)
+ file.Write("[in] PP_Resource context")
+ for arg in func.MakeTypedPepperIdlArgStrings():
+ file.Write(",\n" + " " * len(func_prefix) + arg)
+ file.Write(");\n")
+ file.Write("};\n\n")
+
+
+ file.Close()
+
+ def WritePepperGLES2Implementation(self, filename):
+ """Writes the Pepper OpenGLES interface implementation."""
+
+ file = CWriter(filename)
+ file.Write(_LICENSE)
+ file.Write(_DO_NOT_EDIT_WARNING)
+
+ file.Write("#include \"ppapi/shared_impl/ppb_opengles2_shared.h\"\n\n")
+ file.Write("#include \"base/logging.h\"\n")
+ file.Write("#include \"gpu/command_buffer/client/gles2_implementation.h\"\n")
+ file.Write("#include \"ppapi/shared_impl/ppb_graphics_3d_shared.h\"\n")
+ file.Write("#include \"ppapi/thunk/enter.h\"\n\n")
+
+ file.Write("namespace ppapi {\n\n")
+ file.Write("namespace {\n\n")
+
+ file.Write("typedef thunk::EnterResource<thunk::PPB_Graphics3D_API>"
+ " Enter3D;\n\n")
+
+ file.Write("gpu::gles2::GLES2Implementation* ToGles2Impl(Enter3D*"
+ " enter) {\n")
+ file.Write(" DCHECK(enter);\n")
+ file.Write(" DCHECK(enter->succeeded());\n")
+ file.Write(" return static_cast<PPB_Graphics3D_Shared*>(enter->object())->"
+ "gles2_impl();\n");
+ file.Write("}\n\n");
+
+ for func in self.original_functions:
+ if not func.InAnyPepperExtension():
+ continue
+
+ original_arg = func.MakeTypedPepperArgString("")
+ context_arg = "PP_Resource context_id"
+ if len(original_arg):
+ arg = context_arg + ", " + original_arg
+ else:
+ arg = context_arg
+ file.Write("%s %s(%s) {\n" %
+ (func.return_type, func.GetPepperName(), arg))
+ file.Write(" Enter3D enter(context_id, true);\n")
+ file.Write(" if (enter.succeeded()) {\n")
+
+ return_str = "" if func.return_type == "void" else "return "
+ file.Write(" %sToGles2Impl(&enter)->%s(%s);\n" %
+ (return_str, func.original_name,
+ func.MakeOriginalArgString("")))
+ file.Write(" }")
+ if func.return_type == "void":
+ file.Write("\n")
+ else:
+ file.Write(" else {\n")
+ file.Write(" return %s;\n" % func.GetErrorReturnString())
+ file.Write(" }\n")
+ file.Write("}\n\n")
+
+ file.Write("} // namespace\n")
+
+ for interface in self.pepper_interfaces:
+ file.Write("const %s* PPB_OpenGLES2_Shared::Get%sInterface() {\n" %
+ (interface.GetStructName(), interface.GetName()))
+ file.Write(" static const struct %s "
+ "ppb_opengles2 = {\n" % interface.GetStructName())
+ file.Write(" &")
+ file.Write(",\n &".join(
+ f.GetPepperName() for f in self.original_functions
+ if f.InPepperInterface(interface)))
+ file.Write("\n")
+
+ file.Write(" };\n")
+ file.Write(" return &ppb_opengles2;\n")
+ file.Write("}\n")
+
+ file.Write("} // namespace ppapi\n")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteGLES2ToPPAPIBridge(self, filename):
+ """Connects GLES2 helper library to PPB_OpenGLES2 interface"""
+
+ file = CWriter(filename)
+ file.Write(_LICENSE)
+ file.Write(_DO_NOT_EDIT_WARNING)
+
+ file.Write("#ifndef GL_GLEXT_PROTOTYPES\n")
+ file.Write("#define GL_GLEXT_PROTOTYPES\n")
+ file.Write("#endif\n")
+ file.Write("#include <GLES2/gl2.h>\n")
+ file.Write("#include <GLES2/gl2ext.h>\n")
+ file.Write("#include \"ppapi/lib/gl/gles2/gl2ext_ppapi.h\"\n\n")
+
+ for func in self.original_functions:
+ if not func.InAnyPepperExtension():
+ continue
+
+ interface = self.interface_info[func.GetInfo('pepper_interface') or '']
+
+ file.Write("%s GL_APIENTRY gl%s(%s) {\n" %
+ (func.return_type, func.GetPepperName(),
+ func.MakeTypedPepperArgString("")))
+ return_str = "" if func.return_type == "void" else "return "
+ interface_str = "glGet%sInterfacePPAPI()" % interface.GetName()
+ original_arg = func.MakeOriginalArgString("")
+ context_arg = "glGetCurrentContextPPAPI()"
+ if len(original_arg):
+ arg = context_arg + ", " + original_arg
+ else:
+ arg = context_arg
+ if interface.GetName():
+ file.Write(" const struct %s* ext = %s;\n" %
+ (interface.GetStructName(), interface_str))
+ file.Write(" if (ext)\n")
+ file.Write(" %sext->%s(%s);\n" %
+ (return_str, func.GetPepperName(), arg))
+ if return_str:
+ file.Write(" %s0;\n" % return_str)
+ else:
+ file.Write(" %s%s->%s(%s);\n" %
+ (return_str, interface_str, func.GetPepperName(), arg))
+ file.Write("}\n\n")
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteMojoGLCallVisitor(self, filename):
+ """Provides the GL implementation for mojo"""
+ file = CWriter(filename)
+ file.Write(_LICENSE)
+ file.Write(_DO_NOT_EDIT_WARNING)
+
+ for func in self.original_functions:
+ if not func.IsCoreGLFunction():
+ continue
+ file.Write("VISIT_GL_CALL(%s, %s, (%s), (%s))\n" %
+ (func.name, func.return_type,
+ func.MakeTypedOriginalArgString(""),
+ func.MakeOriginalArgString("")))
+
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+ def WriteMojoGLCallVisitorForExtension(self, filename, extension):
+ """Provides the GL implementation for mojo for a particular extension"""
+ file = CWriter(filename)
+ file.Write(_LICENSE)
+ file.Write(_DO_NOT_EDIT_WARNING)
+
+ for func in self.original_functions:
+ if func.GetInfo("extension") != extension:
+ continue
+ file.Write("VISIT_GL_CALL(%s, %s, (%s), (%s))\n" %
+ (func.name, func.return_type,
+ func.MakeTypedOriginalArgString(""),
+ func.MakeOriginalArgString("")))
+
+ file.Close()
+ self.generated_cpp_filenames.append(file.filename)
+
+def Format(generated_files):
+ for filename in generated_files:
+ call(["clang-format", "-i", "-style=chromium", filename])
+
+def main(argv):
+ """This is the main function."""
+ parser = OptionParser()
+ parser.add_option(
+ "--output-dir",
+ help="base directory for resulting files, under chrome/src. default is "
+ "empty. Use this if you want the result stored under gen.")
+ parser.add_option(
+ "-v", "--verbose", action="store_true",
+ help="prints more output.")
+
+ (options, args) = parser.parse_args(args=argv)
+
+  # Add in states and capabilities to GLState.
+ gl_state_valid = _NAMED_TYPE_INFO['GLState']['valid']
+ for state_name in sorted(_STATES.keys()):
+ state = _STATES[state_name]
+ if 'extension_flag' in state:
+ continue
+ if 'enum' in state:
+ if not state['enum'] in gl_state_valid:
+ gl_state_valid.append(state['enum'])
+ else:
+ for item in state['states']:
+ if 'extension_flag' in item:
+ continue
+ if not item['enum'] in gl_state_valid:
+ gl_state_valid.append(item['enum'])
+ for capability in _CAPABILITY_FLAGS:
+ valid_value = "GL_%s" % capability['name'].upper()
+ if not valid_value in gl_state_valid:
+ gl_state_valid.append(valid_value)
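+  # After this, the GLState validator accepts both state query enums
+  # (e.g. GL_COLOR_CLEAR_VALUE) and capability enums (e.g. GL_BLEND).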
+
+ # This script lives under gpu/command_buffer, cd to base directory.
+ os.chdir(os.path.dirname(__file__) + "/../..")
+ base_dir = os.getcwd()
+ gen = GLGenerator(options.verbose)
+ gen.ParseGLH("gpu/command_buffer/cmd_buffer_functions.txt")
+
+ # Support generating files under gen/
+  if options.output_dir is not None:
+ os.chdir(options.output_dir)
+
+ gen.WritePepperGLES2Interface("ppapi/api/ppb_opengles2.idl", False)
+ gen.WritePepperGLES2Interface("ppapi/api/dev/ppb_opengles2ext_dev.idl", True)
+ gen.WriteGLES2ToPPAPIBridge("ppapi/lib/gl/gles2/gles2.c")
+ gen.WritePepperGLES2Implementation(
+ "ppapi/shared_impl/ppb_opengles2_shared.cc")
+ os.chdir(base_dir)
+ gen.WriteCommandIds("gpu/command_buffer/common/gles2_cmd_ids_autogen.h")
+ gen.WriteFormat("gpu/command_buffer/common/gles2_cmd_format_autogen.h")
+ gen.WriteFormatTest(
+ "gpu/command_buffer/common/gles2_cmd_format_test_autogen.h")
+ gen.WriteGLES2InterfaceHeader(
+ "gpu/command_buffer/client/gles2_interface_autogen.h")
+ gen.WriteGLES2InterfaceStub(
+ "gpu/command_buffer/client/gles2_interface_stub_autogen.h")
+ gen.WriteGLES2InterfaceStubImpl(
+ "gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h")
+ gen.WriteGLES2ImplementationHeader(
+ "gpu/command_buffer/client/gles2_implementation_autogen.h")
+ gen.WriteGLES2Implementation(
+ "gpu/command_buffer/client/gles2_implementation_impl_autogen.h")
+ gen.WriteGLES2ImplementationUnitTests(
+ "gpu/command_buffer/client/gles2_implementation_unittest_autogen.h")
+ gen.WriteGLES2TraceImplementationHeader(
+ "gpu/command_buffer/client/gles2_trace_implementation_autogen.h")
+ gen.WriteGLES2TraceImplementation(
+ "gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h")
+ gen.WriteGLES2CLibImplementation(
+ "gpu/command_buffer/client/gles2_c_lib_autogen.h")
+ gen.WriteCmdHelperHeader(
+ "gpu/command_buffer/client/gles2_cmd_helper_autogen.h")
+ gen.WriteServiceImplementation(
+ "gpu/command_buffer/service/gles2_cmd_decoder_autogen.h")
+ gen.WriteServiceContextStateHeader(
+ "gpu/command_buffer/service/context_state_autogen.h")
+ gen.WriteServiceContextStateImpl(
+ "gpu/command_buffer/service/context_state_impl_autogen.h")
+ gen.WriteClientContextStateHeader(
+ "gpu/command_buffer/client/client_context_state_autogen.h")
+ gen.WriteClientContextStateImpl(
+ "gpu/command_buffer/client/client_context_state_impl_autogen.h")
+ gen.WriteServiceUnitTests(
+ "gpu/command_buffer/service/gles2_cmd_decoder_unittest_%d_autogen.h")
+ gen.WriteServiceUnitTestsForExtensions(
+ "gpu/command_buffer/service/"
+ "gles2_cmd_decoder_unittest_extensions_autogen.h")
+ gen.WriteServiceUtilsHeader(
+ "gpu/command_buffer/service/gles2_cmd_validation_autogen.h")
+ gen.WriteServiceUtilsImplementation(
+ "gpu/command_buffer/service/"
+ "gles2_cmd_validation_implementation_autogen.h")
+ gen.WriteCommonUtilsHeader(
+ "gpu/command_buffer/common/gles2_cmd_utils_autogen.h")
+ gen.WriteCommonUtilsImpl(
+ "gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h")
+ gen.WriteGLES2Header("gpu/GLES2/gl2chromium_autogen.h")
+ mojo_gles2_prefix = "mojo/public/c/gles2/gles2_call_visitor"
+ gen.WriteMojoGLCallVisitor(mojo_gles2_prefix + "_autogen.h")
+ gen.WriteMojoGLCallVisitorForExtension(
+ mojo_gles2_prefix + "_chromium_texture_mailbox_autogen.h",
+ "CHROMIUM_texture_mailbox")
+ gen.WriteMojoGLCallVisitorForExtension(
+ mojo_gles2_prefix + "_chromium_sync_point_autogen.h",
+ "CHROMIUM_sync_point")
+
+ Format(gen.generated_cpp_filenames)
+
+ if gen.errors > 0:
+ print "%d errors" % gen.errors
+ return 1
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/gpu/command_buffer/client/BUILD.gn b/gpu/command_buffer/client/BUILD.gn
new file mode 100644
index 0000000..afd03a4
--- /dev/null
+++ b/gpu/command_buffer/client/BUILD.gn
@@ -0,0 +1,193 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+source_set("client") {
+ sources = [
+ "cmd_buffer_helper.cc",
+ "cmd_buffer_helper.h",
+ "fenced_allocator.cc",
+ "fenced_allocator.h",
+ "gpu_control.h",
+ "mapped_memory.cc",
+ "mapped_memory.h",
+ "ring_buffer.cc",
+ "ring_buffer.h",
+ "transfer_buffer.cc",
+ "transfer_buffer.h",
+ ]
+
+ defines = [ "GPU_IMPLEMENTATION" ]
+
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ] # size_t to int truncation.
+ }
+
+ all_dependent_configs = [ "//third_party/khronos:khronos_headers" ]
+
+ deps = [
+ "//gpu/command_buffer/common",
+ ]
+}
+
+group("gles2_cmd_helper") {
+ if (is_component_build) {
+ deps = [ "//gpu" ]
+ } else {
+ deps = [ ":gles2_cmd_helper_sources" ]
+ }
+}
+
+source_set("gles2_cmd_helper_sources") {
+ visibility = [ ":gles2_cmd_helper", "//gpu" ]
+ sources = [
+ "gles2_cmd_helper.cc",
+ "gles2_cmd_helper.h",
+ "gles2_cmd_helper_autogen.h",
+ ]
+
+ defines = [ "GPU_IMPLEMENTATION" ]
+
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ] # size_t to int truncation.
+ }
+
+ deps = [ ":client" ]
+}
+
+gles2_c_lib_source_files = [
+ "gles2_c_lib.cc",
+ "gles2_c_lib_autogen.h",
+ "gles2_c_lib_export.h",
+ "gles2_lib.h",
+ "gles2_lib.cc",
+]
+
+gles2_implementation_source_files = [
+ "buffer_tracker.cc",
+ "buffer_tracker.h",
+ "client_context_state.h",
+ "client_context_state.cc",
+ "client_context_state_autogen.h",
+ "client_context_state_impl_autogen.h",
+ "gles2_impl_export.h",
+ "gles2_implementation_autogen.h",
+ "gles2_implementation.cc",
+ "gles2_implementation.h",
+ "gles2_implementation_impl_autogen.h",
+ "gles2_trace_implementation_autogen.h",
+ "gles2_trace_implementation.cc",
+ "gles2_trace_implementation.h",
+ "gles2_trace_implementation_impl_autogen.h",
+ "gpu_memory_buffer_factory.h",
+ "gpu_memory_buffer_tracker.cc",
+ "gpu_memory_buffer_tracker.h",
+ "gpu_switches.cc",
+ "gpu_switches.h",
+ "program_info_manager.cc",
+ "program_info_manager.h",
+ "query_tracker.cc",
+ "query_tracker.h",
+ "share_group.cc",
+ "share_group.h",
+ "vertex_array_object_manager.cc",
+ "vertex_array_object_manager.h",
+]
+
+# Provides the GLES2 interface, but does not cause any implementation to be
+# linked in. Useful when a target uses the interface but lets its users choose
+# an implementation.
+source_set("gles2_interface") {
+ sources = [ "gles2_interface.h" ]
+ public_configs = [ "//third_party/khronos:khronos_headers" ]
+ deps = [
+ "//base",
+ ]
+}
+
+# Library that emulates GLES2 using command buffers.
+component("gles2_implementation") {
+ sources = gles2_implementation_source_files
+
+ defines = [ "GLES2_IMPL_IMPLEMENTATION" ]
+ all_dependent_configs = [ "//third_party/khronos:khronos_headers" ]
+
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ] # size_t to int truncation.
+ }
+
+ deps = [
+ ":gles2_cmd_helper",
+ ":gles2_interface",
+ "//base",
+ "//gpu/command_buffer/common",
+ "//ui/gfx/geometry",
+ ]
+}
+
+# Library that emulates GLES2 using command buffers (with client-side arrays).
+component("gles2_implementation_client_side_arrays") {
+ sources = gles2_implementation_source_files
+
+ defines = [
+ "GLES2_IMPL_IMPLEMENTATION",
+ "GLES2_SUPPORT_CLIENT_SIDE_ARRAYS=1",
+ ]
+ all_dependent_configs = [ "//third_party/khronos:khronos_headers" ]
+
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ] # size_t to int truncation.
+ }
+
+ deps = [
+ ":gles2_cmd_helper",
+ ":gles2_interface",
+ "//base",
+ "//gpu/command_buffer/common",
+ "//ui/gfx/geometry",
+ "//ui/gl",
+ ]
+}
+
+component("gl_in_process_context") {
+ sources = [
+ "gl_in_process_context.h",
+ "gl_in_process_context.cc",
+ "gl_in_process_context_export.h",
+ ]
+
+ defines = [ "GL_IN_PROCESS_CONTEXT_IMPLEMENTATION" ]
+
+ deps = [
+ ":gles2_implementation",
+ "//gpu",
+ "//gpu/command_buffer/common:gles2_utils",
+ "//base",
+ "//base/third_party/dynamic_annotations",
+ "//ui/gfx/geometry",
+ "//ui/gl",
+ ]
+}
+
+component("gles2_c_lib") {
+ sources = gles2_c_lib_source_files
+ defines = [ "GLES2_C_LIB_IMPLEMENTATION" ]
+
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ] # size_t to int truncation.
+ }
+
+ deps = [
+ ":client",
+ ":gles2_interface",
+ "//base",
+ "//base/third_party/dynamic_annotations",
+ "//gpu/command_buffer/common",
+ ]
+}
+
diff --git a/gpu/command_buffer/client/buffer_tracker.cc b/gpu/command_buffer/client/buffer_tracker.cc
new file mode 100644
index 0000000..5887e52
--- /dev/null
+++ b/gpu/command_buffer/client/buffer_tracker.cc
@@ -0,0 +1,89 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/buffer_tracker.h"
+
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+
+namespace gpu {
+namespace gles2 {
+
+BufferTracker::BufferTracker(MappedMemoryManager* manager)
+ : mapped_memory_(manager) {
+}
+
+BufferTracker::~BufferTracker() {
+ while (!buffers_.empty()) {
+ RemoveBuffer(buffers_.begin()->first);
+ }
+}
+
+BufferTracker::Buffer* BufferTracker::CreateBuffer(
+ GLuint id, GLsizeiptr size) {
+ DCHECK_NE(0u, id);
+ DCHECK_LE(0, size);
+ int32 shm_id = -1;
+ uint32 shm_offset = 0;
+ void* address = NULL;
+ if (size)
+ address = mapped_memory_->Alloc(size, &shm_id, &shm_offset);
+
+ Buffer* buffer = new Buffer(id, size, shm_id, shm_offset, address);
+ std::pair<BufferMap::iterator, bool> result =
+ buffers_.insert(std::make_pair(id, buffer));
+ DCHECK(result.second);
+ return buffer;
+}
+
+BufferTracker::Buffer* BufferTracker::GetBuffer(GLuint client_id) {
+ BufferMap::iterator it = buffers_.find(client_id);
+ return it != buffers_.end() ? it->second : NULL;
+}
+
+void BufferTracker::RemoveBuffer(GLuint client_id) {
+ BufferMap::iterator it = buffers_.find(client_id);
+ if (it != buffers_.end()) {
+ Buffer* buffer = it->second;
+ buffers_.erase(it);
+ if (buffer->address_)
+ mapped_memory_->Free(buffer->address_);
+ delete buffer;
+ }
+}
+
+void BufferTracker::FreePendingToken(Buffer* buffer, int32 token) {
+ if (buffer->address_)
+ mapped_memory_->FreePendingToken(buffer->address_, token);
+ buffer->size_ = 0;
+ buffer->shm_id_ = 0;
+ buffer->shm_offset_ = 0;
+ buffer->address_ = NULL;
+ buffer->last_usage_token_ = 0;
+ buffer->last_async_upload_token_ = 0;
+}
+
+void BufferTracker::Unmanage(Buffer* buffer) {
+ buffer->size_ = 0;
+ buffer->shm_id_ = 0;
+ buffer->shm_offset_ = 0;
+ buffer->address_ = NULL;
+ buffer->last_usage_token_ = 0;
+ buffer->last_async_upload_token_ = 0;
+}
+
+void BufferTracker::Free(Buffer* buffer) {
+ if (buffer->address_)
+ mapped_memory_->Free(buffer->address_);
+
+ buffer->size_ = 0;
+ buffer->shm_id_ = 0;
+ buffer->shm_offset_ = 0;
+ buffer->address_ = NULL;
+ buffer->last_usage_token_ = 0;
+ buffer->last_async_upload_token_ = 0;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/buffer_tracker.h b/gpu/command_buffer/client/buffer_tracker.h
new file mode 100644
index 0000000..33bd94b
--- /dev/null
+++ b/gpu/command_buffer/client/buffer_tracker.h
@@ -0,0 +1,125 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_BUFFER_TRACKER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_BUFFER_TRACKER_H_
+
+#include <GLES2/gl2.h>
+
+#include <queue>
+#include "base/containers/hash_tables.h"
+#include "gles2_impl_export.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+class MappedMemoryManager;
+
+namespace gles2 {
+
+// Tracks buffer objects for client side of command buffer.
+class GLES2_IMPL_EXPORT BufferTracker {
+ public:
+ class GLES2_IMPL_EXPORT Buffer {
+ public:
+ Buffer(GLuint id,
+ unsigned int size,
+ int32 shm_id,
+ uint32 shm_offset,
+ void* address)
+ : id_(id),
+ size_(size),
+ shm_id_(shm_id),
+ shm_offset_(shm_offset),
+ address_(address),
+ mapped_(false),
+ last_usage_token_(0),
+ last_async_upload_token_(0) {
+ }
+
+    GLuint id() const {
+ return id_;
+ }
+
+ unsigned int size() const {
+ return size_;
+ }
+
+ int32 shm_id() const {
+ return shm_id_;
+ }
+
+ uint32 shm_offset() const {
+ return shm_offset_;
+ }
+
+ void* address() const {
+ return address_;
+ }
+
+ void set_mapped(bool mapped) {
+ mapped_ = mapped;
+ }
+
+ bool mapped() const {
+ return mapped_;
+ }
+
+ void set_last_usage_token(int token) {
+ last_usage_token_ = token;
+ }
+
+ int last_usage_token() const {
+ return last_usage_token_;
+ }
+
+ void set_last_async_upload_token(uint32 async_token) {
+ last_async_upload_token_ = async_token;
+ }
+
+ GLuint last_async_upload_token() const {
+ return last_async_upload_token_;
+ }
+
+ private:
+ friend class BufferTracker;
+ friend class BufferTrackerTest;
+
+ GLuint id_;
+ unsigned int size_;
+ int32 shm_id_;
+ uint32 shm_offset_;
+ void* address_;
+ bool mapped_;
+ int32 last_usage_token_;
+ GLuint last_async_upload_token_;
+ };
+
+ BufferTracker(MappedMemoryManager* manager);
+ ~BufferTracker();
+
+ Buffer* CreateBuffer(GLuint id, GLsizeiptr size);
+ Buffer* GetBuffer(GLuint id);
+ void RemoveBuffer(GLuint id);
+
+ // Frees the block of memory associated with buffer, pending the passage
+ // of a token.
+ void FreePendingToken(Buffer* buffer, int32 token);
+ void Unmanage(Buffer* buffer);
+ void Free(Buffer* buffer);
+
+ private:
+ typedef base::hash_map<GLuint, Buffer*> BufferMap;
+
+ MappedMemoryManager* mapped_memory_;
+ BufferMap buffers_;
+
+ DISALLOW_COPY_AND_ASSIGN(BufferTracker);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_BUFFER_TRACKER_H_
diff --git a/gpu/command_buffer/client/buffer_tracker_unittest.cc b/gpu/command_buffer/client/buffer_tracker_unittest.cc
new file mode 100644
index 0000000..f6174c0
--- /dev/null
+++ b/gpu/command_buffer/client/buffer_tracker_unittest.cc
@@ -0,0 +1,153 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the BufferTracker.
+
+#include "gpu/command_buffer/client/buffer_tracker.h"
+
+#include <GLES2/gl2ext.h>
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace gles2 {
+
+class MockClientCommandBufferImpl : public MockClientCommandBuffer {
+ public:
+ MockClientCommandBufferImpl()
+ : MockClientCommandBuffer(),
+ context_lost_(false) {}
+ virtual ~MockClientCommandBufferImpl() {}
+
+ virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ int32* id) OVERRIDE {
+ if (context_lost_) {
+ *id = -1;
+ return NULL;
+ }
+ return MockClientCommandBuffer::CreateTransferBuffer(size, id);
+ }
+
+ void set_context_lost(bool context_lost) {
+ context_lost_ = context_lost;
+ }
+
+ private:
+ bool context_lost_;
+};
+
+namespace {
+void EmptyPoll() {
+}
+}
+
+class BufferTrackerTest : public testing::Test {
+ protected:
+ static const int32 kNumCommandEntries = 400;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+
+ virtual void SetUp() {
+ command_buffer_.reset(new MockClientCommandBufferImpl());
+ helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
+ helper_->Initialize(kCommandBufferSizeBytes);
+ mapped_memory_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
+ buffer_tracker_.reset(new BufferTracker(mapped_memory_.get()));
+ }
+
+ virtual void TearDown() {
+ buffer_tracker_.reset();
+ mapped_memory_.reset();
+ helper_.reset();
+ command_buffer_.reset();
+ }
+
+ scoped_ptr<MockClientCommandBufferImpl> command_buffer_;
+ scoped_ptr<GLES2CmdHelper> helper_;
+ scoped_ptr<MappedMemoryManager> mapped_memory_;
+ scoped_ptr<BufferTracker> buffer_tracker_;
+};
+
+TEST_F(BufferTrackerTest, Basic) {
+ const GLuint kId1 = 123;
+ const GLuint kId2 = 124;
+ const GLsizeiptr size = 64;
+
+ // Check we can create a Buffer.
+ BufferTracker::Buffer* buffer = buffer_tracker_->CreateBuffer(kId1, size);
+ ASSERT_TRUE(buffer != NULL);
+ // Check we can get the same Buffer.
+ EXPECT_EQ(buffer, buffer_tracker_->GetBuffer(kId1));
+ // Check mapped memory address.
+ EXPECT_TRUE(buffer->address() != NULL);
+ // Check shared memory was allocated.
+ EXPECT_EQ(1lu, mapped_memory_->num_chunks());
+ // Check we get nothing for a non-existent buffer.
+ EXPECT_TRUE(buffer_tracker_->GetBuffer(kId2) == NULL);
+ // Check we can delete the buffer.
+ buffer_tracker_->RemoveBuffer(kId1);
+ // Check shared memory was freed.
+ mapped_memory_->FreeUnused();
+ EXPECT_EQ(0lu, mapped_memory_->num_chunks());
+ // Check we get nothing for a non-existent buffer.
+ EXPECT_TRUE(buffer_tracker_->GetBuffer(kId1) == NULL);
+}
+
+TEST_F(BufferTrackerTest, ZeroSize) {
+ const GLuint kId = 123;
+
+ // Check we can create a Buffer with zero size.
+ BufferTracker::Buffer* buffer = buffer_tracker_->CreateBuffer(kId, 0);
+ ASSERT_TRUE(buffer != NULL);
+ // Check mapped memory address.
+ EXPECT_TRUE(buffer->address() == NULL);
+ // Check no shared memory was allocated.
+ EXPECT_EQ(0lu, mapped_memory_->num_chunks());
+ // Check we can delete the buffer.
+ buffer_tracker_->RemoveBuffer(kId);
+}
+
+TEST_F(BufferTrackerTest, LostContext) {
+ const GLuint kId = 123;
+ const GLsizeiptr size = 64;
+
+ command_buffer_->set_context_lost(true);
+  // Check we can create a Buffer after losing the context.
+ BufferTracker::Buffer* buffer = buffer_tracker_->CreateBuffer(kId, size);
+ ASSERT_TRUE(buffer != NULL);
+  // Check the buffer size was recorded.
+ EXPECT_EQ(64u, buffer->size());
+ // Check mapped memory address.
+ EXPECT_TRUE(buffer->address() == NULL);
+ // Check no shared memory was allocated.
+ EXPECT_EQ(0lu, mapped_memory_->num_chunks());
+ // Check we can delete the buffer.
+ buffer_tracker_->RemoveBuffer(kId);
+}
+
+TEST_F(BufferTrackerTest, Unmanage) {
+ const GLuint kId = 123;
+ const GLsizeiptr size = 64;
+
+ BufferTracker::Buffer* buffer = buffer_tracker_->CreateBuffer(kId, size);
+ ASSERT_TRUE(buffer != NULL);
+ EXPECT_EQ(mapped_memory_->bytes_in_use(), static_cast<size_t>(size));
+
+ void* mem = buffer->address();
+ buffer_tracker_->Unmanage(buffer);
+ buffer_tracker_->RemoveBuffer(kId);
+ EXPECT_EQ(mapped_memory_->bytes_in_use(), static_cast<size_t>(size));
+
+ mapped_memory_->Free(mem);
+ EXPECT_EQ(mapped_memory_->bytes_in_use(), static_cast<size_t>(0));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/client_context_state.cc b/gpu/command_buffer/client/client_context_state.cc
new file mode 100644
index 0000000..9f4fbad
--- /dev/null
+++ b/gpu/command_buffer/client/client_context_state.cc
@@ -0,0 +1,26 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/client_context_state.h"
+
+#include "base/logging.h"
+
+namespace gpu {
+namespace gles2 {
+
+ClientContextState::ClientContextState() {
+}
+
+ClientContextState::~ClientContextState() {
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/client_context_state_impl_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/client_context_state.h b/gpu/command_buffer/client/client_context_state.h
new file mode 100644
index 0000000..f5a93a6
--- /dev/null
+++ b/gpu/command_buffer/client/client_context_state.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the ContextState class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_H_
+
+#include <GLES2/gl2.h>
+#include <vector>
+#include "gles2_impl_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+struct GLES2_IMPL_EXPORT ClientContextState {
+ ClientContextState();
+ ~ClientContextState();
+
+  // Returns true if the capability's state is cached, in which case 'enabled'
+  // is set to the current state.
+ bool GetEnabled(GLenum cap, bool* enabled) const;
+
+ // Sets the state of a capability.
+ // Returns true if the capability is one that is cached.
+  // 'changed' will be true if the state was different from 'enabled'.
+ bool SetCapabilityState(GLenum cap, bool enabled, bool* changed);
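+  // Illustrative usage (not part of this header): a client-side glEnable
+  // implementation could use this to skip redundant commands, e.g.
+  //   bool changed = false;
+  //   if (state.SetCapabilityState(cap, true, &changed) && !changed)
+  //     return;  // Already enabled; no command needs to be issued.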
+
+ #include "gpu/command_buffer/client/client_context_state_autogen.h"
+
+ EnableFlags enable_flags;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_H_
+
diff --git a/gpu/command_buffer/client/client_context_state_autogen.h b/gpu/command_buffer/client/client_context_state_autogen.h
new file mode 100644
index 0000000..72a4f72
--- /dev/null
+++ b/gpu/command_buffer/client/client_context_state_autogen.h
@@ -0,0 +1,28 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by client_context_state.h
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_AUTOGEN_H_
+
+struct EnableFlags {
+ EnableFlags();
+ bool blend;
+ bool cull_face;
+ bool depth_test;
+ bool dither;
+ bool polygon_offset_fill;
+ bool sample_alpha_to_coverage;
+ bool sample_coverage;
+ bool scissor_test;
+ bool stencil_test;
+};
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/client_context_state_impl_autogen.h b/gpu/command_buffer/client/client_context_state_impl_autogen.h
new file mode 100644
index 0000000..cff14f7
--- /dev/null
+++ b/gpu/command_buffer/client/client_context_state_impl_autogen.h
@@ -0,0 +1,123 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by client_context_state.cc
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_IMPL_AUTOGEN_H_
+
+ClientContextState::EnableFlags::EnableFlags()
+ : blend(false),
+ cull_face(false),
+ depth_test(false),
+ dither(true),
+ polygon_offset_fill(false),
+ sample_alpha_to_coverage(false),
+ sample_coverage(false),
+ scissor_test(false),
+ stencil_test(false) {
+}
+
+bool ClientContextState::SetCapabilityState(GLenum cap,
+ bool enabled,
+ bool* changed) {
+ *changed = false;
+ switch (cap) {
+ case GL_BLEND:
+ if (enable_flags.blend != enabled) {
+ *changed = true;
+ enable_flags.blend = enabled;
+ }
+ return true;
+ case GL_CULL_FACE:
+ if (enable_flags.cull_face != enabled) {
+ *changed = true;
+ enable_flags.cull_face = enabled;
+ }
+ return true;
+ case GL_DEPTH_TEST:
+ if (enable_flags.depth_test != enabled) {
+ *changed = true;
+ enable_flags.depth_test = enabled;
+ }
+ return true;
+ case GL_DITHER:
+ if (enable_flags.dither != enabled) {
+ *changed = true;
+ enable_flags.dither = enabled;
+ }
+ return true;
+ case GL_POLYGON_OFFSET_FILL:
+ if (enable_flags.polygon_offset_fill != enabled) {
+ *changed = true;
+ enable_flags.polygon_offset_fill = enabled;
+ }
+ return true;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ if (enable_flags.sample_alpha_to_coverage != enabled) {
+ *changed = true;
+ enable_flags.sample_alpha_to_coverage = enabled;
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE:
+ if (enable_flags.sample_coverage != enabled) {
+ *changed = true;
+ enable_flags.sample_coverage = enabled;
+ }
+ return true;
+ case GL_SCISSOR_TEST:
+ if (enable_flags.scissor_test != enabled) {
+ *changed = true;
+ enable_flags.scissor_test = enabled;
+ }
+ return true;
+ case GL_STENCIL_TEST:
+ if (enable_flags.stencil_test != enabled) {
+ *changed = true;
+ enable_flags.stencil_test = enabled;
+ }
+ return true;
+ default:
+ return false;
+ }
+}
+bool ClientContextState::GetEnabled(GLenum cap, bool* enabled) const {
+ switch (cap) {
+ case GL_BLEND:
+ *enabled = enable_flags.blend;
+ return true;
+ case GL_CULL_FACE:
+ *enabled = enable_flags.cull_face;
+ return true;
+ case GL_DEPTH_TEST:
+ *enabled = enable_flags.depth_test;
+ return true;
+ case GL_DITHER:
+ *enabled = enable_flags.dither;
+ return true;
+ case GL_POLYGON_OFFSET_FILL:
+ *enabled = enable_flags.polygon_offset_fill;
+ return true;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ *enabled = enable_flags.sample_alpha_to_coverage;
+ return true;
+ case GL_SAMPLE_COVERAGE:
+ *enabled = enable_flags.sample_coverage;
+ return true;
+ case GL_SCISSOR_TEST:
+ *enabled = enable_flags.scissor_test;
+ return true;
+ case GL_STENCIL_TEST:
+ *enabled = enable_flags.stencil_test;
+ return true;
+ default:
+ return false;
+ }
+}
+#endif // GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_IMPL_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/client_test_helper.cc b/gpu/command_buffer/client/client_test_helper.cc
new file mode 100644
index 0000000..8c633ef
--- /dev/null
+++ b/gpu/command_buffer/client/client_test_helper.cc
@@ -0,0 +1,158 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper classes for implementing gpu client side unit tests.
+
+#include "gpu/command_buffer/client/client_test_helper.h"
+
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+
+namespace gpu {
+
+MockCommandBufferBase::MockCommandBufferBase() {
+}
+
+MockCommandBufferBase::~MockCommandBufferBase() {
+}
+
+bool MockCommandBufferBase::Initialize() {
+ return true;
+}
+
+CommandBuffer::State MockCommandBufferBase::GetLastState() {
+ return state_;
+}
+
+int32 MockCommandBufferBase::GetLastToken() {
+ return state_.token;
+}
+
+void MockCommandBufferBase::SetGetOffset(int32 get_offset) {
+ state_.get_offset = get_offset;
+}
+
+void MockCommandBufferBase::WaitForTokenInRange(int32 start, int32 end) {}
+
+void MockCommandBufferBase::WaitForGetOffsetInRange(int32 start, int32 end) {
+ state_.get_offset = state_.put_offset;
+ OnFlush();
+}
+
+void MockCommandBufferBase::SetGetBuffer(int transfer_buffer_id) {
+ ring_buffer_buffer_ = GetTransferBuffer(transfer_buffer_id);
+ ring_buffer_ =
+ static_cast<CommandBufferEntry*>(ring_buffer_buffer_->memory());
+ state_.num_entries = ring_buffer_buffer_->size() / sizeof(ring_buffer_[0]);
+ state_.token = 10000; // All token checks in the tests should pass.
+}
+
+// Gets the id of the next transfer buffer that will be returned
+// by CreateTransferBuffer. This is useful for testing expected ids.
+int32 MockCommandBufferBase::GetNextFreeTransferBufferId() {
+ for (size_t ii = 0; ii < arraysize(transfer_buffer_buffers_); ++ii) {
+ if (!transfer_buffer_buffers_[ii].get()) {
+ return kTransferBufferBaseId + ii;
+ }
+ }
+ return -1;
+}
+
+scoped_refptr<gpu::Buffer> MockCommandBufferBase::CreateTransferBuffer(
+ size_t size,
+ int32* id) {
+ *id = GetNextFreeTransferBufferId();
+ if (*id >= 0) {
+ int32 ndx = *id - kTransferBufferBaseId;
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
+ shared_memory->CreateAndMapAnonymous(size);
+ transfer_buffer_buffers_[ndx] =
+ MakeBufferFromSharedMemory(shared_memory.Pass(), size);
+ }
+ return GetTransferBuffer(*id);
+}
+
+void MockCommandBufferBase::DestroyTransferBufferHelper(int32 id) {
+ DCHECK_GE(id, kTransferBufferBaseId);
+ DCHECK_LT(id, kTransferBufferBaseId + kMaxTransferBuffers);
+ id -= kTransferBufferBaseId;
+ transfer_buffer_buffers_[id] = NULL;
+}
+
+scoped_refptr<Buffer> MockCommandBufferBase::GetTransferBuffer(int32 id) {
+ DCHECK_GE(id, kTransferBufferBaseId);
+ DCHECK_LT(id, kTransferBufferBaseId + kMaxTransferBuffers);
+ return transfer_buffer_buffers_[id - kTransferBufferBaseId];
+}
+
+void MockCommandBufferBase::FlushHelper(int32 put_offset) {
+ state_.put_offset = put_offset;
+}
+
+void MockCommandBufferBase::SetToken(int32 token) {
+ NOTREACHED();
+ state_.token = token;
+}
+
+void MockCommandBufferBase::SetParseError(error::Error error) {
+ NOTREACHED();
+ state_.error = error;
+}
+
+void MockCommandBufferBase::SetContextLostReason(
+ error::ContextLostReason reason) {
+ NOTREACHED();
+ state_.context_lost_reason = reason;
+}
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const int32 MockCommandBufferBase::kTransferBufferBaseId;
+const int32 MockCommandBufferBase::kMaxTransferBuffers;
+#endif
+
+MockClientCommandBuffer::MockClientCommandBuffer() {
+ DelegateToFake();
+}
+
+MockClientCommandBuffer::~MockClientCommandBuffer() {
+}
+
+void MockClientCommandBuffer::Flush(int32 put_offset) {
+ FlushHelper(put_offset);
+}
+
+void MockClientCommandBuffer::DelegateToFake() {
+ ON_CALL(*this, DestroyTransferBuffer(_))
+ .WillByDefault(Invoke(
+ this, &MockCommandBufferBase::DestroyTransferBufferHelper));
+}
+
+MockClientCommandBufferMockFlush::MockClientCommandBufferMockFlush() {
+ DelegateToFake();
+}
+
+MockClientCommandBufferMockFlush::~MockClientCommandBufferMockFlush() {
+}
+
+void MockClientCommandBufferMockFlush::DelegateToFake() {
+ MockClientCommandBuffer::DelegateToFake();
+ ON_CALL(*this, Flush(_))
+ .WillByDefault(Invoke(
+ this, &MockCommandBufferBase::FlushHelper));
+}
+
+MockClientGpuControl::MockClientGpuControl() {
+}
+
+MockClientGpuControl::~MockClientGpuControl() {
+}
+
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/client_test_helper.h b/gpu/command_buffer/client/client_test_helper.h
new file mode 100644
index 0000000..a0363a9
--- /dev/null
+++ b/gpu/command_buffer/client/client_test_helper.h
@@ -0,0 +1,113 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper classes for implementing gpu client side unit tests.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CLIENT_TEST_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CLIENT_TEST_HELPER_H_
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/common/gpu_memory_allocation.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+
+class MockCommandBufferBase : public CommandBufferServiceBase {
+ public:
+ static const int32 kTransferBufferBaseId = 0x123;
+ static const int32 kMaxTransferBuffers = 6;
+
+ MockCommandBufferBase();
+ virtual ~MockCommandBufferBase();
+
+ virtual bool Initialize() OVERRIDE;
+ virtual State GetLastState() OVERRIDE;
+ virtual int32 GetLastToken() OVERRIDE;
+ virtual void WaitForTokenInRange(int32 start, int32 end) OVERRIDE;
+ virtual void WaitForGetOffsetInRange(int32 start, int32 end) OVERRIDE;
+ virtual void SetGetBuffer(int transfer_buffer_id) OVERRIDE;
+ virtual void SetGetOffset(int32 get_offset) OVERRIDE;
+ virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ int32* id) OVERRIDE;
+ virtual scoped_refptr<gpu::Buffer> GetTransferBuffer(int32 id) OVERRIDE;
+ virtual void SetToken(int32 token) OVERRIDE;
+ virtual void SetParseError(error::Error error) OVERRIDE;
+ virtual void SetContextLostReason(error::ContextLostReason reason) OVERRIDE;
+
+  // Gets the Id of the next transfer buffer that will be returned
+ // by CreateTransferBuffer. This is useful for testing expected ids.
+ int32 GetNextFreeTransferBufferId();
+
+ void FlushHelper(int32 put_offset);
+ void DestroyTransferBufferHelper(int32 id);
+
+ virtual void OnFlush() = 0;
+
+ private:
+ scoped_refptr<Buffer> transfer_buffer_buffers_[kMaxTransferBuffers];
+ CommandBufferEntry* ring_buffer_;
+ scoped_refptr<Buffer> ring_buffer_buffer_;
+ State state_;
+};
+
+class MockClientCommandBuffer : public MockCommandBufferBase {
+ public:
+ MockClientCommandBuffer();
+ virtual ~MockClientCommandBuffer();
+
+ // This is so we can use all the gmock functions when Flush is called.
+ MOCK_METHOD0(OnFlush, void());
+ MOCK_METHOD1(DestroyTransferBuffer, void(int32 id));
+
+ virtual void Flush(int32 put_offset) OVERRIDE;
+
+ void DelegateToFake();
+};
+
+class MockClientCommandBufferMockFlush : public MockClientCommandBuffer {
+ public:
+ MockClientCommandBufferMockFlush();
+ virtual ~MockClientCommandBufferMockFlush();
+
+ MOCK_METHOD1(Flush, void(int32 put_offset));
+
+ void DelegateToFake();
+};
+
+class MockClientGpuControl : public GpuControl {
+ public:
+ MockClientGpuControl();
+ virtual ~MockClientGpuControl();
+
+ MOCK_METHOD0(GetCapabilities, Capabilities());
+ MOCK_METHOD5(CreateGpuMemoryBuffer,
+ gfx::GpuMemoryBuffer*(size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage,
+ int32* id));
+ MOCK_METHOD1(DestroyGpuMemoryBuffer, void(int32 id));
+ MOCK_METHOD0(InsertSyncPoint, uint32());
+ MOCK_METHOD0(InsertFutureSyncPoint, uint32());
+ MOCK_METHOD1(RetireSyncPoint, void(uint32 id));
+ MOCK_METHOD2(SignalSyncPoint, void(uint32 id, const base::Closure& callback));
+ MOCK_METHOD2(SignalQuery, void(uint32 query, const base::Closure& callback));
+ MOCK_METHOD1(SetSurfaceVisible, void(bool visible));
+ MOCK_METHOD1(CreateStreamTexture, uint32(uint32));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockClientGpuControl);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CLIENT_TEST_HELPER_H_
+
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.cc b/gpu/command_buffer/client/cmd_buffer_helper.cc
new file mode 100644
index 0000000..a99201e
--- /dev/null
+++ b/gpu/command_buffer/client/cmd_buffer_helper.cc
@@ -0,0 +1,293 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of the command buffer helper class.
+
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/common/trace_event.h"
+
+namespace gpu {
+
+CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
+ : command_buffer_(command_buffer),
+ ring_buffer_id_(-1),
+ ring_buffer_size_(0),
+ entries_(NULL),
+ total_entry_count_(0),
+ immediate_entry_count_(0),
+ token_(0),
+ put_(0),
+ last_put_sent_(0),
+#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
+ commands_issued_(0),
+#endif
+ usable_(true),
+ context_lost_(false),
+ flush_automatically_(true),
+ flush_generation_(0) {
+}
+
+void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
+ flush_automatically_ = enabled;
+ CalcImmediateEntries(0);
+}
+
+bool CommandBufferHelper::IsContextLost() {
+ if (!context_lost_) {
+ context_lost_ = error::IsError(command_buffer()->GetLastError());
+ }
+ return context_lost_;
+}
+
+void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
+ DCHECK_GE(waiting_count, 0);
+
+ // Check if usable & allocated.
+ if (!usable() || !HaveRingBuffer()) {
+ immediate_entry_count_ = 0;
+ return;
+ }
+
+ // Get maximum safe contiguous entries.
+ const int32 curr_get = get_offset();
+ if (curr_get > put_) {
+ immediate_entry_count_ = curr_get - put_ - 1;
+ } else {
+ immediate_entry_count_ =
+ total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
+ }
+
+ // Limit entry count to force early flushing.
+ if (flush_automatically_) {
+ int32 limit =
+ total_entry_count_ /
+ ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);
+
+ int32 pending =
+ (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;
+
+ if (pending > 0 && pending >= limit) {
+ // Time to force flush.
+ immediate_entry_count_ = 0;
+ } else {
+ // Limit remaining entries, but not lower than waiting_count entries to
+ // prevent deadlock when command size is greater than the flush limit.
+ limit -= pending;
+ limit = limit < waiting_count ? waiting_count : limit;
+ immediate_entry_count_ =
+ immediate_entry_count_ > limit ? limit : immediate_entry_count_;
+ }
+ }
+}
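+
+// Worked example of the arithmetic above (illustrative, assuming automatic
+// flushes are disabled and a 32-entry ring buffer):
+//   get == 10, put_ == 4  -> immediate_entry_count_ = 10 - 4 - 1 = 5
+//   get == 10, put_ == 20 -> immediate_entry_count_ = 32 - 20     = 12
+//   get == 0,  put_ == 20 -> immediate_entry_count_ = 32 - 20 - 1 = 11
+// When automatic flushes are enabled, the limit computation above may reduce
+// the count further, or force it to zero to trigger an early flush.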
+
+bool CommandBufferHelper::AllocateRingBuffer() {
+ if (!usable()) {
+ return false;
+ }
+
+ if (HaveRingBuffer()) {
+ return true;
+ }
+
+ int32 id = -1;
+ scoped_refptr<Buffer> buffer =
+ command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
+ if (id < 0) {
+ ClearUsable();
+ return false;
+ }
+
+ ring_buffer_ = buffer;
+ ring_buffer_id_ = id;
+ command_buffer_->SetGetBuffer(id);
+ entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
+ total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
+  // The call to SetGetBuffer(id) above resets the get and put offsets to 0.
+  // There is no need to query them through IPC.
+ put_ = 0;
+ CalcImmediateEntries(0);
+ return true;
+}
+
+void CommandBufferHelper::FreeResources() {
+ if (HaveRingBuffer()) {
+ command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
+ ring_buffer_id_ = -1;
+ CalcImmediateEntries(0);
+ }
+}
+
+void CommandBufferHelper::FreeRingBuffer() {
+ CHECK((put_ == get_offset()) ||
+ error::IsError(command_buffer_->GetLastState().error));
+ FreeResources();
+}
+
+bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
+ ring_buffer_size_ = ring_buffer_size;
+ return AllocateRingBuffer();
+}
+
+CommandBufferHelper::~CommandBufferHelper() {
+ FreeResources();
+}
+
+bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start, int32 end) {
+ if (!usable()) {
+ return false;
+ }
+ command_buffer_->WaitForGetOffsetInRange(start, end);
+ return command_buffer_->GetLastError() == gpu::error::kNoError;
+}
+
+void CommandBufferHelper::Flush() {
+ // Wrap put_ before flush.
+ if (put_ == total_entry_count_)
+ put_ = 0;
+
+ if (usable() && last_put_sent_ != put_) {
+ last_flush_time_ = base::TimeTicks::Now();
+ last_put_sent_ = put_;
+ command_buffer_->Flush(put_);
+ ++flush_generation_;
+ CalcImmediateEntries(0);
+ }
+}
+
+#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
+void CommandBufferHelper::PeriodicFlushCheck() {
+ base::TimeTicks current_time = base::TimeTicks::Now();
+ if (current_time - last_flush_time_ >
+ base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
+ Flush();
+ }
+}
+#endif
+
+// Calls Flush() and then waits until the buffer is empty. Breaks out early
+// if an error is set.
+bool CommandBufferHelper::Finish() {
+ TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
+ if (!usable()) {
+ return false;
+ }
+ // If there is no work just exit.
+ if (put_ == get_offset()) {
+ return true;
+ }
+ DCHECK(HaveRingBuffer());
+ Flush();
+ if (!WaitForGetOffsetInRange(put_, put_))
+ return false;
+ DCHECK_EQ(get_offset(), put_);
+
+ CalcImmediateEntries(0);
+
+ return true;
+}
+
+// Inserts a new token into the command stream. It uses an increasing value
+// scheme so that we don't lose tokens (a token has passed if the current token
+// value is higher than that token). Calls Finish() if the token value wraps,
+// which will be rare.
+int32 CommandBufferHelper::InsertToken() {
+ AllocateRingBuffer();
+ if (!usable()) {
+ return token_;
+ }
+ DCHECK(HaveRingBuffer());
+ // Increment token as 31-bit integer. Negative values are used to signal an
+ // error.
+ token_ = (token_ + 1) & 0x7FFFFFFF;
+ cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
+ if (cmd) {
+ cmd->Init(token_);
+ if (token_ == 0) {
+ TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
+ // we wrapped
+ Finish();
+ DCHECK_EQ(token_, last_token_read());
+ }
+ }
+ return token_;
+}
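+
+// Illustrative consequence of the wrap handling above: tokens are 31-bit, so
+// after InsertToken() returns 0x7FFFFFFF the next call wraps token_ to 0 and
+// calls Finish(). Once Finish() returns, every previously inserted token has
+// been read, which is why HasTokenPassed() and WaitForToken() can treat any
+// token value greater than the current token_ as having already passed.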
+
+// Waits until the current token value is greater or equal to the value passed
+// in argument.
+void CommandBufferHelper::WaitForToken(int32 token) {
+ if (!usable() || !HaveRingBuffer()) {
+ return;
+ }
+ // Return immediately if corresponding InsertToken failed.
+ if (token < 0)
+ return;
+ if (token > token_) return; // we wrapped
+ if (last_token_read() >= token)
+ return;
+ Flush();
+ command_buffer_->WaitForTokenInRange(token, token_);
+}
+
+// Waits for available entries, basically waiting until get >= put + count + 1.
+// It actually waits for contiguous entries, so it may need to wrap the buffer
+// around, adding noops. Thus this function may change the value of put_. The
+// function will return early if an error occurs, in which case the requested
+// space may not actually be available.
+void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
+ AllocateRingBuffer();
+ if (!usable()) {
+ return;
+ }
+ DCHECK(HaveRingBuffer());
+ DCHECK(count < total_entry_count_);
+ if (put_ + count > total_entry_count_) {
+ // There's not enough room between the current put and the end of the
+ // buffer, so we need to wrap. We will add noops all the way to the end,
+    // but we need to make sure get wraps first, i.e. that get is 1 or
+ // more (since put will wrap to 0 after we add the noops).
+ DCHECK_LE(1, put_);
+ int32 curr_get = get_offset();
+ if (curr_get > put_ || curr_get == 0) {
+ TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
+ Flush();
+ if (!WaitForGetOffsetInRange(1, put_))
+ return;
+ curr_get = get_offset();
+ DCHECK_LE(curr_get, put_);
+ DCHECK_NE(0, curr_get);
+ }
+ // Insert Noops to fill out the buffer.
+ int32 num_entries = total_entry_count_ - put_;
+ while (num_entries > 0) {
+ int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
+ cmd::Noop::Set(&entries_[put_], num_to_skip);
+ put_ += num_to_skip;
+ num_entries -= num_to_skip;
+ }
+ put_ = 0;
+ }
+
+ // Try to get 'count' entries without flushing.
+ CalcImmediateEntries(count);
+ if (immediate_entry_count_ < count) {
+ // Try again with a shallow Flush().
+ Flush();
+ CalcImmediateEntries(count);
+ if (immediate_entry_count_ < count) {
+ // Buffer is full. Need to wait for entries.
+ TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
+ if (!WaitForGetOffsetInRange(put_ + count + 1, put_))
+ return;
+ CalcImmediateEntries(count);
+ DCHECK_GE(immediate_entry_count_, count);
+ }
+ }
+}
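+
+// Worked example of the wrapping path above (illustrative): with a 32-entry
+// buffer, put_ == 30 and count == 5, put_ + count exceeds the buffer size, so
+// the helper first makes sure the get offset is in [1, 30], fills entries
+// 30..31 with a noop, and wraps put_ to 0. It then needs 5 contiguous entries
+// at the start of the buffer; if they are not immediately available it
+// flushes and, as a last resort, waits for the get offset to advance.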
+
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.h b/gpu/command_buffer/client/cmd_buffer_helper.h
new file mode 100644
index 0000000..954107f
--- /dev/null
+++ b/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -0,0 +1,342 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the command buffer helper class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
+
+#include <string.h>
+#include <time.h>
+
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+#if !defined(OS_ANDROID)
+#define CMD_HELPER_PERIODIC_FLUSH_CHECK
+const int kCommandsPerFlushCheck = 100;
+const int kPeriodicFlushDelayInMicroseconds =
+ base::Time::kMicrosecondsPerSecond / (5 * 60);
+#endif
+
+const int kAutoFlushSmall = 16; // 1/16 of the buffer
+const int kAutoFlushBig = 2; // 1/2 of the buffer
+
+// Command buffer helper class. This class simplifies ring buffer management:
+// it will allocate the buffer, give it to the buffer interface, and let the
+// user add commands to it, while taking care of the synchronization (put and
+// get). It also provides a way to ensure commands have been executed, through
+// the token mechanism:
+//
+// helper.AddCommand(...);
+// helper.AddCommand(...);
+// int32 token = helper.InsertToken();
+// helper.AddCommand(...);
+// helper.AddCommand(...);
+// [...]
+//
+// helper.WaitForToken(token); // this doesn't return until the first two
+// // commands have been executed.
+class GPU_EXPORT CommandBufferHelper {
+ public:
+ explicit CommandBufferHelper(CommandBuffer* command_buffer);
+ virtual ~CommandBufferHelper();
+
+ // Initializes the CommandBufferHelper.
+ // Parameters:
+ // ring_buffer_size: The size of the ring buffer portion of the command
+ // buffer.
+ bool Initialize(int32 ring_buffer_size);
+
+ // Sets whether the command buffer should automatically flush periodically
+ // to try to increase performance. Defaults to true.
+ void SetAutomaticFlushes(bool enabled);
+
+ // True if the context is lost.
+ bool IsContextLost();
+
+ // Asynchronously flushes the commands, setting the put pointer to let the
+ // buffer interface know that new commands have been added. After a flush
+ // returns, the command buffer service is aware of all pending commands.
+ void Flush();
+
+ // Waits until all the commands have been executed. Returns whether it
+ // was successful. The function will fail if the command buffer service has
+ // disconnected.
+ bool Finish();
+
+  // Waits until a given number of entries are available.
+ // Parameters:
+ // count: number of entries needed. This value must be at most
+ // the size of the buffer minus one.
+ void WaitForAvailableEntries(int32 count);
+
+ // Inserts a new token into the command buffer. This token either has a value
+ // different from previously inserted tokens, or ensures that previously
+ // inserted tokens with that value have already passed through the command
+ // stream.
+ // Returns:
+ // the value of the new token or -1 if the command buffer reader has
+  //   shut down.
+ int32 InsertToken();
+
+ // Returns true if the token has passed.
+ // Parameters:
+ // the value of the token to check whether it has passed
+ bool HasTokenPassed(int32 token) const {
+ if (token > token_)
+ return true; // we wrapped
+ return last_token_read() >= token;
+ }
+
+ // Waits until the token of a particular value has passed through the command
+ // stream (i.e. commands inserted before that token have been executed).
+ // NOTE: This will call Flush if it needs to block.
+ // Parameters:
+ // the value of the token to wait for.
+ void WaitForToken(int32 token);
+
+ // Called prior to each command being issued. Waits for a certain amount of
+ // space to be available. Returns address of space.
+ void* GetSpace(int32 entries) {
+#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
+ // Allow this command buffer to be pre-empted by another if a "reasonable"
+    // amount of work has been done. On high-end machines, this reduces the
+ // latency of GPU commands. However, on Android, this can cause the
+ // kernel to thrash between generating GPU commands and executing them.
+ ++commands_issued_;
+ if (flush_automatically_ &&
+ (commands_issued_ % kCommandsPerFlushCheck == 0)) {
+ PeriodicFlushCheck();
+ }
+#endif
+
+ // Test for immediate entries.
+ if (entries > immediate_entry_count_) {
+ WaitForAvailableEntries(entries);
+ if (entries > immediate_entry_count_)
+ return NULL;
+ }
+
+ DCHECK_LE(entries, immediate_entry_count_);
+
+ // Allocate space and advance put_.
+ CommandBufferEntry* space = &entries_[put_];
+ put_ += entries;
+ immediate_entry_count_ -= entries;
+
+ DCHECK_LE(put_, total_entry_count_);
+ return space;
+ }
+
+ template <typename T>
+ void ForceNullCheck(T* data) {
+#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+ // 64-bit MSVC's alias analysis was determining that the command buffer
+ // entry couldn't be NULL, so it optimized out the NULL check.
+ // Dereferencing the same datatype through a volatile pointer seems to
+ // prevent that from happening. http://crbug.com/361936
+ if (data)
+ static_cast<volatile T*>(data)->header;
+#endif
+ }
+
+ // Typed version of GetSpace. Gets enough room for the given type and returns
+ // a reference to it.
+ template <typename T>
+ T* GetCmdSpace() {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
+ int32 space_needed = ComputeNumEntries(sizeof(T));
+ T* data = static_cast<T*>(GetSpace(space_needed));
+ ForceNullCheck(data);
+ return data;
+ }
+
+ // Typed version of GetSpace for immediate commands.
+ template <typename T>
+ T* GetImmediateCmdSpace(size_t data_space) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ int32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
+ T* data = static_cast<T*>(GetSpace(space_needed));
+ ForceNullCheck(data);
+ return data;
+ }
+
+ // Typed version of GetSpace for immediate commands.
+ template <typename T>
+ T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ int32 space_needed = ComputeNumEntries(total_space);
+ T* data = static_cast<T*>(GetSpace(space_needed));
+ ForceNullCheck(data);
+ return data;
+ }
+
+ int32 last_token_read() const {
+ return command_buffer_->GetLastToken();
+ }
+
+ int32 get_offset() const {
+ return command_buffer_->GetLastState().get_offset;
+ }
+
+ // Common Commands
+ void Noop(uint32 skip_count) {
+ cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
+ (skip_count - 1) * sizeof(CommandBufferEntry));
+ if (cmd) {
+ cmd->Init(skip_count);
+ }
+ }
+
+ void SetToken(uint32 token) {
+ cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
+ if (cmd) {
+ cmd->Init(token);
+ }
+ }
+
+ void SetBucketSize(uint32 bucket_id, uint32 size) {
+ cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
+ if (cmd) {
+ cmd->Init(bucket_id, size);
+ }
+ }
+
+ void SetBucketData(uint32 bucket_id,
+ uint32 offset,
+ uint32 size,
+ uint32 shared_memory_id,
+ uint32 shared_memory_offset) {
+ cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
+ if (cmd) {
+ cmd->Init(bucket_id,
+ offset,
+ size,
+ shared_memory_id,
+ shared_memory_offset);
+ }
+ }
+
+ void SetBucketDataImmediate(
+ uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
+ cmd::SetBucketDataImmediate* cmd =
+ GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
+ if (cmd) {
+ cmd->Init(bucket_id, offset, size);
+ memcpy(ImmediateDataAddress(cmd), data, size);
+ }
+ }
+
+ void GetBucketStart(uint32 bucket_id,
+ uint32 result_memory_id,
+ uint32 result_memory_offset,
+ uint32 data_memory_size,
+ uint32 data_memory_id,
+ uint32 data_memory_offset) {
+ cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
+ if (cmd) {
+ cmd->Init(bucket_id,
+ result_memory_id,
+ result_memory_offset,
+ data_memory_size,
+ data_memory_id,
+ data_memory_offset);
+ }
+ }
+
+ void GetBucketData(uint32 bucket_id,
+ uint32 offset,
+ uint32 size,
+ uint32 shared_memory_id,
+ uint32 shared_memory_offset) {
+ cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
+ if (cmd) {
+ cmd->Init(bucket_id,
+ offset,
+ size,
+ shared_memory_id,
+ shared_memory_offset);
+ }
+ }
+
+ CommandBuffer* command_buffer() const {
+ return command_buffer_;
+ }
+
+ scoped_refptr<Buffer> get_ring_buffer() const { return ring_buffer_; }
+
+ uint32 flush_generation() const { return flush_generation_; }
+
+ void FreeRingBuffer();
+
+ bool HaveRingBuffer() const {
+ return ring_buffer_id_ != -1;
+ }
+
+  bool usable() const {
+ return usable_;
+ }
+
+ void ClearUsable() {
+ usable_ = false;
+ CalcImmediateEntries(0);
+ }
+
+ private:
+ // Returns the number of available entries (they may not be contiguous).
+ int32 AvailableEntries() {
+ return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
+ }
+
+ void CalcImmediateEntries(int waiting_count);
+ bool AllocateRingBuffer();
+ void FreeResources();
+
+ // Waits for the get offset to be in a specific range, inclusive. Returns
+ // false if there was an error.
+ bool WaitForGetOffsetInRange(int32 start, int32 end);
+
+#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
+ // Calls Flush if automatic flush conditions are met.
+ void PeriodicFlushCheck();
+#endif
+
+ CommandBuffer* command_buffer_;
+ int32 ring_buffer_id_;
+ int32 ring_buffer_size_;
+ scoped_refptr<gpu::Buffer> ring_buffer_;
+ CommandBufferEntry* entries_;
+ int32 total_entry_count_; // the total number of entries
+ int32 immediate_entry_count_;
+ int32 token_;
+ int32 put_;
+ int32 last_put_sent_;
+
+#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
+ int commands_issued_;
+#endif
+
+ bool usable_;
+ bool context_lost_;
+ bool flush_automatically_;
+
+ base::TimeTicks last_flush_time_;
+
+ // Incremented every time the helper flushes the command buffer.
+ // Can be used to track when prior commands have been flushed.
+ uint32 flush_generation_;
+
+ friend class CommandBufferHelperTest;
+ DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
diff --git a/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/gpu/command_buffer/client/cmd_buffer_helper_test.cc
new file mode 100644
index 0000000..6250074
--- /dev/null
+++ b/gpu/command_buffer/client/cmd_buffer_helper_test.cc
@@ -0,0 +1,712 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the Command Buffer Helper.
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/linked_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace gpu {
+
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::_;
+
+const int32 kTotalNumCommandEntries = 32;
+const int32 kCommandBufferSizeBytes =
+ kTotalNumCommandEntries * sizeof(CommandBufferEntry);
+const int32 kUnusedCommandId = 5; // we use 0 and 2 currently.
+
+// Override CommandBufferService::Flush() to lock flushing and simulate
+// the buffer becoming full in asynchronous mode.
+class CommandBufferServiceLocked : public CommandBufferService {
+ public:
+ explicit CommandBufferServiceLocked(
+ TransferBufferManagerInterface* transfer_buffer_manager)
+ : CommandBufferService(transfer_buffer_manager),
+ flush_locked_(false),
+ last_flush_(-1),
+ flush_count_(0) {}
+ virtual ~CommandBufferServiceLocked() {}
+
+ virtual void Flush(int32 put_offset) OVERRIDE {
+ flush_count_++;
+ if (!flush_locked_) {
+ last_flush_ = -1;
+ CommandBufferService::Flush(put_offset);
+ } else {
+ last_flush_ = put_offset;
+ }
+ }
+
+ void LockFlush() { flush_locked_ = true; }
+
+ void UnlockFlush() { flush_locked_ = false; }
+
+ int FlushCount() { return flush_count_; }
+
+ virtual void WaitForGetOffsetInRange(int32 start, int32 end) OVERRIDE {
+ if (last_flush_ != -1) {
+ CommandBufferService::Flush(last_flush_);
+ last_flush_ = -1;
+ }
+ CommandBufferService::WaitForGetOffsetInRange(start, end);
+ }
+
+ private:
+ bool flush_locked_;
+ int last_flush_;
+ int flush_count_;
+ DISALLOW_COPY_AND_ASSIGN(CommandBufferServiceLocked);
+};
+
+// Test fixture for CommandBufferHelper test - Creates a CommandBufferHelper,
+// using a CommandBufferEngine with a mock AsyncAPIInterface for its interface
+// (calling it directly, not through the RPC mechanism).
+class CommandBufferHelperTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock(true));
+
+ // ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, _, _))
+ .WillRepeatedly(Return(error::kNoError));
+
+ {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ EXPECT_TRUE(manager->Initialize());
+ }
+ command_buffer_.reset(
+ new CommandBufferServiceLocked(transfer_buffer_manager_.get()));
+ EXPECT_TRUE(command_buffer_->Initialize());
+
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), api_mock_.get(), NULL));
+ command_buffer_->SetPutOffsetChangeCallback(base::Bind(
+ &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
+ command_buffer_->SetGetBufferChangeCallback(base::Bind(
+ &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
+
+ api_mock_->set_engine(gpu_scheduler_.get());
+
+ helper_.reset(new CommandBufferHelper(command_buffer_.get()));
+ helper_->Initialize(kCommandBufferSizeBytes);
+
+ test_command_next_id_ = kUnusedCommandId;
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+ test_command_args_.clear();
+ }
+
+ const CommandParser* GetParser() const {
+ return gpu_scheduler_->parser();
+ }
+
+ int32 ImmediateEntryCount() const { return helper_->immediate_entry_count_; }
+
+ // Adds a command to the buffer through the helper, while adding it as an
+ // expected call on the API mock.
+ void AddCommandWithExpect(error::Error _return,
+ unsigned int command,
+ int arg_count,
+ CommandBufferEntry *args) {
+ CommandHeader header;
+ header.size = arg_count + 1;
+ header.command = command;
+ CommandBufferEntry* cmds =
+ static_cast<CommandBufferEntry*>(helper_->GetSpace(arg_count + 1));
+ CommandBufferOffset put = 0;
+ cmds[put++].value_header = header;
+ for (int ii = 0; ii < arg_count; ++ii) {
+ cmds[put++] = args[ii];
+ }
+
+ EXPECT_CALL(*api_mock_, DoCommand(command, arg_count,
+ Truly(AsyncAPIMock::IsArgs(arg_count, args))))
+ .InSequence(sequence_)
+ .WillOnce(Return(_return));
+ }
+
+ void AddUniqueCommandWithExpect(error::Error _return, int cmd_size) {
+ EXPECT_GE(cmd_size, 1);
+ EXPECT_LT(cmd_size, kTotalNumCommandEntries);
+ int arg_count = cmd_size - 1;
+
+ // Allocate array for args.
+ linked_ptr<std::vector<CommandBufferEntry> > args_ptr(
+ new std::vector<CommandBufferEntry>(arg_count ? arg_count : 1));
+
+ for (int32 ii = 0; ii < arg_count; ++ii) {
+ (*args_ptr)[ii].value_uint32 = 0xF00DF00D + ii;
+ }
+
+ // Add command and save args in test_command_args_ until the test completes.
+ AddCommandWithExpect(
+ _return, test_command_next_id_++, arg_count, &(*args_ptr)[0]);
+ test_command_args_.insert(test_command_args_.end(), args_ptr);
+ }
+
+ void TestCommandWrappingFull(int32 cmd_size, int32 start_commands) {
+ const int32 num_args = cmd_size - 1;
+ EXPECT_EQ(kTotalNumCommandEntries % cmd_size, 0);
+
+ std::vector<CommandBufferEntry> args(num_args);
+ for (int32 ii = 0; ii < num_args; ++ii) {
+ args[ii].value_uint32 = ii + 1;
+ }
+
+ // Initially insert commands up to start_commands and Finish().
+ for (int32 ii = 0; ii < start_commands; ++ii) {
+ AddCommandWithExpect(
+ error::kNoError, ii + kUnusedCommandId, num_args, &args[0]);
+ }
+ helper_->Finish();
+
+ EXPECT_EQ(GetParser()->put(),
+ (start_commands * cmd_size) % kTotalNumCommandEntries);
+ EXPECT_EQ(GetParser()->get(),
+ (start_commands * cmd_size) % kTotalNumCommandEntries);
+
+ // Lock flushing to force the buffer to get full.
+ command_buffer_->LockFlush();
+
+    // Add enough commands to overfill the buffer.
+ for (int32 ii = 0; ii < kTotalNumCommandEntries / cmd_size + 2; ++ii) {
+ AddCommandWithExpect(error::kNoError,
+ start_commands + ii + kUnusedCommandId,
+ num_args,
+ &args[0]);
+ }
+
+ // Flush all commands.
+ command_buffer_->UnlockFlush();
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+ }
+
+ // Checks that the buffer from put to put+size is free in the parser.
+ void CheckFreeSpace(CommandBufferOffset put, unsigned int size) {
+ CommandBufferOffset parser_put = GetParser()->put();
+ CommandBufferOffset parser_get = GetParser()->get();
+ CommandBufferOffset limit = put + size;
+ if (parser_get > parser_put) {
+ // "busy" buffer wraps, so "free" buffer is between put (inclusive) and
+ // get (exclusive).
+ EXPECT_LE(parser_put, put);
+ EXPECT_GT(parser_get, limit);
+ } else {
+ // "busy" buffer does not wrap, so the "free" buffer is the top side (from
+ // put to the limit) and the bottom side (from 0 to get).
+ if (put >= parser_put) {
+ // we're on the top side, check we are below the limit.
+ EXPECT_GE(kTotalNumCommandEntries, limit);
+ } else {
+ // we're on the bottom side, check we are below get.
+ EXPECT_GT(parser_get, limit);
+ }
+ }
+ }
+
+ int32 GetGetOffset() {
+ return command_buffer_->GetLastState().get_offset;
+ }
+
+ int32 GetPutOffset() {
+ return command_buffer_->GetLastState().put_offset;
+ }
+
+ int32 GetHelperGetOffset() { return helper_->get_offset(); }
+
+ int32 GetHelperPutOffset() { return helper_->put_; }
+
+ uint32 GetHelperFlushGeneration() { return helper_->flush_generation(); }
+
+ error::Error GetError() {
+ return command_buffer_->GetLastState().error;
+ }
+
+ CommandBufferOffset get_helper_put() { return helper_->put_; }
+
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool autorelease_pool_;
+#endif
+ base::MessageLoop message_loop_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<CommandBufferServiceLocked> command_buffer_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_ptr<CommandBufferHelper> helper_;
+ std::list<linked_ptr<std::vector<CommandBufferEntry> > > test_command_args_;
+ unsigned int test_command_next_id_;
+ Sequence sequence_;
+};
+
+// Checks immediate_entry_count_ changes based on 'usable' state.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesNotUsable) {
+ // Auto flushing mode is tested separately.
+ helper_->SetAutomaticFlushes(false);
+ EXPECT_EQ(helper_->usable(), true);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1);
+ helper_->ClearUsable();
+ EXPECT_EQ(ImmediateEntryCount(), 0);
+}
+
+// Checks immediate_entry_count_ changes based on RingBuffer state.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesNoRingBuffer) {
+ helper_->SetAutomaticFlushes(false);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1);
+ helper_->FreeRingBuffer();
+ EXPECT_EQ(ImmediateEntryCount(), 0);
+}
+
+// Checks immediate_entry_count_ calc when Put >= Get and Get == 0.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesGetAtZero) {
+ // No internal auto flushing.
+ helper_->SetAutomaticFlushes(false);
+ command_buffer_->LockFlush();
+
+ // Start at Get = Put = 0.
+ EXPECT_EQ(GetHelperPutOffset(), 0);
+ EXPECT_EQ(GetHelperGetOffset(), 0);
+
+ // Immediate count should be 1 less than the end of the buffer.
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1);
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 3);
+
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks immediate_entry_count_ calc when Put >= Get and Get > 0.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesGetInMiddle) {
+ // No internal auto flushing.
+ helper_->SetAutomaticFlushes(false);
+ command_buffer_->LockFlush();
+
+ // Move to Get = Put = 2.
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ helper_->Finish();
+ EXPECT_EQ(GetHelperPutOffset(), 2);
+ EXPECT_EQ(GetHelperGetOffset(), 2);
+
+ // Immediate count should be up to the end of the buffer.
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 2);
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 4);
+
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks immediate_entry_count_ calc when Put < Get.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesGetBeforePut) {
+ // Move to Get = kTotalNumCommandEntries / 4, Put = 0.
+ const int kInitGetOffset = kTotalNumCommandEntries / 4;
+ helper_->SetAutomaticFlushes(false);
+ command_buffer_->LockFlush();
+ AddUniqueCommandWithExpect(error::kNoError, kInitGetOffset);
+ helper_->Finish();
+ AddUniqueCommandWithExpect(error::kNoError,
+ kTotalNumCommandEntries - kInitGetOffset);
+
+  // Flushing instead of Finishing lets Put wrap without the command buffer
+  // immediately processing the data between Get and Put.
+ helper_->Flush();
+
+ EXPECT_EQ(GetHelperGetOffset(), kInitGetOffset);
+ EXPECT_EQ(GetHelperPutOffset(), 0);
+
+ // Immediate count should be up to Get - 1.
+ EXPECT_EQ(ImmediateEntryCount(), kInitGetOffset - 1);
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ EXPECT_EQ(ImmediateEntryCount(), kInitGetOffset - 3);
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks immediate_entry_count_ calc when automatic flushing is enabled.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesAutoFlushing) {
+ command_buffer_->LockFlush();
+
+ // Start at Get = Put = 0.
+ EXPECT_EQ(GetHelperPutOffset(), 0);
+ EXPECT_EQ(GetHelperGetOffset(), 0);
+
+ // Without auto flushes, up to kTotalNumCommandEntries - 1 is available.
+ helper_->SetAutomaticFlushes(false);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1);
+
+ // With auto flushes, and Get == Last Put,
+ // up to kTotalNumCommandEntries / kAutoFlushSmall is available.
+ helper_->SetAutomaticFlushes(true);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries / kAutoFlushSmall);
+
+ // With auto flushes, and Get != Last Put,
+ // up to kTotalNumCommandEntries / kAutoFlushBig is available.
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ helper_->Flush();
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries / kAutoFlushBig);
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks immediate_entry_count_ calc when automatic flushing is enabled, and
+// we allocate commands over the immediate_entry_count_ size.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesOverFlushLimit) {
+ // Lock internal flushing.
+ command_buffer_->LockFlush();
+
+ // Start at Get = Put = 0.
+ EXPECT_EQ(GetHelperPutOffset(), 0);
+ EXPECT_EQ(GetHelperGetOffset(), 0);
+
+ // Pre-check ImmediateEntryCount is limited with automatic flushing enabled.
+ helper_->SetAutomaticFlushes(true);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries / kAutoFlushSmall);
+
+ // Add a command larger than ImmediateEntryCount().
+ AddUniqueCommandWithExpect(error::kNoError, ImmediateEntryCount() + 1);
+
+ // ImmediateEntryCount() should now be 0, to force a flush check on the next
+ // command.
+ EXPECT_EQ(ImmediateEntryCount(), 0);
+
+ // Add a command when ImmediateEntryCount() == 0.
+ AddUniqueCommandWithExpect(error::kNoError, ImmediateEntryCount() + 1);
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks that commands in the buffer are properly executed, and that the
+// status/error stay valid.
+TEST_F(CommandBufferHelperTest, TestCommandProcessing) {
+ // Check initial state of the engine - it should have been configured by the
+ // helper.
+ EXPECT_TRUE(GetParser() != NULL);
+ EXPECT_EQ(error::kNoError, GetError());
+ EXPECT_EQ(0, GetGetOffset());
+
+ // Add 3 commands through the helper
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId, 0, NULL);
+
+ CommandBufferEntry args1[2];
+ args1[0].value_uint32 = 3;
+ args1[1].value_float = 4.f;
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId, 2, args1);
+
+ CommandBufferEntry args2[2];
+ args2[0].value_uint32 = 5;
+ args2[1].value_float = 6.f;
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId, 2, args2);
+
+ // Wait until it's done.
+ helper_->Finish();
+ // Check that the engine has no more work to do.
+ EXPECT_TRUE(GetParser()->IsEmpty());
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks that commands in the buffer are properly executed when wrapping the
+// buffer, and that the status/error stay valid.
+TEST_F(CommandBufferHelperTest, TestCommandWrapping) {
+ // Add num_commands * commands of size 3 through the helper to make sure we
+ // do wrap. kTotalNumCommandEntries must not be a multiple of 3.
+ COMPILE_ASSERT(kTotalNumCommandEntries % 3 != 0,
+ Is_multiple_of_num_command_entries);
+ const int kNumCommands = (kTotalNumCommandEntries / 3) * 2;
+ CommandBufferEntry args1[2];
+ args1[0].value_uint32 = 5;
+ args1[1].value_float = 4.f;
+
+ for (int i = 0; i < kNumCommands; ++i) {
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + i, 2, args1);
+ }
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks the case where the command inserted exactly matches the space left in
+// the command buffer.
+TEST_F(CommandBufferHelperTest, TestCommandWrappingExactMultiple) {
+ const int32 kCommandSize = kTotalNumCommandEntries / 2;
+ const size_t kNumArgs = kCommandSize - 1;
+ COMPILE_ASSERT(kTotalNumCommandEntries % kCommandSize == 0,
+ Not_multiple_of_num_command_entries);
+ CommandBufferEntry args1[kNumArgs];
+ for (size_t ii = 0; ii < kNumArgs; ++ii) {
+ args1[ii].value_uint32 = ii + 1;
+ }
+
+ for (unsigned int i = 0; i < 5; ++i) {
+ AddCommandWithExpect(
+ error::kNoError, i + kUnusedCommandId, kNumArgs, args1);
+ }
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks exact wrapping condition with Get = 0.
+TEST_F(CommandBufferHelperTest, TestCommandWrappingFullAtStart) {
+ TestCommandWrappingFull(2, 0);
+}
+
+// Checks exact wrapping condition with 0 < Get < kTotalNumCommandEntries.
+TEST_F(CommandBufferHelperTest, TestCommandWrappingFullInMiddle) {
+ TestCommandWrappingFull(2, 1);
+}
+
+// Checks exact wrapping condition with Get = kTotalNumCommandEntries.
+// Get should wrap back to 0, but make sure that it does.
+TEST_F(CommandBufferHelperTest, TestCommandWrappingFullAtEnd) {
+ TestCommandWrappingFull(2, kTotalNumCommandEntries / 2);
+}
+
+// Checks that asking for available entries work, and that the parser
+// effectively won't use that space.
+TEST_F(CommandBufferHelperTest, TestAvailableEntries) {
+ CommandBufferEntry args[2];
+ args[0].value_uint32 = 3;
+ args[1].value_float = 4.f;
+
+  // Add 4 commands through the helper - 8 entries total.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 1, 0, NULL);
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 2, 0, NULL);
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 3, 2, args);
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 4, 2, args);
+
+ // Ask for 5 entries.
+ helper_->WaitForAvailableEntries(5);
+
+ CommandBufferOffset put = get_helper_put();
+ CheckFreeSpace(put, 5);
+
+ // Add more commands.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 5, 2, args);
+
+  // Wait until everything is done.
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks that the InsertToken/WaitForToken work.
+TEST_F(CommandBufferHelperTest, TestToken) {
+ CommandBufferEntry args[2];
+ args[0].value_uint32 = 3;
+ args[1].value_float = 4.f;
+
+ // Add a first command.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 3, 2, args);
+ // keep track of the buffer position.
+ CommandBufferOffset command1_put = get_helper_put();
+ int32 token = helper_->InsertToken();
+
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillOnce(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+ // Add another command.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 4, 2, args);
+ helper_->WaitForToken(token);
+ // check that the get pointer is beyond the first command.
+ EXPECT_LE(command1_put, GetGetOffset());
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks WaitForToken doesn't Flush if token is already read.
+TEST_F(CommandBufferHelperTest, TestWaitForTokenFlush) {
+ CommandBufferEntry args[2];
+ args[0].value_uint32 = 3;
+ args[1].value_float = 4.f;
+
+ // Add a first command.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 3, 2, args);
+ int32 token = helper_->InsertToken();
+
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillOnce(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+
+ int flush_count = command_buffer_->FlushCount();
+
+ // Test that waiting for pending token causes a Flush.
+ helper_->WaitForToken(token);
+ EXPECT_EQ(command_buffer_->FlushCount(), flush_count + 1);
+
+ // Test that we don't Flush repeatedly.
+ helper_->WaitForToken(token);
+ EXPECT_EQ(command_buffer_->FlushCount(), flush_count + 1);
+
+ // Add another command.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 4, 2, args);
+
+ // Test that we don't Flush repeatedly even if commands are pending.
+ helper_->WaitForToken(token);
+ EXPECT_EQ(command_buffer_->FlushCount(), flush_count + 1);
+
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+TEST_F(CommandBufferHelperTest, FreeRingBuffer) {
+ EXPECT_TRUE(helper_->HaveRingBuffer());
+
+ // Test freeing ring buffer.
+ helper_->FreeRingBuffer();
+ EXPECT_FALSE(helper_->HaveRingBuffer());
+
+ // Test that InsertToken allocates a new one
+ int32 token = helper_->InsertToken();
+ EXPECT_TRUE(helper_->HaveRingBuffer());
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillOnce(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+ helper_->WaitForToken(token);
+ helper_->FreeRingBuffer();
+ EXPECT_FALSE(helper_->HaveRingBuffer());
+
+ // Test that WaitForAvailableEntries allocates a new one
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId, 0, NULL);
+ EXPECT_TRUE(helper_->HaveRingBuffer());
+ helper_->Finish();
+ helper_->FreeRingBuffer();
+ EXPECT_FALSE(helper_->HaveRingBuffer());
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+}
+
+TEST_F(CommandBufferHelperTest, Noop) {
+ for (int ii = 1; ii < 4; ++ii) {
+ CommandBufferOffset put_before = get_helper_put();
+ helper_->Noop(ii);
+ CommandBufferOffset put_after = get_helper_put();
+ EXPECT_EQ(ii, put_after - put_before);
+ }
+}
+
+TEST_F(CommandBufferHelperTest, IsContextLost) {
+ EXPECT_FALSE(helper_->IsContextLost());
+ command_buffer_->SetParseError(error::kGenericError);
+ EXPECT_TRUE(helper_->IsContextLost());
+}
+
+// Checks helper's 'flush generation' updates.
+TEST_F(CommandBufferHelperTest, TestFlushGeneration) {
+ // Explicit flushing only.
+ helper_->SetAutomaticFlushes(false);
+
+ // Generation should change after Flush() but not before.
+ uint32 gen1, gen2, gen3;
+
+ gen1 = GetHelperFlushGeneration();
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ gen2 = GetHelperFlushGeneration();
+ helper_->Flush();
+ gen3 = GetHelperFlushGeneration();
+ EXPECT_EQ(gen2, gen1);
+ EXPECT_NE(gen3, gen2);
+
+ // Generation should change after Finish() but not before.
+ gen1 = GetHelperFlushGeneration();
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ gen2 = GetHelperFlushGeneration();
+ helper_->Finish();
+ gen3 = GetHelperFlushGeneration();
+ EXPECT_EQ(gen2, gen1);
+ EXPECT_NE(gen3, gen2);
+
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/context_support.h b/gpu/command_buffer/client/context_support.h
new file mode 100644
index 0000000..2678ba9
--- /dev/null
+++ b/gpu/command_buffer/client/context_support.h
@@ -0,0 +1,50 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CONTEXT_SUPPORT_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CONTEXT_SUPPORT_H_
+
+#include "base/callback.h"
+#include "ui/gfx/overlay_transform.h"
+#include "ui/gfx/rect.h"
+
+namespace gpu {
+
+class ContextSupport {
+ public:
+ // Runs |callback| when a sync point is reached.
+ virtual void SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) = 0;
+
+  // Runs |callback| when a query created via glCreateQueryEXT() has cleared
+  // past the glEndQueryEXT() point.
+ virtual void SignalQuery(uint32 query, const base::Closure& callback) = 0;
+
+ // For onscreen contexts, indicates that the surface visibility has changed.
+ // Clients aren't expected to draw to an invisible surface.
+ virtual void SetSurfaceVisible(bool visible) = 0;
+
+ virtual void Swap() = 0;
+ virtual void PartialSwapBuffers(const gfx::Rect& sub_buffer) = 0;
+
+ // Schedule a texture to be presented as an overlay synchronously with the
+ // primary surface during the next buffer swap.
+ // This method is not stateful and needs to be re-scheduled every frame.
+ virtual void ScheduleOverlayPlane(int plane_z_order,
+ gfx::OverlayTransform plane_transform,
+ unsigned overlay_texture_id,
+ const gfx::Rect& display_bounds,
+ const gfx::RectF& uv_rect) = 0;
+
+ virtual uint32 InsertFutureSyncPointCHROMIUM() = 0;
+ virtual void RetireSyncPointCHROMIUM(uint32 sync_point) = 0;
+
+ protected:
+ ContextSupport() {}
+ virtual ~ContextSupport() {}
+};
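+
+// Example of how a client might use this interface (hypothetical client code;
+// OnSyncPointRetired is a client-provided function, not declared here):
+//   context_support->SignalSyncPoint(
+//       sync_point, base::Bind(&OnSyncPointRetired));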
+
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CONTEXT_SUPPORT_H_
diff --git a/gpu/command_buffer/client/fenced_allocator.cc b/gpu/command_buffer/client/fenced_allocator.cc
new file mode 100644
index 0000000..8003857
--- /dev/null
+++ b/gpu/command_buffer/client/fenced_allocator.cc
@@ -0,0 +1,253 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of the FencedAllocator class.
+
+#include "gpu/command_buffer/client/fenced_allocator.h"
+
+#include <algorithm>
+
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+namespace gpu {
+
+namespace {
+
+// Allocation alignment, must be a power of two.
+const unsigned int kAllocAlignment = 16;
+
+// Round down to the largest multiple of kAllocAlignment no greater than |size|.
+unsigned int RoundDown(unsigned int size) {
+ return size & ~(kAllocAlignment - 1);
+}
+
+// Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
+unsigned int RoundUp(unsigned int size) {
+ return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
+}
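+
+// For illustration, with kAllocAlignment == 16:
+//   RoundDown(20) == 16, RoundUp(20) == 32, RoundUp(16) == 16.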
+
+} // namespace
+
+#ifndef _MSC_VER
+const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
+#endif
+
+FencedAllocator::FencedAllocator(unsigned int size,
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback)
+ : helper_(helper),
+ poll_callback_(poll_callback),
+ bytes_in_use_(0) {
+ Block block = { FREE, 0, RoundDown(size), kUnusedToken };
+ blocks_.push_back(block);
+}
+
+FencedAllocator::~FencedAllocator() {
+ // Free blocks pending tokens.
+ for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ if (blocks_[i].state == FREE_PENDING_TOKEN) {
+ i = WaitForTokenAndFreeBlock(i);
+ }
+ }
+
+ DCHECK_EQ(blocks_.size(), 1u);
+ DCHECK_EQ(blocks_[0].state, FREE);
+}
+
+// Looks for a non-allocated block that is big enough. Searches the FREE
+// blocks first (for direct usage), first-fit, then the FREE_PENDING_TOKEN
+// blocks, waiting on their tokens. The current implementation isn't smart
+// about optimizing what to wait for; it just examines the blocks in order
+// (first-fit as well).
+FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
+  // A size of 0 is not allowed because it would be inconsistent to only
+  // sometimes have it succeed. Example: Alloc(SizeOfBuffer), Alloc(0).
+ if (size == 0) {
+ return kInvalidOffset;
+ }
+
+ // Round up the allocation size to ensure alignment.
+ size = RoundUp(size);
+
+ // Try first to allocate in a free block.
+ for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ Block &block = blocks_[i];
+ if (block.state == FREE && block.size >= size) {
+ return AllocInBlock(i, size);
+ }
+ }
+
+ // No free block is available. Look for blocks pending tokens, and wait for
+ // them to be re-usable.
+ for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ if (blocks_[i].state != FREE_PENDING_TOKEN)
+ continue;
+ i = WaitForTokenAndFreeBlock(i);
+ if (blocks_[i].size >= size)
+ return AllocInBlock(i, size);
+ }
+ return kInvalidOffset;
+}
+
+// Looks for the corresponding block, marks it FREE, and collapses it if
+// necessary.
+void FencedAllocator::Free(FencedAllocator::Offset offset) {
+ BlockIndex index = GetBlockByOffset(offset);
+ DCHECK_NE(blocks_[index].state, FREE);
+ Block &block = blocks_[index];
+
+ if (block.state == IN_USE)
+ bytes_in_use_ -= block.size;
+
+ block.state = FREE;
+ CollapseFreeBlock(index);
+}
+
+// Looks for the corresponding block and marks it FREE_PENDING_TOKEN.
+void FencedAllocator::FreePendingToken(
+ FencedAllocator::Offset offset, int32 token) {
+ BlockIndex index = GetBlockByOffset(offset);
+ Block &block = blocks_[index];
+ if (block.state == IN_USE)
+ bytes_in_use_ -= block.size;
+ block.state = FREE_PENDING_TOKEN;
+ block.token = token;
+}
+
+// Gets the size of the largest block marked as free.
+unsigned int FencedAllocator::GetLargestFreeSize() {
+ FreeUnused();
+ unsigned int max_size = 0;
+ for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ Block &block = blocks_[i];
+ if (block.state == FREE)
+ max_size = std::max(max_size, block.size);
+ }
+ return max_size;
+}
+
+// Gets the size of the largest segment of blocks that are either FREE or
+// FREE_PENDING_TOKEN.
+unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
+ unsigned int max_size = 0;
+ unsigned int current_size = 0;
+ for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ Block &block = blocks_[i];
+ if (block.state == IN_USE) {
+ max_size = std::max(max_size, current_size);
+ current_size = 0;
+ } else {
+ DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
+ current_size += block.size;
+ }
+ }
+ return std::max(max_size, current_size);
+}
+
+// Makes sure that:
+// - there is at least one block.
+// - there are no contiguous FREE blocks (they should have been collapsed).
+// - the successive offsets match the block sizes, and they are in order.
+bool FencedAllocator::CheckConsistency() {
+ if (blocks_.size() < 1) return false;
+ for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
+    Block &current = blocks_[i];
+ Block &next = blocks_[i + 1];
+ // This test is NOT included in the next one, because offset is unsigned.
+ if (next.offset <= current.offset)
+ return false;
+ if (next.offset != current.offset + current.size)
+ return false;
+ if (current.state == FREE && next.state == FREE)
+ return false;
+ }
+ return true;
+}
+
+// Returns false if all blocks are FREE (in which case they would have been
+// coalesced into a single block), true otherwise.
+bool FencedAllocator::InUse() {
+ return blocks_.size() != 1 || blocks_[0].state != FREE;
+}
+
+// Collapse the block to the next one, then to the previous one. Provided the
+// structure is consistent, those are the only blocks eligible for collapse.
+FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
+ BlockIndex index) {
+ if (index + 1 < blocks_.size()) {
+ Block &next = blocks_[index + 1];
+ if (next.state == FREE) {
+ blocks_[index].size += next.size;
+ blocks_.erase(blocks_.begin() + index + 1);
+ }
+ }
+ if (index > 0) {
+ Block &prev = blocks_[index - 1];
+ if (prev.state == FREE) {
+ prev.size += blocks_[index].size;
+ blocks_.erase(blocks_.begin() + index);
+ --index;
+ }
+ }
+ return index;
+}
+
+// Waits for the block's token, then marks the block as free and collapses
+// it.
+FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
+ BlockIndex index) {
+ Block &block = blocks_[index];
+ DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
+ helper_->WaitForToken(block.token);
+ block.state = FREE;
+ return CollapseFreeBlock(index);
+}
+
+// Frees any blocks pending a token for which the token has been read.
+void FencedAllocator::FreeUnused() {
+  // Free any blocks whose lifetime is handled outside this allocator.
+ poll_callback_.Run();
+
+ for (unsigned int i = 0; i < blocks_.size();) {
+ Block& block = blocks_[i];
+ if (block.state == FREE_PENDING_TOKEN &&
+ helper_->HasTokenPassed(block.token)) {
+ block.state = FREE;
+ i = CollapseFreeBlock(i);
+ } else {
+ ++i;
+ }
+ }
+}
+
+// If the block is exactly the requested size, simply mark it IN_USE, otherwise
+// split it and mark the first one (of the requested size) IN_USE.
+FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
+ unsigned int size) {
+ Block &block = blocks_[index];
+ DCHECK_GE(block.size, size);
+ DCHECK_EQ(block.state, FREE);
+ Offset offset = block.offset;
+ bytes_in_use_ += size;
+ if (block.size == size) {
+ block.state = IN_USE;
+ return offset;
+ }
+ Block newblock = { FREE, offset + size, block.size - size, kUnusedToken};
+ block.state = IN_USE;
+ block.size = size;
+  // This insertion is done last because it may invalidate |block|.
+ blocks_.insert(blocks_.begin() + index + 1, newblock);
+ return offset;
+}
+
+// The blocks are in offset order, so we can do a binary search.
+FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
+ Block templ = { IN_USE, offset, 0, kUnusedToken };
+ Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
+ templ, OffsetCmp());
+ DCHECK(it != blocks_.end() && it->offset == offset);
+ return it-blocks_.begin();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/fenced_allocator.h b/gpu/command_buffer/client/fenced_allocator.h
new file mode 100644
index 0000000..8e222e1
--- /dev/null
+++ b/gpu/command_buffer/client/fenced_allocator.h
@@ -0,0 +1,266 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the definition of the FencedAllocator class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_FENCED_ALLOCATOR_H_
+#define GPU_COMMAND_BUFFER_CLIENT_FENCED_ALLOCATOR_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+class CommandBufferHelper;
+
+// FencedAllocator provides a mechanism to manage allocations within a fixed
+// block of memory (storing the book-keeping externally). Furthermore this
+// class allows to free data "pending" the passage of a command buffer token,
+// that is, the memory won't be reused until the command buffer has processed
+// that token.
+//
+// NOTE: Although this class is intended to be used in the command buffer
+// environment which is multi-process, this class isn't "thread safe", because
+// it isn't meant to be shared across modules. It is thread-compatible though
+// (see http://www.corp.google.com/eng/doc/cpp_primer.html#thread_safety).
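+//
+// A typical allocation lifetime looks like this (illustrative sketch only;
+// |buffer_size|, |helper| and |poll_callback| are caller-provided):
+//
+//   FencedAllocator allocator(buffer_size, helper, poll_callback);
+//   FencedAllocator::Offset offset = allocator.Alloc(256);
+//   if (offset != FencedAllocator::kInvalidOffset) {
+//     // ... fill the memory at |offset| and issue commands referencing it ...
+//     allocator.FreePendingToken(offset, helper->InsertToken());
+//   }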
+class GPU_EXPORT FencedAllocator {
+ public:
+ typedef unsigned int Offset;
+ // Invalid offset, returned by Alloc in case of failure.
+ static const Offset kInvalidOffset = 0xffffffffU;
+
+ // Creates a FencedAllocator. Note that the size of the buffer is passed, but
+ // not its base address: everything is handled as offsets into the buffer.
+ FencedAllocator(unsigned int size,
+ CommandBufferHelper *helper,
+ const base::Closure& poll_callback);
+
+ ~FencedAllocator();
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ //
+ // Parameters:
+ // size: the size of the memory block to allocate.
+ //
+ // Returns:
+ // the offset of the allocated memory block, or kInvalidOffset if out of
+ // memory.
+ Offset Alloc(unsigned int size);
+
+ // Frees a block of memory.
+ //
+ // Parameters:
+ // offset: the offset of the memory block to free.
+ void Free(Offset offset);
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // offset: the offset of the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(Offset offset, int32 token);
+
+ // Frees any blocks pending a token for which the token has been read.
+ void FreeUnused();
+
+ // Gets the size of the largest free block that is available without waiting.
+ unsigned int GetLargestFreeSize();
+
+ // Gets the size of the largest free block that can be allocated if the
+ // caller can wait. Allocating a block of this size will succeed, but may
+ // block.
+ unsigned int GetLargestFreeOrPendingSize();
+
+ // Checks for consistency inside the book-keeping structures. Used for
+ // testing.
+ bool CheckConsistency();
+
+ // True if any memory is allocated.
+ bool InUse();
+
+ // Returns the number of bytes of memory that are IN_USE.
+ size_t bytes_in_use() const { return bytes_in_use_; }
+
+ private:
+ // Status of a block of memory, for book-keeping.
+ enum State {
+ IN_USE,
+ FREE,
+ FREE_PENDING_TOKEN
+ };
+
+ // Book-keeping structure that describes a block of memory.
+ struct Block {
+ State state;
+ Offset offset;
+ unsigned int size;
+ int32_t token; // token to wait for in the FREE_PENDING_TOKEN case.
+ };
+
+ // Comparison functor for memory block sorting.
+ class OffsetCmp {
+ public:
+ bool operator() (const Block &left, const Block &right) {
+ return left.offset < right.offset;
+ }
+ };
+
+ typedef std::vector<Block> Container;
+ typedef unsigned int BlockIndex;
+
+ static const int32_t kUnusedToken = 0;
+
+ // Gets the index of a memory block, given its offset.
+ BlockIndex GetBlockByOffset(Offset offset);
+
+ // Collapses a free block with its neighbours if they are free. Returns the
+ // index of the collapsed block.
+ // NOTE: this will invalidate block indices.
+ BlockIndex CollapseFreeBlock(BlockIndex index);
+
+ // Waits for a FREE_PENDING_TOKEN block to be usable, and frees it. Returns
+ // the new index of that block (since it may have been collapsed).
+ // NOTE: this will invalidate block indices.
+ BlockIndex WaitForTokenAndFreeBlock(BlockIndex index);
+
+ // Allocates a block of memory inside a given block, splitting it in two
+ // (unless that block is of the exact requested size).
+ // NOTE: this will invalidate block indices.
+ // Returns the offset of the allocated block (NOTE: this is different from
+ // the other functions that return a block index).
+ Offset AllocInBlock(BlockIndex index, unsigned int size);
+
+ CommandBufferHelper *helper_;
+ base::Closure poll_callback_;
+ Container blocks_;
+ size_t bytes_in_use_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FencedAllocator);
+};
+
+// This class functions just like FencedAllocator, but its API uses pointers
+// instead of offsets.
+class FencedAllocatorWrapper {
+ public:
+ FencedAllocatorWrapper(unsigned int size,
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback,
+ void* base)
+ : allocator_(size, helper, poll_callback),
+ base_(base) { }
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ //
+ // Parameters:
+ // size: the size of the memory block to allocate.
+ //
+ // Returns:
+ // the pointer to the allocated memory block, or NULL if out of
+ // memory.
+ void *Alloc(unsigned int size) {
+ FencedAllocator::Offset offset = allocator_.Alloc(size);
+ return GetPointer(offset);
+ }
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ // This is a type-safe version of Alloc, returning a typed pointer.
+ //
+ // Parameters:
+ // count: the number of elements to allocate.
+ //
+ // Returns:
+ // the pointer to the allocated memory block, or NULL if out of
+ // memory.
+ template <typename T> T *AllocTyped(unsigned int count) {
+ return static_cast<T *>(Alloc(count * sizeof(T)));
+ }
+
+ // Frees a block of memory.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ void Free(void *pointer) {
+ DCHECK(pointer);
+ allocator_.Free(GetOffset(pointer));
+ }
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(void *pointer, int32 token) {
+ DCHECK(pointer);
+ allocator_.FreePendingToken(GetOffset(pointer), token);
+ }
+
+ // Frees any blocks pending a token for which the token has been read.
+ void FreeUnused() {
+ allocator_.FreeUnused();
+ }
+
+ // Gets a pointer to a memory block given the base memory and the offset.
+ // It translates FencedAllocator::kInvalidOffset to NULL.
+ void *GetPointer(FencedAllocator::Offset offset) {
+ return (offset == FencedAllocator::kInvalidOffset) ?
+ NULL : static_cast<char *>(base_) + offset;
+ }
+
+ // Gets the offset to a memory block given the base memory and the address.
+ // It translates NULL to FencedAllocator::kInvalidOffset.
+ FencedAllocator::Offset GetOffset(void *pointer) {
+ return pointer ?
+ static_cast<FencedAllocator::Offset>(
+ static_cast<char*>(pointer) - static_cast<char*>(base_)) :
+ FencedAllocator::kInvalidOffset;
+ }
+
+ // Gets the size of the largest free block that is available without waiting.
+ unsigned int GetLargestFreeSize() {
+ return allocator_.GetLargestFreeSize();
+ }
+
+ // Gets the size of the largest free block that can be allocated if the
+ // caller can wait.
+ unsigned int GetLargestFreeOrPendingSize() {
+ return allocator_.GetLargestFreeOrPendingSize();
+ }
+
+ // Checks for consistency inside the book-keeping structures. Used for
+ // testing.
+ bool CheckConsistency() {
+ return allocator_.CheckConsistency();
+ }
+
+ // True if any memory is allocated.
+ bool InUse() {
+ return allocator_.InUse();
+ }
+
+ FencedAllocator &allocator() { return allocator_; }
+
+ size_t bytes_in_use() const { return allocator_.bytes_in_use(); }
+
+ private:
+ FencedAllocator allocator_;
+ void* base_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FencedAllocatorWrapper);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_FENCED_ALLOCATOR_H_
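As a quick orientation to the API declared above, here is a minimal usage sketch. It assumes an already-initialized CommandBufferHelper, a mapped buffer of buffer_size bytes starting at base, and it omits error handling and the actual command emission; it is illustrative, not code from this header.

#include <stdint.h>

#include "base/bind.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/fenced_allocator.h"

namespace {
void NoOpPoll() {}
}  // namespace

void ExampleUse(gpu::CommandBufferHelper* helper,
                void* base,
                unsigned int buffer_size) {
  gpu::FencedAllocatorWrapper allocator(
      buffer_size, helper, base::Bind(&NoOpPoll), base);

  // Grab some scratch space and fill it with data the service will read.
  void* scratch = allocator.Alloc(256);
  if (!scratch)
    return;  // NULL means out of memory, even after waiting on pending tokens.

  // ... issue commands through |helper| that consume |scratch| ...

  // Don't reuse the memory until the service has processed those commands:
  // free it pending the token just inserted into the command stream.
  int32_t token = helper->InsertToken();
  allocator.FreePendingToken(scratch, token);

  // Later (e.g. from an idle handler), reclaim anything whose token passed.
  allocator.FreeUnused();
}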
diff --git a/gpu/command_buffer/client/fenced_allocator_test.cc b/gpu/command_buffer/client/fenced_allocator_test.cc
new file mode 100644
index 0000000..e746be6
--- /dev/null
+++ b/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -0,0 +1,645 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the tests for the FencedAllocator class.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/aligned_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/client/fenced_allocator.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace gpu {
+
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::InvokeWithoutArgs;
+using testing::_;
+
+class BaseFencedAllocatorTest : public testing::Test {
+ protected:
+ static const unsigned int kBufferSize = 1024;
+ static const int kAllocAlignment = 16;
+
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock(true));
+ // ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
+ .WillRepeatedly(Return(error::kNoError));
+ // Forward the SetToken calls to the engine
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+
+ {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ EXPECT_TRUE(manager->Initialize());
+ }
+ command_buffer_.reset(
+ new CommandBufferService(transfer_buffer_manager_.get()));
+ EXPECT_TRUE(command_buffer_->Initialize());
+
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), api_mock_.get(), NULL));
+ command_buffer_->SetPutOffsetChangeCallback(base::Bind(
+ &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
+ command_buffer_->SetGetBufferChangeCallback(base::Bind(
+ &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
+
+ api_mock_->set_engine(gpu_scheduler_.get());
+
+ helper_.reset(new CommandBufferHelper(command_buffer_.get()));
+ helper_->Initialize(kBufferSize);
+ }
+
+ int32 GetToken() {
+ return command_buffer_->GetLastState().token;
+ }
+
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool autorelease_pool_;
+#endif
+ base::MessageLoop message_loop_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<CommandBufferService> command_buffer_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_ptr<CommandBufferHelper> helper_;
+};
+
+#ifndef _MSC_VER
+const unsigned int BaseFencedAllocatorTest::kBufferSize;
+#endif
+
+namespace {
+void EmptyPoll() {
+}
+}
+
+// Test fixture for FencedAllocator test - Creates a FencedAllocator, using a
+// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
+// it directly, not through the RPC mechanism), making sure Noops are ignored
+// and SetToken calls are properly forwarded to the engine.
+class FencedAllocatorTest : public BaseFencedAllocatorTest {
+ protected:
+ virtual void SetUp() {
+ BaseFencedAllocatorTest::SetUp();
+ allocator_.reset(new FencedAllocator(kBufferSize,
+ helper_.get(),
+ base::Bind(&EmptyPoll)));
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ BaseFencedAllocatorTest::TearDown();
+ }
+
+ scoped_ptr<FencedAllocator> allocator_;
+};
+
+// Checks basic alloc and free.
+TEST_F(FencedAllocatorTest, TestBasic) {
+ allocator_->CheckConsistency();
+ EXPECT_FALSE(allocator_->InUse());
+
+ const unsigned int kSize = 16;
+ FencedAllocator::Offset offset = allocator_->Alloc(kSize);
+ EXPECT_TRUE(allocator_->InUse());
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_GE(kBufferSize, offset+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ allocator_->Free(offset);
+ EXPECT_FALSE(allocator_->InUse());
+ EXPECT_TRUE(allocator_->CheckConsistency());
+}
+
+// Test alloc 0 fails.
+TEST_F(FencedAllocatorTest, TestAllocZero) {
+ FencedAllocator::Offset offset = allocator_->Alloc(0);
+ EXPECT_EQ(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_FALSE(allocator_->InUse());
+ EXPECT_TRUE(allocator_->CheckConsistency());
+}
+
+// Checks out-of-memory condition.
+TEST_F(FencedAllocatorTest, TestOutOfMemory) {
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ FencedAllocator::Offset offsets[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ offsets[i] = allocator_->Alloc(kSize);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
+ EXPECT_GE(kBufferSize, offsets[i]+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+
+ // This allocation should fail.
+ FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
+ EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free one successful allocation, reallocate with half the size
+ allocator_->Free(offsets[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ offsets[0] = allocator_->Alloc(kSize/2);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
+ EXPECT_GE(kBufferSize, offsets[0]+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // This allocation should fail as well.
+ offset_failed = allocator_->Alloc(kSize);
+ EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free up everything.
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ allocator_->Free(offsets[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+}
+
+// Checks the free-pending-token mechanism.
+TEST_F(FencedAllocatorTest, TestFreePendingToken) {
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ FencedAllocator::Offset offsets[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ offsets[i] = allocator_->Alloc(kSize);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
+ EXPECT_GE(kBufferSize, offsets[i]+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+
+ // This allocation should fail.
+ FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
+ EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offsets[0], token);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something, which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
+
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until the token is passed.
+ offsets[0] = allocator_->Alloc(kSize);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
+ EXPECT_GE(kBufferSize, offsets[0]+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ // Check that the token has indeed passed.
+ EXPECT_LE(token, GetToken());
+
+ // Free up everything.
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ allocator_->Free(offsets[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+}
+
+// Checks the free-pending-token mechanism using FreeUnused
+TEST_F(FencedAllocatorTest, FreeUnused) {
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ FencedAllocator::Offset offsets[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ offsets[i] = allocator_->Alloc(kSize);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
+ EXPECT_GE(kBufferSize, offsets[i]+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+ EXPECT_TRUE(allocator_->InUse());
+
+ // No memory should be available.
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offsets[0], token);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Force the command buffer to process the token.
+ helper_->Finish();
+
+ // Tell the allocator to update what's available based on the current token.
+ allocator_->FreeUnused();
+
+ // Check that the new largest free size takes into account the unused block.
+ EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());
+
+ // Free two more.
+ token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offsets[1], token);
+ token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offsets[2], token);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Check that nothing has changed.
+ EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());
+
+ // Force the command buffer to process the token.
+ helper_->Finish();
+
+ // Tell the allocator to update what's available based on the current token.
+ allocator_->FreeUnused();
+
+ // Check that the new largest free size takes into account the unused blocks.
+ EXPECT_EQ(kSize * 3, allocator_->GetLargestFreeSize());
+ EXPECT_TRUE(allocator_->InUse());
+
+ // Free up everything.
+ for (unsigned int i = 3; i < kAllocCount; ++i) {
+ allocator_->Free(offsets[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+ EXPECT_FALSE(allocator_->InUse());
+}
+
+// Tests GetLargestFreeSize
+TEST_F(FencedAllocatorTest, TestGetLargestFreeSize) {
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
+
+ FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
+ allocator_->Free(offset);
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
+
+ const unsigned int kSize = 16;
+ offset = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ // The following checks that the buffer is allocated "smartly" - which is
+ // dependent on the implementation, but either first-fit or best-fit would
+ // ensure that.
+ EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSize());
+
+ // Allocate 2 more buffers (now 3), and then free the first two. This is to
+ // ensure a hole. Note that this is dependent on the current first-fit
+ // implementation.
+ FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
+ FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
+ allocator_->Free(offset);
+ allocator_->Free(offset1);
+ EXPECT_EQ(kBufferSize - 3 * kSize, allocator_->GetLargestFreeSize());
+
+ offset = allocator_->Alloc(kBufferSize - 3 * kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_EQ(2 * kSize, allocator_->GetLargestFreeSize());
+
+ offset1 = allocator_->Alloc(2 * kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
+
+ allocator_->Free(offset);
+ allocator_->Free(offset1);
+ allocator_->Free(offset2);
+}
+
+// Tests GetLargestFreeOrPendingSize
+TEST_F(FencedAllocatorTest, TestGetLargestFreeOrPendingSize) {
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+
+ FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeOrPendingSize());
+ allocator_->Free(offset);
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+
+ const unsigned int kSize = 16;
+ offset = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ // The following checks that the buffer is allocated "smartly" - which is
+ // dependent on the implementation, but either first-fit or best-fit would
+ // ensure that.
+ EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeOrPendingSize());
+
+ // Allocate 2 more buffers (now 3), and then free the first two. This is to
+ // ensure a hole. Note that this is dependent on the current first-fit
+ // implementation.
+ FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
+ FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
+ allocator_->Free(offset);
+ allocator_->Free(offset1);
+ EXPECT_EQ(kBufferSize - 3 * kSize,
+ allocator_->GetLargestFreeOrPendingSize());
+
+ // Free the last one, pending a token.
+ int32 token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offset2, token);
+
+ // Now all the buffers have been freed...
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+ // ... but one is still waiting for the token.
+ EXPECT_EQ(kBufferSize - 3 * kSize,
+ allocator_->GetLargestFreeSize());
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something, which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until the token is passed, but it will succeed.
+ offset = allocator_->Alloc(kBufferSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ // Check that the token has indeed passed.
+ EXPECT_LE(token, GetToken());
+ allocator_->Free(offset);
+
+ // Everything now has been freed...
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+ // ... for real.
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
+}
+
+class FencedAllocatorPollTest : public BaseFencedAllocatorTest {
+ public:
+ static const unsigned int kAllocSize = 128;
+
+ MOCK_METHOD0(MockedPoll, void());
+
+ protected:
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ BaseFencedAllocatorTest::TearDown();
+ }
+};
+
+TEST_F(FencedAllocatorPollTest, TestPoll) {
+ scoped_ptr<FencedAllocator> allocator(
+ new FencedAllocator(kBufferSize,
+ helper_.get(),
+ base::Bind(&FencedAllocatorPollTest::MockedPoll,
+ base::Unretained(this))));
+
+ FencedAllocator::Offset mem1 = allocator->Alloc(kAllocSize);
+ FencedAllocator::Offset mem2 = allocator->Alloc(kAllocSize);
+ EXPECT_NE(mem1, FencedAllocator::kInvalidOffset);
+ EXPECT_NE(mem2, FencedAllocator::kInvalidOffset);
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);
+
+ // Check that no-op Poll doesn't affect the state.
+ EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
+ allocator->FreeUnused();
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);
+
+ // Check that freeing in Poll works.
+ base::Closure free_mem1_closure =
+ base::Bind(&FencedAllocator::Free,
+ base::Unretained(allocator.get()),
+ mem1);
+ EXPECT_CALL(*this, MockedPoll())
+ .WillOnce(InvokeWithoutArgs(&free_mem1_closure, &base::Closure::Run))
+ .RetiresOnSaturation();
+ allocator->FreeUnused();
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 1);
+
+ // Check that freeing still works.
+ EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
+ allocator->Free(mem2);
+ allocator->FreeUnused();
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), 0u);
+
+ allocator.reset();
+}
+
+// Test fixture for FencedAllocatorWrapper test - Creates a
+// FencedAllocatorWrapper, using a CommandBufferHelper with a mock
+// AsyncAPIInterface for its interface (calling it directly, not through the
+// RPC mechanism), making sure Noops are ignored and SetToken calls are properly
+// forwarded to the engine.
+class FencedAllocatorWrapperTest : public BaseFencedAllocatorTest {
+ protected:
+ virtual void SetUp() {
+ BaseFencedAllocatorTest::SetUp();
+
+ // Though allocating this buffer isn't strictly necessary, it makes
+ // allocations point to valid addresses, so they could be used for
+ // something.
+ buffer_.reset(static_cast<char*>(base::AlignedAlloc(
+ kBufferSize, kAllocAlignment)));
+ allocator_.reset(new FencedAllocatorWrapper(kBufferSize,
+ helper_.get(),
+ base::Bind(&EmptyPoll),
+ buffer_.get()));
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ BaseFencedAllocatorTest::TearDown();
+ }
+
+ scoped_ptr<FencedAllocatorWrapper> allocator_;
+ scoped_ptr<char, base::AlignedFreeDeleter> buffer_;
+};
+
+// Checks basic alloc and free.
+TEST_F(FencedAllocatorWrapperTest, TestBasic) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize = 16;
+ void *pointer = allocator_->Alloc(kSize);
+ ASSERT_TRUE(pointer);
+ EXPECT_LE(buffer_.get(), static_cast<char *>(pointer));
+ EXPECT_GE(kBufferSize, static_cast<char *>(pointer) - buffer_.get() + kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ allocator_->Free(pointer);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ char *pointer_char = allocator_->AllocTyped<char>(kSize);
+ ASSERT_TRUE(pointer_char);
+ EXPECT_LE(buffer_.get(), pointer_char);
+ EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
+ allocator_->Free(pointer_char);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ unsigned int *pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
+ ASSERT_TRUE(pointer_uint);
+ EXPECT_LE(buffer_.get(), reinterpret_cast<char *>(pointer_uint));
+ EXPECT_GE(buffer_.get() + kBufferSize,
+ reinterpret_cast<char *>(pointer_uint + kSize));
+
+ // Check that it did allocate kSize * sizeof(unsigned int). We can't tell
+ // directly, except from the remaining size.
+ EXPECT_EQ(kBufferSize - kSize * sizeof(*pointer_uint),
+ allocator_->GetLargestFreeSize());
+ allocator_->Free(pointer_uint);
+}
+
+// Test alloc 0 fails.
+TEST_F(FencedAllocatorWrapperTest, TestAllocZero) {
+ allocator_->CheckConsistency();
+
+ void *pointer = allocator_->Alloc(0);
+ ASSERT_FALSE(pointer);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+}
+
+// Checks that allocation offsets are aligned to multiples of 16 bytes.
+TEST_F(FencedAllocatorWrapperTest, TestAlignment) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize1 = 75;
+ void *pointer1 = allocator_->Alloc(kSize1);
+ ASSERT_TRUE(pointer1);
+ EXPECT_EQ(reinterpret_cast<intptr_t>(pointer1) & (kAllocAlignment - 1), 0);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ const unsigned int kSize2 = 43;
+ void *pointer2 = allocator_->Alloc(kSize2);
+ ASSERT_TRUE(pointer2);
+ EXPECT_EQ(reinterpret_cast<intptr_t>(pointer2) & (kAllocAlignment - 1), 0);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ allocator_->Free(pointer2);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ allocator_->Free(pointer1);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+}
+
+// Checks out-of-memory condition.
+TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ void *pointers[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ pointers[i] = allocator_->Alloc(kSize);
+ EXPECT_TRUE(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+
+ // This allocation should fail.
+ void *pointer_failed = allocator_->Alloc(kSize);
+ EXPECT_FALSE(pointer_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free one successful allocation, reallocate with half the size
+ allocator_->Free(pointers[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ pointers[0] = allocator_->Alloc(kSize/2);
+ EXPECT_TRUE(pointers[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // This allocation should fail as well.
+ pointer_failed = allocator_->Alloc(kSize);
+ EXPECT_FALSE(pointer_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free up everything.
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ allocator_->Free(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+}
+
+// Checks the free-pending-token mechanism.
+TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ void *pointers[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ pointers[i] = allocator_->Alloc(kSize);
+ EXPECT_TRUE(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+
+ // This allocation should fail.
+ void *pointer_failed = allocator_->Alloc(kSize);
+ EXPECT_FALSE(pointer_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(pointers[0], token);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something, which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
+
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until the token is passed.
+ pointers[0] = allocator_->Alloc(kSize);
+ EXPECT_TRUE(pointers[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ // Check that the token has indeed passed.
+ EXPECT_LE(token, GetToken());
+
+ // Free up everything.
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ allocator_->Free(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+}
+
+} // namespace gpu
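The distinction the tests above rely on, between memory that is free right now and memory that only becomes free once its token passes, can be condensed into a short sketch. This is not one of the tests; it assumes an allocator and helper wired up as in BaseFencedAllocatorTest.

#include <stdint.h>

#include "base/logging.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/fenced_allocator.h"

void SketchFreeVersusPending(gpu::FencedAllocator* allocator,
                             gpu::CommandBufferHelper* helper) {
  gpu::FencedAllocator::Offset offset = allocator->Alloc(64);
  int32_t token = helper->InsertToken();
  allocator->FreePendingToken(offset, token);

  // The block is only pending: a caller that cannot block doesn't see it yet.
  unsigned int available_now = allocator->GetLargestFreeSize();

  // A caller willing to wait on |token| could use it, so the "or pending"
  // variant already counts it.
  unsigned int available_if_waiting = allocator->GetLargestFreeOrPendingSize();
  DCHECK_GE(available_if_waiting, available_now);

  // Once the service has processed the token (Finish guarantees that),
  // FreeUnused() turns the pending block into a genuinely free one.
  helper->Finish();
  allocator->FreeUnused();
}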
diff --git a/gpu/command_buffer/client/gl_in_process_context.cc b/gpu/command_buffer/client/gl_in_process_context.cc
new file mode 100644
index 0000000..1441d03
--- /dev/null
+++ b/gpu/command_buffer/client/gl_in_process_context.cc
@@ -0,0 +1,298 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gl_in_process_context.h"
+
+#include <set>
+#include <utility>
+#include <vector>
+
+#include <GLES2/gl2.h>
+#ifndef GL_GLEXT_PROTOTYPES
+#define GL_GLEXT_PROTOTYPES 1
+#endif
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "ui/gfx/size.h"
+#include "ui/gl/gl_image.h"
+
+#if defined(OS_ANDROID)
+#include "ui/gl/android/surface_texture.h"
+#endif
+
+namespace gpu {
+
+namespace {
+
+const int32 kDefaultCommandBufferSize = 1024 * 1024;
+const unsigned int kDefaultStartTransferBufferSize = 4 * 1024 * 1024;
+const unsigned int kDefaultMinTransferBufferSize = 1 * 256 * 1024;
+const unsigned int kDefaultMaxTransferBufferSize = 16 * 1024 * 1024;
+
+class GLInProcessContextImpl
+ : public GLInProcessContext,
+ public base::SupportsWeakPtr<GLInProcessContextImpl> {
+ public:
+ explicit GLInProcessContextImpl(
+ const GLInProcessContextSharedMemoryLimits& mem_limits);
+ virtual ~GLInProcessContextImpl();
+
+ bool Initialize(
+ scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ bool use_global_share_group,
+ GLInProcessContext* share_context,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ const gpu::gles2::ContextCreationAttribHelper& attribs,
+ gfx::GpuPreference gpu_preference,
+ const scoped_refptr<InProcessCommandBuffer::Service>& service);
+
+ // GLInProcessContext implementation:
+ virtual void SetContextLostCallback(const base::Closure& callback) OVERRIDE;
+ virtual gles2::GLES2Implementation* GetImplementation() OVERRIDE;
+ virtual size_t GetMappedMemoryLimit() OVERRIDE;
+
+#if defined(OS_ANDROID)
+ virtual scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture(
+ uint32 stream_id) OVERRIDE;
+#endif
+
+ private:
+ void Destroy();
+ void OnContextLost();
+ void OnSignalSyncPoint(const base::Closure& callback);
+
+ scoped_ptr<gles2::GLES2CmdHelper> gles2_helper_;
+ scoped_ptr<TransferBuffer> transfer_buffer_;
+ scoped_ptr<gles2::GLES2Implementation> gles2_implementation_;
+ scoped_ptr<InProcessCommandBuffer> command_buffer_;
+
+ const GLInProcessContextSharedMemoryLimits mem_limits_;
+ bool context_lost_;
+ base::Closure context_lost_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLInProcessContextImpl);
+};
+
+base::LazyInstance<base::Lock> g_all_shared_contexts_lock =
+ LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<std::set<GLInProcessContextImpl*> > g_all_shared_contexts =
+ LAZY_INSTANCE_INITIALIZER;
+
+GLInProcessContextImpl::GLInProcessContextImpl(
+ const GLInProcessContextSharedMemoryLimits& mem_limits)
+ : mem_limits_(mem_limits), context_lost_(false) {
+}
+
+GLInProcessContextImpl::~GLInProcessContextImpl() {
+ {
+ base::AutoLock lock(g_all_shared_contexts_lock.Get());
+ g_all_shared_contexts.Get().erase(this);
+ }
+ Destroy();
+}
+
+gles2::GLES2Implementation* GLInProcessContextImpl::GetImplementation() {
+ return gles2_implementation_.get();
+}
+
+size_t GLInProcessContextImpl::GetMappedMemoryLimit() {
+ return mem_limits_.mapped_memory_reclaim_limit;
+}
+
+void GLInProcessContextImpl::SetContextLostCallback(
+ const base::Closure& callback) {
+ context_lost_callback_ = callback;
+}
+
+void GLInProcessContextImpl::OnContextLost() {
+ context_lost_ = true;
+ if (!context_lost_callback_.is_null()) {
+ context_lost_callback_.Run();
+ }
+}
+
+bool GLInProcessContextImpl::Initialize(
+ scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ bool use_global_share_group,
+ GLInProcessContext* share_context,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ const gles2::ContextCreationAttribHelper& attribs,
+ gfx::GpuPreference gpu_preference,
+ const scoped_refptr<InProcessCommandBuffer::Service>& service) {
+ DCHECK(!use_global_share_group || !share_context);
+ DCHECK(size.width() >= 0 && size.height() >= 0);
+
+ std::vector<int32> attrib_vector;
+ attribs.Serialize(&attrib_vector);
+
+ base::Closure wrapped_callback =
+ base::Bind(&GLInProcessContextImpl::OnContextLost, AsWeakPtr());
+ command_buffer_.reset(new InProcessCommandBuffer(service));
+
+ scoped_ptr<base::AutoLock> scoped_shared_context_lock;
+ scoped_refptr<gles2::ShareGroup> share_group;
+ InProcessCommandBuffer* share_command_buffer = NULL;
+ if (use_global_share_group) {
+ scoped_shared_context_lock.reset(
+ new base::AutoLock(g_all_shared_contexts_lock.Get()));
+ for (std::set<GLInProcessContextImpl*>::const_iterator it =
+ g_all_shared_contexts.Get().begin();
+ it != g_all_shared_contexts.Get().end();
+ it++) {
+ const GLInProcessContextImpl* context = *it;
+ if (!context->context_lost_) {
+ share_group = context->gles2_implementation_->share_group();
+ share_command_buffer = context->command_buffer_.get();
+ DCHECK(share_group.get());
+ DCHECK(share_command_buffer);
+ break;
+ }
+ }
+ } else if (share_context) {
+ GLInProcessContextImpl* impl =
+ static_cast<GLInProcessContextImpl*>(share_context);
+ share_group = impl->gles2_implementation_->share_group();
+ share_command_buffer = impl->command_buffer_.get();
+ DCHECK(share_group.get());
+ DCHECK(share_command_buffer);
+ }
+
+ if (!command_buffer_->Initialize(surface,
+ is_offscreen,
+ window,
+ size,
+ attrib_vector,
+ gpu_preference,
+ wrapped_callback,
+ share_command_buffer)) {
+ LOG(ERROR) << "Failed to initialize InProcessCommandBuffer";
+ return false;
+ }
+
+ // Create the GLES2 helper, which writes the command buffer protocol.
+ gles2_helper_.reset(new gles2::GLES2CmdHelper(command_buffer_.get()));
+ if (!gles2_helper_->Initialize(mem_limits_.command_buffer_size)) {
+ LOG(ERROR) << "Failed to initialize GLES2CmdHelper";
+ Destroy();
+ return false;
+ }
+
+ // Create a transfer buffer.
+ transfer_buffer_.reset(new TransferBuffer(gles2_helper_.get()));
+
+ // Check for consistency.
+ DCHECK(!attribs.bind_generates_resource);
+ bool bind_generates_resource = false;
+
+ // Create the object exposing the OpenGL API.
+ gles2_implementation_.reset(
+ new gles2::GLES2Implementation(gles2_helper_.get(),
+ share_group.get(),
+ transfer_buffer_.get(),
+ bind_generates_resource,
+ attribs.lose_context_when_out_of_memory,
+ command_buffer_.get()));
+
+ if (use_global_share_group) {
+ g_all_shared_contexts.Get().insert(this);
+ scoped_shared_context_lock.reset();
+ }
+
+ if (!gles2_implementation_->Initialize(
+ mem_limits_.start_transfer_buffer_size,
+ mem_limits_.min_transfer_buffer_size,
+ mem_limits_.max_transfer_buffer_size,
+ mem_limits_.mapped_memory_reclaim_limit)) {
+ return false;
+ }
+
+ return true;
+}
+
+void GLInProcessContextImpl::Destroy() {
+ if (gles2_implementation_) {
+ // First flush the context to ensure that any pending frees of resources
+ // are completed. Otherwise, if this context is part of a share group,
+ // those resources might leak. Also, any remaining side effects of commands
+ // issued on this context might not be visible to other contexts in the
+ // share group.
+ gles2_implementation_->Flush();
+
+ gles2_implementation_.reset();
+ }
+
+ transfer_buffer_.reset();
+ gles2_helper_.reset();
+ command_buffer_.reset();
+}
+
+#if defined(OS_ANDROID)
+scoped_refptr<gfx::SurfaceTexture>
+GLInProcessContextImpl::GetSurfaceTexture(uint32 stream_id) {
+ return command_buffer_->GetSurfaceTexture(stream_id);
+}
+#endif
+
+} // anonymous namespace
+
+GLInProcessContextSharedMemoryLimits::GLInProcessContextSharedMemoryLimits()
+ : command_buffer_size(kDefaultCommandBufferSize),
+ start_transfer_buffer_size(kDefaultStartTransferBufferSize),
+ min_transfer_buffer_size(kDefaultMinTransferBufferSize),
+ max_transfer_buffer_size(kDefaultMaxTransferBufferSize),
+ mapped_memory_reclaim_limit(gles2::GLES2Implementation::kNoLimit) {
+}
+
+// static
+GLInProcessContext* GLInProcessContext::Create(
+ scoped_refptr<gpu::InProcessCommandBuffer::Service> service,
+ scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ GLInProcessContext* share_context,
+ bool use_global_share_group,
+ const ::gpu::gles2::ContextCreationAttribHelper& attribs,
+ gfx::GpuPreference gpu_preference,
+ const GLInProcessContextSharedMemoryLimits& memory_limits) {
+ DCHECK(!use_global_share_group || !share_context);
+ if (surface.get()) {
+ DCHECK_EQ(surface->IsOffscreen(), is_offscreen);
+ DCHECK(surface->GetSize() == size);
+ DCHECK_EQ(gfx::kNullAcceleratedWidget, window);
+ }
+
+ scoped_ptr<GLInProcessContextImpl> context(
+ new GLInProcessContextImpl(memory_limits));
+ if (!context->Initialize(surface,
+ is_offscreen,
+ use_global_share_group,
+ share_context,
+ window,
+ size,
+ attribs,
+ gpu_preference,
+ service))
+ return NULL;
+
+ return context.release();
+}
+
+} // namespace gpu
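One part of the interface implemented above that callers typically wire up first is the context-lost notification. The sketch below shows the shape of that hookup; the handler name is hypothetical and the reaction to loss is application-specific.

#include "base/bind.h"
#include "gpu/command_buffer/client/gl_in_process_context.h"

namespace {

// Hypothetical handler: drop cached GL objects and schedule a rebuild.
void OnMyContextLost() {
}

void WatchForContextLoss(gpu::GLInProcessContext* context) {
  // OnContextLost() above runs this closure once the in-process command
  // buffer reports that the context has been lost.
  context->SetContextLostCallback(base::Bind(&OnMyContextLost));
}

}  // namespace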
diff --git a/gpu/command_buffer/client/gl_in_process_context.h b/gpu/command_buffer/client/gl_in_process_context.h
new file mode 100644
index 0000000..33b1348
--- /dev/null
+++ b/gpu/command_buffer/client/gl_in_process_context.h
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GL_IN_PROCESS_CONTEXT_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GL_IN_PROCESS_CONTEXT_H_
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "gl_in_process_context_export.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/in_process_command_buffer.h"
+#include "ui/gfx/native_widget_types.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/gpu_preference.h"
+
+namespace gfx {
+class Size;
+}
+
+#if defined(OS_ANDROID)
+namespace gfx {
+class SurfaceTexture;
+}
+#endif
+
+namespace gpu {
+
+namespace gles2 {
+class GLES2Implementation;
+}
+
+struct GL_IN_PROCESS_CONTEXT_EXPORT GLInProcessContextSharedMemoryLimits {
+ GLInProcessContextSharedMemoryLimits();
+
+ int32 command_buffer_size;
+ unsigned int start_transfer_buffer_size;
+ unsigned int min_transfer_buffer_size;
+ unsigned int max_transfer_buffer_size;
+ unsigned int mapped_memory_reclaim_limit;
+};
+
+class GL_IN_PROCESS_CONTEXT_EXPORT GLInProcessContext {
+ public:
+ virtual ~GLInProcessContext() {}
+
+ // Creates a GLInProcessContext. If |is_offscreen| is true, the context
+ // renders to an offscreen surface. |attribs| specifies the requested
+ // context creation attributes.
+ // If |surface| is not NULL, then it must match |is_offscreen| and |size|,
+ // |window| must be gfx::kNullAcceleratedWidget, and the command buffer
+ // service must run on the same thread as this client because GLSurface is
+ // not thread safe. If |surface| is NULL, then the other parameters are used
+ // to correctly create a surface.
+ // Only one of |share_context| and |use_global_share_group| can be used at
+ // the same time.
+ static GLInProcessContext* Create(
+ scoped_refptr<gpu::InProcessCommandBuffer::Service> service,
+ scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ GLInProcessContext* share_context,
+ bool use_global_share_group,
+ const gpu::gles2::ContextCreationAttribHelper& attribs,
+ gfx::GpuPreference gpu_preference,
+ const GLInProcessContextSharedMemoryLimits& memory_limits);
+
+ virtual void SetContextLostCallback(const base::Closure& callback) = 0;
+
+ // Allows direct access to the GLES2 implementation so a GLInProcessContext
+ // can be used without making it current.
+ virtual gles2::GLES2Implementation* GetImplementation() = 0;
+
+ virtual size_t GetMappedMemoryLimit() = 0;
+
+#if defined(OS_ANDROID)
+ virtual scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture(
+ uint32 stream_id) = 0;
+#endif
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GL_IN_PROCESS_CONTEXT_H_
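A minimal sketch of creating an offscreen context through this interface follows. Passing NULL for |surface| is covered by the comment above; passing NULL for |service| and the particular attribute and GPU-preference values are assumptions made for illustration, not requirements stated in this header.

#include "gpu/command_buffer/client/gl_in_process_context.h"
#include "ui/gfx/size.h"

gpu::GLInProcessContext* CreateOffscreenContext() {
  gpu::gles2::ContextCreationAttribHelper attribs;
  // The implementation DCHECKs that bind_generates_resource is false.
  attribs.bind_generates_resource = false;

  gpu::GLInProcessContextSharedMemoryLimits limits;  // Library defaults.

  return gpu::GLInProcessContext::Create(
      NULL,                         // service: assumed to select the default.
      NULL,                         // surface: created from the args below.
      true,                         // is_offscreen
      gfx::kNullAcceleratedWidget,  // window
      gfx::Size(1, 1),              // size
      NULL,                         // share_context
      false,                        // use_global_share_group
      attribs,
      gfx::PreferIntegratedGpu,
      limits);
}

Per the comment on GetImplementation(), GL calls are then issued through the returned context's GLES2Implementation rather than by making the context current.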
diff --git a/gpu/command_buffer/client/gl_in_process_context_export.h b/gpu/command_buffer/client/gl_in_process_context_export.h
new file mode 100644
index 0000000..36c4a34
--- /dev/null
+++ b/gpu/command_buffer/client/gl_in_process_context_export.h
@@ -0,0 +1,29 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GL_IN_PROCESS_CONTEXT_EXPORT_H_
+#define GL_IN_PROCESS_CONTEXT_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(GL_IN_PROCESS_CONTEXT_IMPLEMENTATION)
+#define GL_IN_PROCESS_CONTEXT_EXPORT __declspec(dllexport)
+#else
+#define GL_IN_PROCESS_CONTEXT_EXPORT __declspec(dllimport)
+#endif // defined(GL_IN_PROCESS_CONTEXT_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(GL_IN_PROCESS_CONTEXT_IMPLEMENTATION)
+#define GL_IN_PROCESS_CONTEXT_EXPORT __attribute__((visibility("default")))
+#else
+#define GL_IN_PROCESS_CONTEXT_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define GL_IN_PROCESS_CONTEXT_EXPORT
+#endif
+
+#endif // GL_IN_PROCESS_CONTEXT_EXPORT_H_
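This follows the usual Chromium component-export pattern: the target that builds the component defines GL_IN_PROCESS_CONTEXT_IMPLEMENTATION, so its symbols are exported, while every other target sees the import (or empty) form of the macro. A header then annotates its public classes the same way gl_in_process_context.h above does; the class name here is hypothetical.

#include "gl_in_process_context_export.h"

class GL_IN_PROCESS_CONTEXT_EXPORT ExampleExportedApi {
 public:
  void DoSomething();
};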
diff --git a/gpu/command_buffer/client/gles2_c_lib.cc b/gpu/command_buffer/client/gles2_c_lib.cc
new file mode 100644
index 0000000..fbe7b55
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_c_lib.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// These functions emulate GLES2 over command buffers for C.
+
+#include <assert.h>
+#include <stdlib.h>
+#include "gpu/command_buffer/client/gles2_lib.h"
+
+#ifndef GL_GLEXT_PROTOTYPES
+#define GL_GLEXT_PROTOTYPES
+#endif
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+extern "C" {
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/gles2_c_lib_autogen.h"
+} // extern "C"
+
+
diff --git a/gpu/command_buffer/client/gles2_c_lib_autogen.h b/gpu/command_buffer/client/gles2_c_lib_autogen.h
new file mode 100644
index 0000000..e11cf63
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -0,0 +1,1849 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// These functions emulate GLES2 over command buffers.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_AUTOGEN_H_
+
+void GLES2ActiveTexture(GLenum texture) {
+ gles2::GetGLContext()->ActiveTexture(texture);
+}
+void GLES2AttachShader(GLuint program, GLuint shader) {
+ gles2::GetGLContext()->AttachShader(program, shader);
+}
+void GLES2BindAttribLocation(GLuint program, GLuint index, const char* name) {
+ gles2::GetGLContext()->BindAttribLocation(program, index, name);
+}
+void GLES2BindBuffer(GLenum target, GLuint buffer) {
+ gles2::GetGLContext()->BindBuffer(target, buffer);
+}
+void GLES2BindFramebuffer(GLenum target, GLuint framebuffer) {
+ gles2::GetGLContext()->BindFramebuffer(target, framebuffer);
+}
+void GLES2BindRenderbuffer(GLenum target, GLuint renderbuffer) {
+ gles2::GetGLContext()->BindRenderbuffer(target, renderbuffer);
+}
+void GLES2BindTexture(GLenum target, GLuint texture) {
+ gles2::GetGLContext()->BindTexture(target, texture);
+}
+void GLES2BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ gles2::GetGLContext()->BlendColor(red, green, blue, alpha);
+}
+void GLES2BlendEquation(GLenum mode) {
+ gles2::GetGLContext()->BlendEquation(mode);
+}
+void GLES2BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) {
+ gles2::GetGLContext()->BlendEquationSeparate(modeRGB, modeAlpha);
+}
+void GLES2BlendFunc(GLenum sfactor, GLenum dfactor) {
+ gles2::GetGLContext()->BlendFunc(sfactor, dfactor);
+}
+void GLES2BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ gles2::GetGLContext()->BlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha);
+}
+void GLES2BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) {
+ gles2::GetGLContext()->BufferData(target, size, data, usage);
+}
+void GLES2BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) {
+ gles2::GetGLContext()->BufferSubData(target, offset, size, data);
+}
+GLenum GLES2CheckFramebufferStatus(GLenum target) {
+ return gles2::GetGLContext()->CheckFramebufferStatus(target);
+}
+void GLES2Clear(GLbitfield mask) {
+ gles2::GetGLContext()->Clear(mask);
+}
+void GLES2ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ gles2::GetGLContext()->ClearColor(red, green, blue, alpha);
+}
+void GLES2ClearDepthf(GLclampf depth) {
+ gles2::GetGLContext()->ClearDepthf(depth);
+}
+void GLES2ClearStencil(GLint s) {
+ gles2::GetGLContext()->ClearStencil(s);
+}
+void GLES2ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ gles2::GetGLContext()->ColorMask(red, green, blue, alpha);
+}
+void GLES2CompileShader(GLuint shader) {
+ gles2::GetGLContext()->CompileShader(shader);
+}
+void GLES2CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) {
+ gles2::GetGLContext()->CompressedTexImage2D(
+ target, level, internalformat, width, height, border, imageSize, data);
+}
+void GLES2CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) {
+ gles2::GetGLContext()->CompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, imageSize, data);
+}
+void GLES2CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) {
+ gles2::GetGLContext()->CopyTexImage2D(
+ target, level, internalformat, x, y, width, height, border);
+}
+void GLES2CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->CopyTexSubImage2D(
+ target, level, xoffset, yoffset, x, y, width, height);
+}
+GLuint GLES2CreateProgram() {
+ return gles2::GetGLContext()->CreateProgram();
+}
+GLuint GLES2CreateShader(GLenum type) {
+ return gles2::GetGLContext()->CreateShader(type);
+}
+void GLES2CullFace(GLenum mode) {
+ gles2::GetGLContext()->CullFace(mode);
+}
+void GLES2DeleteBuffers(GLsizei n, const GLuint* buffers) {
+ gles2::GetGLContext()->DeleteBuffers(n, buffers);
+}
+void GLES2DeleteFramebuffers(GLsizei n, const GLuint* framebuffers) {
+ gles2::GetGLContext()->DeleteFramebuffers(n, framebuffers);
+}
+void GLES2DeleteProgram(GLuint program) {
+ gles2::GetGLContext()->DeleteProgram(program);
+}
+void GLES2DeleteRenderbuffers(GLsizei n, const GLuint* renderbuffers) {
+ gles2::GetGLContext()->DeleteRenderbuffers(n, renderbuffers);
+}
+void GLES2DeleteShader(GLuint shader) {
+ gles2::GetGLContext()->DeleteShader(shader);
+}
+void GLES2DeleteTextures(GLsizei n, const GLuint* textures) {
+ gles2::GetGLContext()->DeleteTextures(n, textures);
+}
+void GLES2DepthFunc(GLenum func) {
+ gles2::GetGLContext()->DepthFunc(func);
+}
+void GLES2DepthMask(GLboolean flag) {
+ gles2::GetGLContext()->DepthMask(flag);
+}
+void GLES2DepthRangef(GLclampf zNear, GLclampf zFar) {
+ gles2::GetGLContext()->DepthRangef(zNear, zFar);
+}
+void GLES2DetachShader(GLuint program, GLuint shader) {
+ gles2::GetGLContext()->DetachShader(program, shader);
+}
+void GLES2Disable(GLenum cap) {
+ gles2::GetGLContext()->Disable(cap);
+}
+void GLES2DisableVertexAttribArray(GLuint index) {
+ gles2::GetGLContext()->DisableVertexAttribArray(index);
+}
+void GLES2DrawArrays(GLenum mode, GLint first, GLsizei count) {
+ gles2::GetGLContext()->DrawArrays(mode, first, count);
+}
+void GLES2DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) {
+ gles2::GetGLContext()->DrawElements(mode, count, type, indices);
+}
+void GLES2Enable(GLenum cap) {
+ gles2::GetGLContext()->Enable(cap);
+}
+void GLES2EnableVertexAttribArray(GLuint index) {
+ gles2::GetGLContext()->EnableVertexAttribArray(index);
+}
+void GLES2Finish() {
+ gles2::GetGLContext()->Finish();
+}
+void GLES2Flush() {
+ gles2::GetGLContext()->Flush();
+}
+void GLES2FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) {
+ gles2::GetGLContext()->FramebufferRenderbuffer(
+ target, attachment, renderbuffertarget, renderbuffer);
+}
+void GLES2FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) {
+ gles2::GetGLContext()->FramebufferTexture2D(
+ target, attachment, textarget, texture, level);
+}
+void GLES2FrontFace(GLenum mode) {
+ gles2::GetGLContext()->FrontFace(mode);
+}
+void GLES2GenBuffers(GLsizei n, GLuint* buffers) {
+ gles2::GetGLContext()->GenBuffers(n, buffers);
+}
+void GLES2GenerateMipmap(GLenum target) {
+ gles2::GetGLContext()->GenerateMipmap(target);
+}
+void GLES2GenFramebuffers(GLsizei n, GLuint* framebuffers) {
+ gles2::GetGLContext()->GenFramebuffers(n, framebuffers);
+}
+void GLES2GenRenderbuffers(GLsizei n, GLuint* renderbuffers) {
+ gles2::GetGLContext()->GenRenderbuffers(n, renderbuffers);
+}
+void GLES2GenTextures(GLsizei n, GLuint* textures) {
+ gles2::GetGLContext()->GenTextures(n, textures);
+}
+void GLES2GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) {
+ gles2::GetGLContext()->GetActiveAttrib(
+ program, index, bufsize, length, size, type, name);
+}
+void GLES2GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) {
+ gles2::GetGLContext()->GetActiveUniform(
+ program, index, bufsize, length, size, type, name);
+}
+void GLES2GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) {
+ gles2::GetGLContext()->GetAttachedShaders(program, maxcount, count, shaders);
+}
+GLint GLES2GetAttribLocation(GLuint program, const char* name) {
+ return gles2::GetGLContext()->GetAttribLocation(program, name);
+}
+void GLES2GetBooleanv(GLenum pname, GLboolean* params) {
+ gles2::GetGLContext()->GetBooleanv(pname, params);
+}
+void GLES2GetBufferParameteriv(GLenum target, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetBufferParameteriv(target, pname, params);
+}
+GLenum GLES2GetError() {
+ return gles2::GetGLContext()->GetError();
+}
+void GLES2GetFloatv(GLenum pname, GLfloat* params) {
+ gles2::GetGLContext()->GetFloatv(pname, params);
+}
+void GLES2GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) {
+ gles2::GetGLContext()->GetFramebufferAttachmentParameteriv(
+ target, attachment, pname, params);
+}
+void GLES2GetIntegerv(GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetIntegerv(pname, params);
+}
+void GLES2GetProgramiv(GLuint program, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetProgramiv(program, pname, params);
+}
+void GLES2GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ gles2::GetGLContext()->GetProgramInfoLog(program, bufsize, length, infolog);
+}
+void GLES2GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ gles2::GetGLContext()->GetRenderbufferParameteriv(target, pname, params);
+}
+void GLES2GetShaderiv(GLuint shader, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetShaderiv(shader, pname, params);
+}
+void GLES2GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ gles2::GetGLContext()->GetShaderInfoLog(shader, bufsize, length, infolog);
+}
+void GLES2GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) {
+ gles2::GetGLContext()->GetShaderPrecisionFormat(
+ shadertype, precisiontype, range, precision);
+}
+void GLES2GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ gles2::GetGLContext()->GetShaderSource(shader, bufsize, length, source);
+}
+const GLubyte* GLES2GetString(GLenum name) {
+ return gles2::GetGLContext()->GetString(name);
+}
+void GLES2GetTexParameterfv(GLenum target, GLenum pname, GLfloat* params) {
+ gles2::GetGLContext()->GetTexParameterfv(target, pname, params);
+}
+void GLES2GetTexParameteriv(GLenum target, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetTexParameteriv(target, pname, params);
+}
+void GLES2GetUniformfv(GLuint program, GLint location, GLfloat* params) {
+ gles2::GetGLContext()->GetUniformfv(program, location, params);
+}
+void GLES2GetUniformiv(GLuint program, GLint location, GLint* params) {
+ gles2::GetGLContext()->GetUniformiv(program, location, params);
+}
+GLint GLES2GetUniformLocation(GLuint program, const char* name) {
+ return gles2::GetGLContext()->GetUniformLocation(program, name);
+}
+void GLES2GetVertexAttribfv(GLuint index, GLenum pname, GLfloat* params) {
+ gles2::GetGLContext()->GetVertexAttribfv(index, pname, params);
+}
+void GLES2GetVertexAttribiv(GLuint index, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetVertexAttribiv(index, pname, params);
+}
+void GLES2GetVertexAttribPointerv(GLuint index, GLenum pname, void** pointer) {
+ gles2::GetGLContext()->GetVertexAttribPointerv(index, pname, pointer);
+}
+void GLES2Hint(GLenum target, GLenum mode) {
+ gles2::GetGLContext()->Hint(target, mode);
+}
+GLboolean GLES2IsBuffer(GLuint buffer) {
+ return gles2::GetGLContext()->IsBuffer(buffer);
+}
+GLboolean GLES2IsEnabled(GLenum cap) {
+ return gles2::GetGLContext()->IsEnabled(cap);
+}
+GLboolean GLES2IsFramebuffer(GLuint framebuffer) {
+ return gles2::GetGLContext()->IsFramebuffer(framebuffer);
+}
+GLboolean GLES2IsProgram(GLuint program) {
+ return gles2::GetGLContext()->IsProgram(program);
+}
+GLboolean GLES2IsRenderbuffer(GLuint renderbuffer) {
+ return gles2::GetGLContext()->IsRenderbuffer(renderbuffer);
+}
+GLboolean GLES2IsShader(GLuint shader) {
+ return gles2::GetGLContext()->IsShader(shader);
+}
+GLboolean GLES2IsTexture(GLuint texture) {
+ return gles2::GetGLContext()->IsTexture(texture);
+}
+void GLES2LineWidth(GLfloat width) {
+ gles2::GetGLContext()->LineWidth(width);
+}
+void GLES2LinkProgram(GLuint program) {
+ gles2::GetGLContext()->LinkProgram(program);
+}
+void GLES2PixelStorei(GLenum pname, GLint param) {
+ gles2::GetGLContext()->PixelStorei(pname, param);
+}
+void GLES2PolygonOffset(GLfloat factor, GLfloat units) {
+ gles2::GetGLContext()->PolygonOffset(factor, units);
+}
+void GLES2ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) {
+ gles2::GetGLContext()->ReadPixels(x, y, width, height, format, type, pixels);
+}
+void GLES2ReleaseShaderCompiler() {
+ gles2::GetGLContext()->ReleaseShaderCompiler();
+}
+void GLES2RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->RenderbufferStorage(
+ target, internalformat, width, height);
+}
+void GLES2SampleCoverage(GLclampf value, GLboolean invert) {
+ gles2::GetGLContext()->SampleCoverage(value, invert);
+}
+void GLES2Scissor(GLint x, GLint y, GLsizei width, GLsizei height) {
+ gles2::GetGLContext()->Scissor(x, y, width, height);
+}
+void GLES2ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) {
+ gles2::GetGLContext()->ShaderBinary(n, shaders, binaryformat, binary, length);
+}
+void GLES2ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) {
+ gles2::GetGLContext()->ShaderSource(shader, count, str, length);
+}
+void GLES2ShallowFinishCHROMIUM() {
+ gles2::GetGLContext()->ShallowFinishCHROMIUM();
+}
+void GLES2ShallowFlushCHROMIUM() {
+ gles2::GetGLContext()->ShallowFlushCHROMIUM();
+}
+void GLES2StencilFunc(GLenum func, GLint ref, GLuint mask) {
+ gles2::GetGLContext()->StencilFunc(func, ref, mask);
+}
+void GLES2StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) {
+ gles2::GetGLContext()->StencilFuncSeparate(face, func, ref, mask);
+}
+void GLES2StencilMask(GLuint mask) {
+ gles2::GetGLContext()->StencilMask(mask);
+}
+void GLES2StencilMaskSeparate(GLenum face, GLuint mask) {
+ gles2::GetGLContext()->StencilMaskSeparate(face, mask);
+}
+void GLES2StencilOp(GLenum fail, GLenum zfail, GLenum zpass) {
+ gles2::GetGLContext()->StencilOp(fail, zfail, zpass);
+}
+void GLES2StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) {
+ gles2::GetGLContext()->StencilOpSeparate(face, fail, zfail, zpass);
+}
+void GLES2TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ gles2::GetGLContext()->TexImage2D(target,
+ level,
+ internalformat,
+ width,
+ height,
+ border,
+ format,
+ type,
+ pixels);
+}
+void GLES2TexParameterf(GLenum target, GLenum pname, GLfloat param) {
+ gles2::GetGLContext()->TexParameterf(target, pname, param);
+}
+void GLES2TexParameterfv(GLenum target, GLenum pname, const GLfloat* params) {
+ gles2::GetGLContext()->TexParameterfv(target, pname, params);
+}
+void GLES2TexParameteri(GLenum target, GLenum pname, GLint param) {
+ gles2::GetGLContext()->TexParameteri(target, pname, param);
+}
+void GLES2TexParameteriv(GLenum target, GLenum pname, const GLint* params) {
+ gles2::GetGLContext()->TexParameteriv(target, pname, params);
+}
+void GLES2TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ gles2::GetGLContext()->TexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type, pixels);
+}
+void GLES2Uniform1f(GLint location, GLfloat x) {
+ gles2::GetGLContext()->Uniform1f(location, x);
+}
+void GLES2Uniform1fv(GLint location, GLsizei count, const GLfloat* v) {
+ gles2::GetGLContext()->Uniform1fv(location, count, v);
+}
+void GLES2Uniform1i(GLint location, GLint x) {
+ gles2::GetGLContext()->Uniform1i(location, x);
+}
+void GLES2Uniform1iv(GLint location, GLsizei count, const GLint* v) {
+ gles2::GetGLContext()->Uniform1iv(location, count, v);
+}
+void GLES2Uniform2f(GLint location, GLfloat x, GLfloat y) {
+ gles2::GetGLContext()->Uniform2f(location, x, y);
+}
+void GLES2Uniform2fv(GLint location, GLsizei count, const GLfloat* v) {
+ gles2::GetGLContext()->Uniform2fv(location, count, v);
+}
+void GLES2Uniform2i(GLint location, GLint x, GLint y) {
+ gles2::GetGLContext()->Uniform2i(location, x, y);
+}
+void GLES2Uniform2iv(GLint location, GLsizei count, const GLint* v) {
+ gles2::GetGLContext()->Uniform2iv(location, count, v);
+}
+void GLES2Uniform3f(GLint location, GLfloat x, GLfloat y, GLfloat z) {
+ gles2::GetGLContext()->Uniform3f(location, x, y, z);
+}
+void GLES2Uniform3fv(GLint location, GLsizei count, const GLfloat* v) {
+ gles2::GetGLContext()->Uniform3fv(location, count, v);
+}
+void GLES2Uniform3i(GLint location, GLint x, GLint y, GLint z) {
+ gles2::GetGLContext()->Uniform3i(location, x, y, z);
+}
+void GLES2Uniform3iv(GLint location, GLsizei count, const GLint* v) {
+ gles2::GetGLContext()->Uniform3iv(location, count, v);
+}
+void GLES2Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ gles2::GetGLContext()->Uniform4f(location, x, y, z, w);
+}
+void GLES2Uniform4fv(GLint location, GLsizei count, const GLfloat* v) {
+ gles2::GetGLContext()->Uniform4fv(location, count, v);
+}
+void GLES2Uniform4i(GLint location, GLint x, GLint y, GLint z, GLint w) {
+ gles2::GetGLContext()->Uniform4i(location, x, y, z, w);
+}
+void GLES2Uniform4iv(GLint location, GLsizei count, const GLint* v) {
+ gles2::GetGLContext()->Uniform4iv(location, count, v);
+}
+void GLES2UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ gles2::GetGLContext()->UniformMatrix2fv(location, count, transpose, value);
+}
+void GLES2UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ gles2::GetGLContext()->UniformMatrix3fv(location, count, transpose, value);
+}
+void GLES2UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ gles2::GetGLContext()->UniformMatrix4fv(location, count, transpose, value);
+}
+void GLES2UseProgram(GLuint program) {
+ gles2::GetGLContext()->UseProgram(program);
+}
+void GLES2ValidateProgram(GLuint program) {
+ gles2::GetGLContext()->ValidateProgram(program);
+}
+void GLES2VertexAttrib1f(GLuint indx, GLfloat x) {
+ gles2::GetGLContext()->VertexAttrib1f(indx, x);
+}
+void GLES2VertexAttrib1fv(GLuint indx, const GLfloat* values) {
+ gles2::GetGLContext()->VertexAttrib1fv(indx, values);
+}
+void GLES2VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) {
+ gles2::GetGLContext()->VertexAttrib2f(indx, x, y);
+}
+void GLES2VertexAttrib2fv(GLuint indx, const GLfloat* values) {
+ gles2::GetGLContext()->VertexAttrib2fv(indx, values);
+}
+void GLES2VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z) {
+ gles2::GetGLContext()->VertexAttrib3f(indx, x, y, z);
+}
+void GLES2VertexAttrib3fv(GLuint indx, const GLfloat* values) {
+ gles2::GetGLContext()->VertexAttrib3fv(indx, values);
+}
+void GLES2VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ gles2::GetGLContext()->VertexAttrib4f(indx, x, y, z, w);
+}
+void GLES2VertexAttrib4fv(GLuint indx, const GLfloat* values) {
+ gles2::GetGLContext()->VertexAttrib4fv(indx, values);
+}
+void GLES2VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) {
+ gles2::GetGLContext()->VertexAttribPointer(
+ indx, size, type, normalized, stride, ptr);
+}
+void GLES2Viewport(GLint x, GLint y, GLsizei width, GLsizei height) {
+ gles2::GetGLContext()->Viewport(x, y, width, height);
+}
+void GLES2BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) {
+ gles2::GetGLContext()->BlitFramebufferCHROMIUM(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+}
+void GLES2RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->RenderbufferStorageMultisampleCHROMIUM(
+ target, samples, internalformat, width, height);
+}
+void GLES2RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->RenderbufferStorageMultisampleEXT(
+ target, samples, internalformat, width, height);
+}
+void GLES2FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) {
+ gles2::GetGLContext()->FramebufferTexture2DMultisampleEXT(
+ target, attachment, textarget, texture, level, samples);
+}
+void GLES2TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->TexStorage2DEXT(
+ target, levels, internalFormat, width, height);
+}
+void GLES2GenQueriesEXT(GLsizei n, GLuint* queries) {
+ gles2::GetGLContext()->GenQueriesEXT(n, queries);
+}
+void GLES2DeleteQueriesEXT(GLsizei n, const GLuint* queries) {
+ gles2::GetGLContext()->DeleteQueriesEXT(n, queries);
+}
+GLboolean GLES2IsQueryEXT(GLuint id) {
+ return gles2::GetGLContext()->IsQueryEXT(id);
+}
+void GLES2BeginQueryEXT(GLenum target, GLuint id) {
+ gles2::GetGLContext()->BeginQueryEXT(target, id);
+}
+void GLES2EndQueryEXT(GLenum target) {
+ gles2::GetGLContext()->EndQueryEXT(target);
+}
+void GLES2GetQueryivEXT(GLenum target, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetQueryivEXT(target, pname, params);
+}
+void GLES2GetQueryObjectuivEXT(GLuint id, GLenum pname, GLuint* params) {
+ gles2::GetGLContext()->GetQueryObjectuivEXT(id, pname, params);
+}
+void GLES2InsertEventMarkerEXT(GLsizei length, const GLchar* marker) {
+ gles2::GetGLContext()->InsertEventMarkerEXT(length, marker);
+}
+void GLES2PushGroupMarkerEXT(GLsizei length, const GLchar* marker) {
+ gles2::GetGLContext()->PushGroupMarkerEXT(length, marker);
+}
+void GLES2PopGroupMarkerEXT() {
+ gles2::GetGLContext()->PopGroupMarkerEXT();
+}
+void GLES2GenVertexArraysOES(GLsizei n, GLuint* arrays) {
+ gles2::GetGLContext()->GenVertexArraysOES(n, arrays);
+}
+void GLES2DeleteVertexArraysOES(GLsizei n, const GLuint* arrays) {
+ gles2::GetGLContext()->DeleteVertexArraysOES(n, arrays);
+}
+GLboolean GLES2IsVertexArrayOES(GLuint array) {
+ return gles2::GetGLContext()->IsVertexArrayOES(array);
+}
+void GLES2BindVertexArrayOES(GLuint array) {
+ gles2::GetGLContext()->BindVertexArrayOES(array);
+}
+void GLES2SwapBuffers() {
+ gles2::GetGLContext()->SwapBuffers();
+}
+GLuint GLES2GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) {
+ return gles2::GetGLContext()->GetMaxValueInBufferCHROMIUM(
+ buffer_id, count, type, offset);
+}
+GLboolean GLES2EnableFeatureCHROMIUM(const char* feature) {
+ return gles2::GetGLContext()->EnableFeatureCHROMIUM(feature);
+}
+void* GLES2MapBufferCHROMIUM(GLuint target, GLenum access) {
+ return gles2::GetGLContext()->MapBufferCHROMIUM(target, access);
+}
+GLboolean GLES2UnmapBufferCHROMIUM(GLuint target) {
+ return gles2::GetGLContext()->UnmapBufferCHROMIUM(target);
+}
+void* GLES2MapImageCHROMIUM(GLuint image_id) {
+ return gles2::GetGLContext()->MapImageCHROMIUM(image_id);
+}
+void GLES2UnmapImageCHROMIUM(GLuint image_id) {
+ gles2::GetGLContext()->UnmapImageCHROMIUM(image_id);
+}
+void* GLES2MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) {
+ return gles2::GetGLContext()->MapBufferSubDataCHROMIUM(
+ target, offset, size, access);
+}
+void GLES2UnmapBufferSubDataCHROMIUM(const void* mem) {
+ gles2::GetGLContext()->UnmapBufferSubDataCHROMIUM(mem);
+}
+void* GLES2MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) {
+ return gles2::GetGLContext()->MapTexSubImage2DCHROMIUM(
+ target, level, xoffset, yoffset, width, height, format, type, access);
+}
+void GLES2UnmapTexSubImage2DCHROMIUM(const void* mem) {
+ gles2::GetGLContext()->UnmapTexSubImage2DCHROMIUM(mem);
+}
+void GLES2ResizeCHROMIUM(GLuint width, GLuint height, GLfloat scale_factor) {
+ gles2::GetGLContext()->ResizeCHROMIUM(width, height, scale_factor);
+}
+const GLchar* GLES2GetRequestableExtensionsCHROMIUM() {
+ return gles2::GetGLContext()->GetRequestableExtensionsCHROMIUM();
+}
+void GLES2RequestExtensionCHROMIUM(const char* extension) {
+ gles2::GetGLContext()->RequestExtensionCHROMIUM(extension);
+}
+void GLES2RateLimitOffscreenContextCHROMIUM() {
+ gles2::GetGLContext()->RateLimitOffscreenContextCHROMIUM();
+}
+void GLES2GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) {
+ gles2::GetGLContext()->GetMultipleIntegervCHROMIUM(
+ pnames, count, results, size);
+}
+void GLES2GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) {
+ gles2::GetGLContext()->GetProgramInfoCHROMIUM(program, bufsize, size, info);
+}
+GLuint GLES2CreateStreamTextureCHROMIUM(GLuint texture) {
+ return gles2::GetGLContext()->CreateStreamTextureCHROMIUM(texture);
+}
+GLuint GLES2CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ return gles2::GetGLContext()->CreateImageCHROMIUM(
+ width, height, internalformat, usage);
+}
+void GLES2DestroyImageCHROMIUM(GLuint image_id) {
+ gles2::GetGLContext()->DestroyImageCHROMIUM(image_id);
+}
+void GLES2GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) {
+ gles2::GetGLContext()->GetImageParameterivCHROMIUM(image_id, pname, params);
+}
+GLuint GLES2CreateGpuMemoryBufferImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ return gles2::GetGLContext()->CreateGpuMemoryBufferImageCHROMIUM(
+ width, height, internalformat, usage);
+}
+void GLES2GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ gles2::GetGLContext()->GetTranslatedShaderSourceANGLE(
+ shader, bufsize, length, source);
+}
+void GLES2PostSubBufferCHROMIUM(GLint x, GLint y, GLint width, GLint height) {
+ gles2::GetGLContext()->PostSubBufferCHROMIUM(x, y, width, height);
+}
+void GLES2TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) {
+ gles2::GetGLContext()->TexImageIOSurface2DCHROMIUM(
+ target, width, height, ioSurfaceId, plane);
+}
+void GLES2CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) {
+ gles2::GetGLContext()->CopyTextureCHROMIUM(
+ target, source_id, dest_id, level, internalformat, dest_type);
+}
+void GLES2DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) {
+ gles2::GetGLContext()->DrawArraysInstancedANGLE(
+ mode, first, count, primcount);
+}
+void GLES2DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) {
+ gles2::GetGLContext()->DrawElementsInstancedANGLE(
+ mode, count, type, indices, primcount);
+}
+void GLES2VertexAttribDivisorANGLE(GLuint index, GLuint divisor) {
+ gles2::GetGLContext()->VertexAttribDivisorANGLE(index, divisor);
+}
+void GLES2GenMailboxCHROMIUM(GLbyte* mailbox) {
+ gles2::GetGLContext()->GenMailboxCHROMIUM(mailbox);
+}
+void GLES2ProduceTextureCHROMIUM(GLenum target, const GLbyte* mailbox) {
+ gles2::GetGLContext()->ProduceTextureCHROMIUM(target, mailbox);
+}
+void GLES2ProduceTextureDirectCHROMIUM(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) {
+ gles2::GetGLContext()->ProduceTextureDirectCHROMIUM(texture, target, mailbox);
+}
+void GLES2ConsumeTextureCHROMIUM(GLenum target, const GLbyte* mailbox) {
+ gles2::GetGLContext()->ConsumeTextureCHROMIUM(target, mailbox);
+}
+GLuint GLES2CreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) {
+ return gles2::GetGLContext()->CreateAndConsumeTextureCHROMIUM(target,
+ mailbox);
+}
+void GLES2BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) {
+ gles2::GetGLContext()->BindUniformLocationCHROMIUM(program, location, name);
+}
+void GLES2BindTexImage2DCHROMIUM(GLenum target, GLint imageId) {
+ gles2::GetGLContext()->BindTexImage2DCHROMIUM(target, imageId);
+}
+void GLES2ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) {
+ gles2::GetGLContext()->ReleaseTexImage2DCHROMIUM(target, imageId);
+}
+void GLES2TraceBeginCHROMIUM(const char* name) {
+ gles2::GetGLContext()->TraceBeginCHROMIUM(name);
+}
+void GLES2TraceEndCHROMIUM() {
+ gles2::GetGLContext()->TraceEndCHROMIUM();
+}
+void GLES2AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) {
+ gles2::GetGLContext()->AsyncTexSubImage2DCHROMIUM(
+ target, level, xoffset, yoffset, width, height, format, type, data);
+}
+void GLES2AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ gles2::GetGLContext()->AsyncTexImage2DCHROMIUM(target,
+ level,
+ internalformat,
+ width,
+ height,
+ border,
+ format,
+ type,
+ pixels);
+}
+void GLES2WaitAsyncTexImage2DCHROMIUM(GLenum target) {
+ gles2::GetGLContext()->WaitAsyncTexImage2DCHROMIUM(target);
+}
+void GLES2WaitAllAsyncTexImage2DCHROMIUM() {
+ gles2::GetGLContext()->WaitAllAsyncTexImage2DCHROMIUM();
+}
+void GLES2DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) {
+ gles2::GetGLContext()->DiscardFramebufferEXT(target, count, attachments);
+}
+void GLES2LoseContextCHROMIUM(GLenum current, GLenum other) {
+ gles2::GetGLContext()->LoseContextCHROMIUM(current, other);
+}
+GLuint GLES2InsertSyncPointCHROMIUM() {
+ return gles2::GetGLContext()->InsertSyncPointCHROMIUM();
+}
+void GLES2WaitSyncPointCHROMIUM(GLuint sync_point) {
+ gles2::GetGLContext()->WaitSyncPointCHROMIUM(sync_point);
+}
+void GLES2DrawBuffersEXT(GLsizei count, const GLenum* bufs) {
+ gles2::GetGLContext()->DrawBuffersEXT(count, bufs);
+}
+void GLES2DiscardBackbufferCHROMIUM() {
+ gles2::GetGLContext()->DiscardBackbufferCHROMIUM();
+}
+void GLES2ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) {
+ gles2::GetGLContext()->ScheduleOverlayPlaneCHROMIUM(plane_z_order,
+ plane_transform,
+ overlay_texture_id,
+ bounds_x,
+ bounds_y,
+ bounds_width,
+ bounds_height,
+ uv_x,
+ uv_y,
+ uv_width,
+ uv_height);
+}
+void GLES2MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) {
+ gles2::GetGLContext()->MatrixLoadfCHROMIUM(matrixMode, m);
+}
+void GLES2MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
+ gles2::GetGLContext()->MatrixLoadIdentityCHROMIUM(matrixMode);
+}
+
+namespace gles2 {
+
+extern const NameToFunc g_gles2_function_table[] = {
+ {
+ "glActiveTexture",
+ reinterpret_cast<GLES2FunctionPointer>(glActiveTexture),
+ },
+ {
+ "glAttachShader",
+ reinterpret_cast<GLES2FunctionPointer>(glAttachShader),
+ },
+ {
+ "glBindAttribLocation",
+ reinterpret_cast<GLES2FunctionPointer>(glBindAttribLocation),
+ },
+ {
+ "glBindBuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glBindBuffer),
+ },
+ {
+ "glBindFramebuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glBindFramebuffer),
+ },
+ {
+ "glBindRenderbuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glBindRenderbuffer),
+ },
+ {
+ "glBindTexture",
+ reinterpret_cast<GLES2FunctionPointer>(glBindTexture),
+ },
+ {
+ "glBlendColor",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendColor),
+ },
+ {
+ "glBlendEquation",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendEquation),
+ },
+ {
+ "glBlendEquationSeparate",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendEquationSeparate),
+ },
+ {
+ "glBlendFunc",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendFunc),
+ },
+ {
+ "glBlendFuncSeparate",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendFuncSeparate),
+ },
+ {
+ "glBufferData",
+ reinterpret_cast<GLES2FunctionPointer>(glBufferData),
+ },
+ {
+ "glBufferSubData",
+ reinterpret_cast<GLES2FunctionPointer>(glBufferSubData),
+ },
+ {
+ "glCheckFramebufferStatus",
+ reinterpret_cast<GLES2FunctionPointer>(glCheckFramebufferStatus),
+ },
+ {
+ "glClear",
+ reinterpret_cast<GLES2FunctionPointer>(glClear),
+ },
+ {
+ "glClearColor",
+ reinterpret_cast<GLES2FunctionPointer>(glClearColor),
+ },
+ {
+ "glClearDepthf",
+ reinterpret_cast<GLES2FunctionPointer>(glClearDepthf),
+ },
+ {
+ "glClearStencil",
+ reinterpret_cast<GLES2FunctionPointer>(glClearStencil),
+ },
+ {
+ "glColorMask",
+ reinterpret_cast<GLES2FunctionPointer>(glColorMask),
+ },
+ {
+ "glCompileShader",
+ reinterpret_cast<GLES2FunctionPointer>(glCompileShader),
+ },
+ {
+ "glCompressedTexImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glCompressedTexImage2D),
+ },
+ {
+ "glCompressedTexSubImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glCompressedTexSubImage2D),
+ },
+ {
+ "glCopyTexImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glCopyTexImage2D),
+ },
+ {
+ "glCopyTexSubImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glCopyTexSubImage2D),
+ },
+ {
+ "glCreateProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glCreateProgram),
+ },
+ {
+ "glCreateShader",
+ reinterpret_cast<GLES2FunctionPointer>(glCreateShader),
+ },
+ {
+ "glCullFace",
+ reinterpret_cast<GLES2FunctionPointer>(glCullFace),
+ },
+ {
+ "glDeleteBuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteBuffers),
+ },
+ {
+ "glDeleteFramebuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteFramebuffers),
+ },
+ {
+ "glDeleteProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteProgram),
+ },
+ {
+ "glDeleteRenderbuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteRenderbuffers),
+ },
+ {
+ "glDeleteShader",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteShader),
+ },
+ {
+ "glDeleteTextures",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteTextures),
+ },
+ {
+ "glDepthFunc",
+ reinterpret_cast<GLES2FunctionPointer>(glDepthFunc),
+ },
+ {
+ "glDepthMask",
+ reinterpret_cast<GLES2FunctionPointer>(glDepthMask),
+ },
+ {
+ "glDepthRangef",
+ reinterpret_cast<GLES2FunctionPointer>(glDepthRangef),
+ },
+ {
+ "glDetachShader",
+ reinterpret_cast<GLES2FunctionPointer>(glDetachShader),
+ },
+ {
+ "glDisable",
+ reinterpret_cast<GLES2FunctionPointer>(glDisable),
+ },
+ {
+ "glDisableVertexAttribArray",
+ reinterpret_cast<GLES2FunctionPointer>(glDisableVertexAttribArray),
+ },
+ {
+ "glDrawArrays",
+ reinterpret_cast<GLES2FunctionPointer>(glDrawArrays),
+ },
+ {
+ "glDrawElements",
+ reinterpret_cast<GLES2FunctionPointer>(glDrawElements),
+ },
+ {
+ "glEnable",
+ reinterpret_cast<GLES2FunctionPointer>(glEnable),
+ },
+ {
+ "glEnableVertexAttribArray",
+ reinterpret_cast<GLES2FunctionPointer>(glEnableVertexAttribArray),
+ },
+ {
+ "glFinish",
+ reinterpret_cast<GLES2FunctionPointer>(glFinish),
+ },
+ {
+ "glFlush",
+ reinterpret_cast<GLES2FunctionPointer>(glFlush),
+ },
+ {
+ "glFramebufferRenderbuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glFramebufferRenderbuffer),
+ },
+ {
+ "glFramebufferTexture2D",
+ reinterpret_cast<GLES2FunctionPointer>(glFramebufferTexture2D),
+ },
+ {
+ "glFrontFace",
+ reinterpret_cast<GLES2FunctionPointer>(glFrontFace),
+ },
+ {
+ "glGenBuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glGenBuffers),
+ },
+ {
+ "glGenerateMipmap",
+ reinterpret_cast<GLES2FunctionPointer>(glGenerateMipmap),
+ },
+ {
+ "glGenFramebuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glGenFramebuffers),
+ },
+ {
+ "glGenRenderbuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glGenRenderbuffers),
+ },
+ {
+ "glGenTextures",
+ reinterpret_cast<GLES2FunctionPointer>(glGenTextures),
+ },
+ {
+ "glGetActiveAttrib",
+ reinterpret_cast<GLES2FunctionPointer>(glGetActiveAttrib),
+ },
+ {
+ "glGetActiveUniform",
+ reinterpret_cast<GLES2FunctionPointer>(glGetActiveUniform),
+ },
+ {
+ "glGetAttachedShaders",
+ reinterpret_cast<GLES2FunctionPointer>(glGetAttachedShaders),
+ },
+ {
+ "glGetAttribLocation",
+ reinterpret_cast<GLES2FunctionPointer>(glGetAttribLocation),
+ },
+ {
+ "glGetBooleanv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetBooleanv),
+ },
+ {
+ "glGetBufferParameteriv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetBufferParameteriv),
+ },
+ {
+ "glGetError",
+ reinterpret_cast<GLES2FunctionPointer>(glGetError),
+ },
+ {
+ "glGetFloatv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetFloatv),
+ },
+ {
+ "glGetFramebufferAttachmentParameteriv",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glGetFramebufferAttachmentParameteriv),
+ },
+ {
+ "glGetIntegerv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetIntegerv),
+ },
+ {
+ "glGetProgramiv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramiv),
+ },
+ {
+ "glGetProgramInfoLog",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramInfoLog),
+ },
+ {
+ "glGetRenderbufferParameteriv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetRenderbufferParameteriv),
+ },
+ {
+ "glGetShaderiv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetShaderiv),
+ },
+ {
+ "glGetShaderInfoLog",
+ reinterpret_cast<GLES2FunctionPointer>(glGetShaderInfoLog),
+ },
+ {
+ "glGetShaderPrecisionFormat",
+ reinterpret_cast<GLES2FunctionPointer>(glGetShaderPrecisionFormat),
+ },
+ {
+ "glGetShaderSource",
+ reinterpret_cast<GLES2FunctionPointer>(glGetShaderSource),
+ },
+ {
+ "glGetString",
+ reinterpret_cast<GLES2FunctionPointer>(glGetString),
+ },
+ {
+ "glGetTexParameterfv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetTexParameterfv),
+ },
+ {
+ "glGetTexParameteriv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetTexParameteriv),
+ },
+ {
+ "glGetUniformfv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetUniformfv),
+ },
+ {
+ "glGetUniformiv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetUniformiv),
+ },
+ {
+ "glGetUniformLocation",
+ reinterpret_cast<GLES2FunctionPointer>(glGetUniformLocation),
+ },
+ {
+ "glGetVertexAttribfv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetVertexAttribfv),
+ },
+ {
+ "glGetVertexAttribiv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetVertexAttribiv),
+ },
+ {
+ "glGetVertexAttribPointerv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetVertexAttribPointerv),
+ },
+ {
+ "glHint",
+ reinterpret_cast<GLES2FunctionPointer>(glHint),
+ },
+ {
+ "glIsBuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glIsBuffer),
+ },
+ {
+ "glIsEnabled",
+ reinterpret_cast<GLES2FunctionPointer>(glIsEnabled),
+ },
+ {
+ "glIsFramebuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glIsFramebuffer),
+ },
+ {
+ "glIsProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glIsProgram),
+ },
+ {
+ "glIsRenderbuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glIsRenderbuffer),
+ },
+ {
+ "glIsShader",
+ reinterpret_cast<GLES2FunctionPointer>(glIsShader),
+ },
+ {
+ "glIsTexture",
+ reinterpret_cast<GLES2FunctionPointer>(glIsTexture),
+ },
+ {
+ "glLineWidth",
+ reinterpret_cast<GLES2FunctionPointer>(glLineWidth),
+ },
+ {
+ "glLinkProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glLinkProgram),
+ },
+ {
+ "glPixelStorei",
+ reinterpret_cast<GLES2FunctionPointer>(glPixelStorei),
+ },
+ {
+ "glPolygonOffset",
+ reinterpret_cast<GLES2FunctionPointer>(glPolygonOffset),
+ },
+ {
+ "glReadPixels",
+ reinterpret_cast<GLES2FunctionPointer>(glReadPixels),
+ },
+ {
+ "glReleaseShaderCompiler",
+ reinterpret_cast<GLES2FunctionPointer>(glReleaseShaderCompiler),
+ },
+ {
+ "glRenderbufferStorage",
+ reinterpret_cast<GLES2FunctionPointer>(glRenderbufferStorage),
+ },
+ {
+ "glSampleCoverage",
+ reinterpret_cast<GLES2FunctionPointer>(glSampleCoverage),
+ },
+ {
+ "glScissor",
+ reinterpret_cast<GLES2FunctionPointer>(glScissor),
+ },
+ {
+ "glShaderBinary",
+ reinterpret_cast<GLES2FunctionPointer>(glShaderBinary),
+ },
+ {
+ "glShaderSource",
+ reinterpret_cast<GLES2FunctionPointer>(glShaderSource),
+ },
+ {
+ "glShallowFinishCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glShallowFinishCHROMIUM),
+ },
+ {
+ "glShallowFlushCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glShallowFlushCHROMIUM),
+ },
+ {
+ "glStencilFunc",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilFunc),
+ },
+ {
+ "glStencilFuncSeparate",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilFuncSeparate),
+ },
+ {
+ "glStencilMask",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilMask),
+ },
+ {
+ "glStencilMaskSeparate",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilMaskSeparate),
+ },
+ {
+ "glStencilOp",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilOp),
+ },
+ {
+ "glStencilOpSeparate",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilOpSeparate),
+ },
+ {
+ "glTexImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glTexImage2D),
+ },
+ {
+ "glTexParameterf",
+ reinterpret_cast<GLES2FunctionPointer>(glTexParameterf),
+ },
+ {
+ "glTexParameterfv",
+ reinterpret_cast<GLES2FunctionPointer>(glTexParameterfv),
+ },
+ {
+ "glTexParameteri",
+ reinterpret_cast<GLES2FunctionPointer>(glTexParameteri),
+ },
+ {
+ "glTexParameteriv",
+ reinterpret_cast<GLES2FunctionPointer>(glTexParameteriv),
+ },
+ {
+ "glTexSubImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glTexSubImage2D),
+ },
+ {
+ "glUniform1f",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform1f),
+ },
+ {
+ "glUniform1fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform1fv),
+ },
+ {
+ "glUniform1i",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform1i),
+ },
+ {
+ "glUniform1iv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform1iv),
+ },
+ {
+ "glUniform2f",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform2f),
+ },
+ {
+ "glUniform2fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform2fv),
+ },
+ {
+ "glUniform2i",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform2i),
+ },
+ {
+ "glUniform2iv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform2iv),
+ },
+ {
+ "glUniform3f",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform3f),
+ },
+ {
+ "glUniform3fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform3fv),
+ },
+ {
+ "glUniform3i",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform3i),
+ },
+ {
+ "glUniform3iv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform3iv),
+ },
+ {
+ "glUniform4f",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform4f),
+ },
+ {
+ "glUniform4fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform4fv),
+ },
+ {
+ "glUniform4i",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform4i),
+ },
+ {
+ "glUniform4iv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform4iv),
+ },
+ {
+ "glUniformMatrix2fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniformMatrix2fv),
+ },
+ {
+ "glUniformMatrix3fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniformMatrix3fv),
+ },
+ {
+ "glUniformMatrix4fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniformMatrix4fv),
+ },
+ {
+ "glUseProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glUseProgram),
+ },
+ {
+ "glValidateProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glValidateProgram),
+ },
+ {
+ "glVertexAttrib1f",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib1f),
+ },
+ {
+ "glVertexAttrib1fv",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib1fv),
+ },
+ {
+ "glVertexAttrib2f",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib2f),
+ },
+ {
+ "glVertexAttrib2fv",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib2fv),
+ },
+ {
+ "glVertexAttrib3f",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib3f),
+ },
+ {
+ "glVertexAttrib3fv",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib3fv),
+ },
+ {
+ "glVertexAttrib4f",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib4f),
+ },
+ {
+ "glVertexAttrib4fv",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib4fv),
+ },
+ {
+ "glVertexAttribPointer",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttribPointer),
+ },
+ {
+ "glViewport",
+ reinterpret_cast<GLES2FunctionPointer>(glViewport),
+ },
+ {
+ "glBlitFramebufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glBlitFramebufferCHROMIUM),
+ },
+ {
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glRenderbufferStorageMultisampleCHROMIUM),
+ },
+ {
+ "glRenderbufferStorageMultisampleEXT",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glRenderbufferStorageMultisampleEXT),
+ },
+ {
+ "glFramebufferTexture2DMultisampleEXT",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glFramebufferTexture2DMultisampleEXT),
+ },
+ {
+ "glTexStorage2DEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glTexStorage2DEXT),
+ },
+ {
+ "glGenQueriesEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glGenQueriesEXT),
+ },
+ {
+ "glDeleteQueriesEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteQueriesEXT),
+ },
+ {
+ "glIsQueryEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glIsQueryEXT),
+ },
+ {
+ "glBeginQueryEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glBeginQueryEXT),
+ },
+ {
+ "glEndQueryEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glEndQueryEXT),
+ },
+ {
+ "glGetQueryivEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glGetQueryivEXT),
+ },
+ {
+ "glGetQueryObjectuivEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glGetQueryObjectuivEXT),
+ },
+ {
+ "glInsertEventMarkerEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glInsertEventMarkerEXT),
+ },
+ {
+ "glPushGroupMarkerEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glPushGroupMarkerEXT),
+ },
+ {
+ "glPopGroupMarkerEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glPopGroupMarkerEXT),
+ },
+ {
+ "glGenVertexArraysOES",
+ reinterpret_cast<GLES2FunctionPointer>(glGenVertexArraysOES),
+ },
+ {
+ "glDeleteVertexArraysOES",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteVertexArraysOES),
+ },
+ {
+ "glIsVertexArrayOES",
+ reinterpret_cast<GLES2FunctionPointer>(glIsVertexArrayOES),
+ },
+ {
+ "glBindVertexArrayOES",
+ reinterpret_cast<GLES2FunctionPointer>(glBindVertexArrayOES),
+ },
+ {
+ "glSwapBuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glSwapBuffers),
+ },
+ {
+ "glGetMaxValueInBufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGetMaxValueInBufferCHROMIUM),
+ },
+ {
+ "glEnableFeatureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glEnableFeatureCHROMIUM),
+ },
+ {
+ "glMapBufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMapBufferCHROMIUM),
+ },
+ {
+ "glUnmapBufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glUnmapBufferCHROMIUM),
+ },
+ {
+ "glMapImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMapImageCHROMIUM),
+ },
+ {
+ "glUnmapImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glUnmapImageCHROMIUM),
+ },
+ {
+ "glMapBufferSubDataCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMapBufferSubDataCHROMIUM),
+ },
+ {
+ "glUnmapBufferSubDataCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glUnmapBufferSubDataCHROMIUM),
+ },
+ {
+ "glMapTexSubImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMapTexSubImage2DCHROMIUM),
+ },
+ {
+ "glUnmapTexSubImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glUnmapTexSubImage2DCHROMIUM),
+ },
+ {
+ "glResizeCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glResizeCHROMIUM),
+ },
+ {
+ "glGetRequestableExtensionsCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGetRequestableExtensionsCHROMIUM),
+ },
+ {
+ "glRequestExtensionCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glRequestExtensionCHROMIUM),
+ },
+ {
+ "glRateLimitOffscreenContextCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glRateLimitOffscreenContextCHROMIUM),
+ },
+ {
+ "glGetMultipleIntegervCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGetMultipleIntegervCHROMIUM),
+ },
+ {
+ "glGetProgramInfoCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramInfoCHROMIUM),
+ },
+ {
+ "glCreateStreamTextureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glCreateStreamTextureCHROMIUM),
+ },
+ {
+ "glCreateImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glCreateImageCHROMIUM),
+ },
+ {
+ "glDestroyImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glDestroyImageCHROMIUM),
+ },
+ {
+ "glGetImageParameterivCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGetImageParameterivCHROMIUM),
+ },
+ {
+ "glCreateGpuMemoryBufferImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glCreateGpuMemoryBufferImageCHROMIUM),
+ },
+ {
+ "glGetTranslatedShaderSourceANGLE",
+ reinterpret_cast<GLES2FunctionPointer>(glGetTranslatedShaderSourceANGLE),
+ },
+ {
+ "glPostSubBufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glPostSubBufferCHROMIUM),
+ },
+ {
+ "glTexImageIOSurface2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glTexImageIOSurface2DCHROMIUM),
+ },
+ {
+ "glCopyTextureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glCopyTextureCHROMIUM),
+ },
+ {
+ "glDrawArraysInstancedANGLE",
+ reinterpret_cast<GLES2FunctionPointer>(glDrawArraysInstancedANGLE),
+ },
+ {
+ "glDrawElementsInstancedANGLE",
+ reinterpret_cast<GLES2FunctionPointer>(glDrawElementsInstancedANGLE),
+ },
+ {
+ "glVertexAttribDivisorANGLE",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttribDivisorANGLE),
+ },
+ {
+ "glGenMailboxCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGenMailboxCHROMIUM),
+ },
+ {
+ "glProduceTextureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glProduceTextureCHROMIUM),
+ },
+ {
+ "glProduceTextureDirectCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glProduceTextureDirectCHROMIUM),
+ },
+ {
+ "glConsumeTextureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glConsumeTextureCHROMIUM),
+ },
+ {
+ "glCreateAndConsumeTextureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glCreateAndConsumeTextureCHROMIUM),
+ },
+ {
+ "glBindUniformLocationCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glBindUniformLocationCHROMIUM),
+ },
+ {
+ "glBindTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glBindTexImage2DCHROMIUM),
+ },
+ {
+ "glReleaseTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glReleaseTexImage2DCHROMIUM),
+ },
+ {
+ "glTraceBeginCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glTraceBeginCHROMIUM),
+ },
+ {
+ "glTraceEndCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glTraceEndCHROMIUM),
+ },
+ {
+ "glAsyncTexSubImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glAsyncTexSubImage2DCHROMIUM),
+ },
+ {
+ "glAsyncTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glAsyncTexImage2DCHROMIUM),
+ },
+ {
+ "glWaitAsyncTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glWaitAsyncTexImage2DCHROMIUM),
+ },
+ {
+ "glWaitAllAsyncTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glWaitAllAsyncTexImage2DCHROMIUM),
+ },
+ {
+ "glDiscardFramebufferEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glDiscardFramebufferEXT),
+ },
+ {
+ "glLoseContextCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glLoseContextCHROMIUM),
+ },
+ {
+ "glInsertSyncPointCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glInsertSyncPointCHROMIUM),
+ },
+ {
+ "glWaitSyncPointCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glWaitSyncPointCHROMIUM),
+ },
+ {
+ "glDrawBuffersEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glDrawBuffersEXT),
+ },
+ {
+ "glDiscardBackbufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glDiscardBackbufferCHROMIUM),
+ },
+ {
+ "glScheduleOverlayPlaneCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glScheduleOverlayPlaneCHROMIUM),
+ },
+ {
+ "glMatrixLoadfCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMatrixLoadfCHROMIUM),
+ },
+ {
+ "glMatrixLoadIdentityCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMatrixLoadIdentityCHROMIUM),
+ },
+ {
+ NULL,
+ NULL,
+ },
+};
+
+} // namespace gles2
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_c_lib_export.h b/gpu/command_buffer/client/gles2_c_lib_export.h
new file mode 100644
index 0000000..ceacc6e
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_c_lib_export.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_EXPORT_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(GLES2_C_LIB_IMPLEMENTATION)
+#define GLES2_C_LIB_EXPORT __declspec(dllexport)
+#else
+#define GLES2_C_LIB_EXPORT __declspec(dllimport)
+#endif // defined(GLES2_C_LIB_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(GLES2_C_LIB_IMPLEMENTATION)
+#define GLES2_C_LIB_EXPORT __attribute__((visibility("default")))
+#else
+#define GLES2_C_LIB_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define GLES2_C_LIB_EXPORT
+#endif
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_EXPORT_H_
diff --git a/gpu/command_buffer/client/gles2_cmd_helper.cc b/gpu/command_buffer/client/gles2_cmd_helper.cc
new file mode 100644
index 0000000..d52970a
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_cmd_helper.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+
+namespace gpu {
+namespace gles2 {
+
+GLES2CmdHelper::GLES2CmdHelper(CommandBuffer* command_buffer)
+ : CommandBufferHelper(command_buffer) {
+}
+
+GLES2CmdHelper::~GLES2CmdHelper() {
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
+
diff --git a/gpu/command_buffer/client/gles2_cmd_helper.h b/gpu/command_buffer/client/gles2_cmd_helper.h
new file mode 100644
index 0000000..af6cc5d
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_cmd_helper.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_H_
+
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+// A class that helps write GL command buffers.
+class GPU_EXPORT GLES2CmdHelper : public CommandBufferHelper {
+ public:
+ explicit GLES2CmdHelper(CommandBuffer* command_buffer);
+ virtual ~GLES2CmdHelper();
+
+ // Include the auto-generated part of this class. We split this because it
+ // means we can easily edit the non-auto generated parts right here in this
+ // file instead of having to edit some template or the code generator.
+ #include "gpu/command_buffer/client/gles2_cmd_helper_autogen.h"
+
+ // Helpers that could not be auto-generated.
+ // TODO(gman): Auto generate these.
+ void CreateAndConsumeTextureCHROMIUMImmediate(GLenum target,
+ uint32_t client_id,
+ const GLbyte* _mailbox) {
+ const uint32_t size =
+ gles2::cmds::CreateAndConsumeTextureCHROMIUMImmediate::ComputeSize();
+ gles2::cmds::CreateAndConsumeTextureCHROMIUMImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::CreateAndConsumeTextureCHROMIUMImmediate>(size);
+ if (c) {
+ c->Init(target, client_id, _mailbox);
+ }
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(GLES2CmdHelper);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_H_
+
diff --git a/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
new file mode 100644
index 0000000..c8432da
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -0,0 +1,1932 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_
+
+void ActiveTexture(GLenum texture) {
+ gles2::cmds::ActiveTexture* c = GetCmdSpace<gles2::cmds::ActiveTexture>();
+ if (c) {
+ c->Init(texture);
+ }
+}
+
+void AttachShader(GLuint program, GLuint shader) {
+ gles2::cmds::AttachShader* c = GetCmdSpace<gles2::cmds::AttachShader>();
+ if (c) {
+ c->Init(program, shader);
+ }
+}
+
+void BindAttribLocationBucket(GLuint program,
+ GLuint index,
+ uint32_t name_bucket_id) {
+ gles2::cmds::BindAttribLocationBucket* c =
+ GetCmdSpace<gles2::cmds::BindAttribLocationBucket>();
+ if (c) {
+ c->Init(program, index, name_bucket_id);
+ }
+}
+
+void BindBuffer(GLenum target, GLuint buffer) {
+ gles2::cmds::BindBuffer* c = GetCmdSpace<gles2::cmds::BindBuffer>();
+ if (c) {
+ c->Init(target, buffer);
+ }
+}
+
+void BindFramebuffer(GLenum target, GLuint framebuffer) {
+ gles2::cmds::BindFramebuffer* c = GetCmdSpace<gles2::cmds::BindFramebuffer>();
+ if (c) {
+ c->Init(target, framebuffer);
+ }
+}
+
+void BindRenderbuffer(GLenum target, GLuint renderbuffer) {
+ gles2::cmds::BindRenderbuffer* c =
+ GetCmdSpace<gles2::cmds::BindRenderbuffer>();
+ if (c) {
+ c->Init(target, renderbuffer);
+ }
+}
+
+void BindTexture(GLenum target, GLuint texture) {
+ gles2::cmds::BindTexture* c = GetCmdSpace<gles2::cmds::BindTexture>();
+ if (c) {
+ c->Init(target, texture);
+ }
+}
+
+void BlendColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha) {
+ gles2::cmds::BlendColor* c = GetCmdSpace<gles2::cmds::BlendColor>();
+ if (c) {
+ c->Init(red, green, blue, alpha);
+ }
+}
+
+void BlendEquation(GLenum mode) {
+ gles2::cmds::BlendEquation* c = GetCmdSpace<gles2::cmds::BlendEquation>();
+ if (c) {
+ c->Init(mode);
+ }
+}
+
+void BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) {
+ gles2::cmds::BlendEquationSeparate* c =
+ GetCmdSpace<gles2::cmds::BlendEquationSeparate>();
+ if (c) {
+ c->Init(modeRGB, modeAlpha);
+ }
+}
+
+void BlendFunc(GLenum sfactor, GLenum dfactor) {
+ gles2::cmds::BlendFunc* c = GetCmdSpace<gles2::cmds::BlendFunc>();
+ if (c) {
+ c->Init(sfactor, dfactor);
+ }
+}
+
+void BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ gles2::cmds::BlendFuncSeparate* c =
+ GetCmdSpace<gles2::cmds::BlendFuncSeparate>();
+ if (c) {
+ c->Init(srcRGB, dstRGB, srcAlpha, dstAlpha);
+ }
+}
+
+void BufferData(GLenum target,
+ GLsizeiptr size,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset,
+ GLenum usage) {
+ gles2::cmds::BufferData* c = GetCmdSpace<gles2::cmds::BufferData>();
+ if (c) {
+ c->Init(target, size, data_shm_id, data_shm_offset, usage);
+ }
+}
+
+void BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset) {
+ gles2::cmds::BufferSubData* c = GetCmdSpace<gles2::cmds::BufferSubData>();
+ if (c) {
+ c->Init(target, offset, size, data_shm_id, data_shm_offset);
+ }
+}
+
+void CheckFramebufferStatus(GLenum target,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::CheckFramebufferStatus* c =
+ GetCmdSpace<gles2::cmds::CheckFramebufferStatus>();
+ if (c) {
+ c->Init(target, result_shm_id, result_shm_offset);
+ }
+}
+
+void Clear(GLbitfield mask) {
+ gles2::cmds::Clear* c = GetCmdSpace<gles2::cmds::Clear>();
+ if (c) {
+ c->Init(mask);
+ }
+}
+
+void ClearColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha) {
+ gles2::cmds::ClearColor* c = GetCmdSpace<gles2::cmds::ClearColor>();
+ if (c) {
+ c->Init(red, green, blue, alpha);
+ }
+}
+
+void ClearDepthf(GLclampf depth) {
+ gles2::cmds::ClearDepthf* c = GetCmdSpace<gles2::cmds::ClearDepthf>();
+ if (c) {
+ c->Init(depth);
+ }
+}
+
+void ClearStencil(GLint s) {
+ gles2::cmds::ClearStencil* c = GetCmdSpace<gles2::cmds::ClearStencil>();
+ if (c) {
+ c->Init(s);
+ }
+}
+
+void ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ gles2::cmds::ColorMask* c = GetCmdSpace<gles2::cmds::ColorMask>();
+ if (c) {
+ c->Init(red, green, blue, alpha);
+ }
+}
+
+void CompileShader(GLuint shader) {
+ gles2::cmds::CompileShader* c = GetCmdSpace<gles2::cmds::CompileShader>();
+ if (c) {
+ c->Init(shader);
+ }
+}
+
+void CompressedTexImage2DBucket(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLuint bucket_id) {
+ gles2::cmds::CompressedTexImage2DBucket* c =
+ GetCmdSpace<gles2::cmds::CompressedTexImage2DBucket>();
+ if (c) {
+ c->Init(target, level, internalformat, width, height, bucket_id);
+ }
+}
+
+void CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLsizei imageSize,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset) {
+ gles2::cmds::CompressedTexImage2D* c =
+ GetCmdSpace<gles2::cmds::CompressedTexImage2D>();
+ if (c) {
+ c->Init(target,
+ level,
+ internalformat,
+ width,
+ height,
+ imageSize,
+ data_shm_id,
+ data_shm_offset);
+ }
+}
+
+void CompressedTexSubImage2DBucket(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLuint bucket_id) {
+ gles2::cmds::CompressedTexSubImage2DBucket* c =
+ GetCmdSpace<gles2::cmds::CompressedTexSubImage2DBucket>();
+ if (c) {
+ c->Init(target, level, xoffset, yoffset, width, height, format, bucket_id);
+ }
+}
+
+void CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset) {
+ gles2::cmds::CompressedTexSubImage2D* c =
+ GetCmdSpace<gles2::cmds::CompressedTexSubImage2D>();
+ if (c) {
+ c->Init(target,
+ level,
+ xoffset,
+ yoffset,
+ width,
+ height,
+ format,
+ imageSize,
+ data_shm_id,
+ data_shm_offset);
+ }
+}
+
+void CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::CopyTexImage2D* c = GetCmdSpace<gles2::cmds::CopyTexImage2D>();
+ if (c) {
+ c->Init(target, level, internalformat, x, y, width, height);
+ }
+}
+
+void CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::CopyTexSubImage2D* c =
+ GetCmdSpace<gles2::cmds::CopyTexSubImage2D>();
+ if (c) {
+ c->Init(target, level, xoffset, yoffset, x, y, width, height);
+ }
+}
+
+void CreateProgram(uint32_t client_id) {
+ gles2::cmds::CreateProgram* c = GetCmdSpace<gles2::cmds::CreateProgram>();
+ if (c) {
+ c->Init(client_id);
+ }
+}
+
+void CreateShader(GLenum type, uint32_t client_id) {
+ gles2::cmds::CreateShader* c = GetCmdSpace<gles2::cmds::CreateShader>();
+ if (c) {
+ c->Init(type, client_id);
+ }
+}
+
+void CullFace(GLenum mode) {
+ gles2::cmds::CullFace* c = GetCmdSpace<gles2::cmds::CullFace>();
+ if (c) {
+ c->Init(mode);
+ }
+}
+
+void DeleteBuffersImmediate(GLsizei n, const GLuint* buffers) {
+ const uint32_t size = gles2::cmds::DeleteBuffersImmediate::ComputeSize(n);
+ gles2::cmds::DeleteBuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DeleteBuffersImmediate>(size);
+ if (c) {
+ c->Init(n, buffers);
+ }
+}
+
+void DeleteFramebuffersImmediate(GLsizei n, const GLuint* framebuffers) {
+ const uint32_t size =
+ gles2::cmds::DeleteFramebuffersImmediate::ComputeSize(n);
+ gles2::cmds::DeleteFramebuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DeleteFramebuffersImmediate>(
+ size);
+ if (c) {
+ c->Init(n, framebuffers);
+ }
+}
+
+void DeleteProgram(GLuint program) {
+ gles2::cmds::DeleteProgram* c = GetCmdSpace<gles2::cmds::DeleteProgram>();
+ if (c) {
+ c->Init(program);
+ }
+}
+
+void DeleteRenderbuffersImmediate(GLsizei n, const GLuint* renderbuffers) {
+ const uint32_t size =
+ gles2::cmds::DeleteRenderbuffersImmediate::ComputeSize(n);
+ gles2::cmds::DeleteRenderbuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DeleteRenderbuffersImmediate>(
+ size);
+ if (c) {
+ c->Init(n, renderbuffers);
+ }
+}
+
+void DeleteShader(GLuint shader) {
+ gles2::cmds::DeleteShader* c = GetCmdSpace<gles2::cmds::DeleteShader>();
+ if (c) {
+ c->Init(shader);
+ }
+}
+
+void DeleteTexturesImmediate(GLsizei n, const GLuint* textures) {
+ const uint32_t size = gles2::cmds::DeleteTexturesImmediate::ComputeSize(n);
+ gles2::cmds::DeleteTexturesImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DeleteTexturesImmediate>(size);
+ if (c) {
+ c->Init(n, textures);
+ }
+}
+
+void DepthFunc(GLenum func) {
+ gles2::cmds::DepthFunc* c = GetCmdSpace<gles2::cmds::DepthFunc>();
+ if (c) {
+ c->Init(func);
+ }
+}
+
+void DepthMask(GLboolean flag) {
+ gles2::cmds::DepthMask* c = GetCmdSpace<gles2::cmds::DepthMask>();
+ if (c) {
+ c->Init(flag);
+ }
+}
+
+void DepthRangef(GLclampf zNear, GLclampf zFar) {
+ gles2::cmds::DepthRangef* c = GetCmdSpace<gles2::cmds::DepthRangef>();
+ if (c) {
+ c->Init(zNear, zFar);
+ }
+}
+
+void DetachShader(GLuint program, GLuint shader) {
+ gles2::cmds::DetachShader* c = GetCmdSpace<gles2::cmds::DetachShader>();
+ if (c) {
+ c->Init(program, shader);
+ }
+}
+
+void Disable(GLenum cap) {
+ gles2::cmds::Disable* c = GetCmdSpace<gles2::cmds::Disable>();
+ if (c) {
+ c->Init(cap);
+ }
+}
+
+void DisableVertexAttribArray(GLuint index) {
+ gles2::cmds::DisableVertexAttribArray* c =
+ GetCmdSpace<gles2::cmds::DisableVertexAttribArray>();
+ if (c) {
+ c->Init(index);
+ }
+}
+
+void DrawArrays(GLenum mode, GLint first, GLsizei count) {
+ gles2::cmds::DrawArrays* c = GetCmdSpace<gles2::cmds::DrawArrays>();
+ if (c) {
+ c->Init(mode, first, count);
+ }
+}
+
+void DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ GLuint index_offset) {
+ gles2::cmds::DrawElements* c = GetCmdSpace<gles2::cmds::DrawElements>();
+ if (c) {
+ c->Init(mode, count, type, index_offset);
+ }
+}
+
+void Enable(GLenum cap) {
+ gles2::cmds::Enable* c = GetCmdSpace<gles2::cmds::Enable>();
+ if (c) {
+ c->Init(cap);
+ }
+}
+
+void EnableVertexAttribArray(GLuint index) {
+ gles2::cmds::EnableVertexAttribArray* c =
+ GetCmdSpace<gles2::cmds::EnableVertexAttribArray>();
+ if (c) {
+ c->Init(index);
+ }
+}
+
+void Finish() {
+ gles2::cmds::Finish* c = GetCmdSpace<gles2::cmds::Finish>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void Flush() {
+ gles2::cmds::Flush* c = GetCmdSpace<gles2::cmds::Flush>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) {
+ gles2::cmds::FramebufferRenderbuffer* c =
+ GetCmdSpace<gles2::cmds::FramebufferRenderbuffer>();
+ if (c) {
+ c->Init(target, attachment, renderbuffertarget, renderbuffer);
+ }
+}
+
+void FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture) {
+ gles2::cmds::FramebufferTexture2D* c =
+ GetCmdSpace<gles2::cmds::FramebufferTexture2D>();
+ if (c) {
+ c->Init(target, attachment, textarget, texture);
+ }
+}
+
+void FrontFace(GLenum mode) {
+ gles2::cmds::FrontFace* c = GetCmdSpace<gles2::cmds::FrontFace>();
+ if (c) {
+ c->Init(mode);
+ }
+}
+
+void GenBuffersImmediate(GLsizei n, GLuint* buffers) {
+ const uint32_t size = gles2::cmds::GenBuffersImmediate::ComputeSize(n);
+ gles2::cmds::GenBuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenBuffersImmediate>(size);
+ if (c) {
+ c->Init(n, buffers);
+ }
+}
+
+void GenerateMipmap(GLenum target) {
+ gles2::cmds::GenerateMipmap* c = GetCmdSpace<gles2::cmds::GenerateMipmap>();
+ if (c) {
+ c->Init(target);
+ }
+}
+
+void GenFramebuffersImmediate(GLsizei n, GLuint* framebuffers) {
+ const uint32_t size = gles2::cmds::GenFramebuffersImmediate::ComputeSize(n);
+ gles2::cmds::GenFramebuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenFramebuffersImmediate>(
+ size);
+ if (c) {
+ c->Init(n, framebuffers);
+ }
+}
+
+void GenRenderbuffersImmediate(GLsizei n, GLuint* renderbuffers) {
+ const uint32_t size = gles2::cmds::GenRenderbuffersImmediate::ComputeSize(n);
+ gles2::cmds::GenRenderbuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenRenderbuffersImmediate>(
+ size);
+ if (c) {
+ c->Init(n, renderbuffers);
+ }
+}
+
+void GenTexturesImmediate(GLsizei n, GLuint* textures) {
+ const uint32_t size = gles2::cmds::GenTexturesImmediate::ComputeSize(n);
+ gles2::cmds::GenTexturesImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenTexturesImmediate>(size);
+ if (c) {
+ c->Init(n, textures);
+ }
+}
+
+void GetActiveAttrib(GLuint program,
+ GLuint index,
+ uint32_t name_bucket_id,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::GetActiveAttrib* c = GetCmdSpace<gles2::cmds::GetActiveAttrib>();
+ if (c) {
+ c->Init(program, index, name_bucket_id, result_shm_id, result_shm_offset);
+ }
+}
+
+void GetActiveUniform(GLuint program,
+ GLuint index,
+ uint32_t name_bucket_id,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::GetActiveUniform* c =
+ GetCmdSpace<gles2::cmds::GetActiveUniform>();
+ if (c) {
+ c->Init(program, index, name_bucket_id, result_shm_id, result_shm_offset);
+ }
+}
+
+void GetAttachedShaders(GLuint program,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset,
+ uint32_t result_size) {
+ gles2::cmds::GetAttachedShaders* c =
+ GetCmdSpace<gles2::cmds::GetAttachedShaders>();
+ if (c) {
+ c->Init(program, result_shm_id, result_shm_offset, result_size);
+ }
+}
+
+void GetAttribLocation(GLuint program,
+ uint32_t name_bucket_id,
+ uint32_t location_shm_id,
+ uint32_t location_shm_offset) {
+ gles2::cmds::GetAttribLocation* c =
+ GetCmdSpace<gles2::cmds::GetAttribLocation>();
+ if (c) {
+ c->Init(program, name_bucket_id, location_shm_id, location_shm_offset);
+ }
+}
+
+void GetBooleanv(GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetBooleanv* c = GetCmdSpace<gles2::cmds::GetBooleanv>();
+ if (c) {
+ c->Init(pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetBufferParameteriv* c =
+ GetCmdSpace<gles2::cmds::GetBufferParameteriv>();
+ if (c) {
+ c->Init(target, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetError(uint32_t result_shm_id, uint32_t result_shm_offset) {
+ gles2::cmds::GetError* c = GetCmdSpace<gles2::cmds::GetError>();
+ if (c) {
+ c->Init(result_shm_id, result_shm_offset);
+ }
+}
+
+void GetFloatv(GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetFloatv* c = GetCmdSpace<gles2::cmds::GetFloatv>();
+ if (c) {
+ c->Init(pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetFramebufferAttachmentParameteriv* c =
+ GetCmdSpace<gles2::cmds::GetFramebufferAttachmentParameteriv>();
+ if (c) {
+ c->Init(target, attachment, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetIntegerv(GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetIntegerv* c = GetCmdSpace<gles2::cmds::GetIntegerv>();
+ if (c) {
+ c->Init(pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetProgramiv(GLuint program,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetProgramiv* c = GetCmdSpace<gles2::cmds::GetProgramiv>();
+ if (c) {
+ c->Init(program, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetProgramInfoLog(GLuint program, uint32_t bucket_id) {
+ gles2::cmds::GetProgramInfoLog* c =
+ GetCmdSpace<gles2::cmds::GetProgramInfoLog>();
+ if (c) {
+ c->Init(program, bucket_id);
+ }
+}
+
+void GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetRenderbufferParameteriv* c =
+ GetCmdSpace<gles2::cmds::GetRenderbufferParameteriv>();
+ if (c) {
+ c->Init(target, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetShaderiv(GLuint shader,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetShaderiv* c = GetCmdSpace<gles2::cmds::GetShaderiv>();
+ if (c) {
+ c->Init(shader, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetShaderInfoLog(GLuint shader, uint32_t bucket_id) {
+ gles2::cmds::GetShaderInfoLog* c =
+ GetCmdSpace<gles2::cmds::GetShaderInfoLog>();
+ if (c) {
+ c->Init(shader, bucket_id);
+ }
+}
+
+void GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::GetShaderPrecisionFormat* c =
+ GetCmdSpace<gles2::cmds::GetShaderPrecisionFormat>();
+ if (c) {
+ c->Init(shadertype, precisiontype, result_shm_id, result_shm_offset);
+ }
+}
+
+void GetShaderSource(GLuint shader, uint32_t bucket_id) {
+ gles2::cmds::GetShaderSource* c = GetCmdSpace<gles2::cmds::GetShaderSource>();
+ if (c) {
+ c->Init(shader, bucket_id);
+ }
+}
+
+void GetString(GLenum name, uint32_t bucket_id) {
+ gles2::cmds::GetString* c = GetCmdSpace<gles2::cmds::GetString>();
+ if (c) {
+ c->Init(name, bucket_id);
+ }
+}
+
+void GetTexParameterfv(GLenum target,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetTexParameterfv* c =
+ GetCmdSpace<gles2::cmds::GetTexParameterfv>();
+ if (c) {
+ c->Init(target, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetTexParameteriv(GLenum target,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetTexParameteriv* c =
+ GetCmdSpace<gles2::cmds::GetTexParameteriv>();
+ if (c) {
+ c->Init(target, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetUniformfv(GLuint program,
+ GLint location,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetUniformfv* c = GetCmdSpace<gles2::cmds::GetUniformfv>();
+ if (c) {
+ c->Init(program, location, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetUniformiv(GLuint program,
+ GLint location,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetUniformiv* c = GetCmdSpace<gles2::cmds::GetUniformiv>();
+ if (c) {
+ c->Init(program, location, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetUniformLocation(GLuint program,
+ uint32_t name_bucket_id,
+ uint32_t location_shm_id,
+ uint32_t location_shm_offset) {
+ gles2::cmds::GetUniformLocation* c =
+ GetCmdSpace<gles2::cmds::GetUniformLocation>();
+ if (c) {
+ c->Init(program, name_bucket_id, location_shm_id, location_shm_offset);
+ }
+}
+
+void GetVertexAttribfv(GLuint index,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetVertexAttribfv* c =
+ GetCmdSpace<gles2::cmds::GetVertexAttribfv>();
+ if (c) {
+ c->Init(index, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetVertexAttribiv(GLuint index,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetVertexAttribiv* c =
+ GetCmdSpace<gles2::cmds::GetVertexAttribiv>();
+ if (c) {
+ c->Init(index, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ uint32_t pointer_shm_id,
+ uint32_t pointer_shm_offset) {
+ gles2::cmds::GetVertexAttribPointerv* c =
+ GetCmdSpace<gles2::cmds::GetVertexAttribPointerv>();
+ if (c) {
+ c->Init(index, pname, pointer_shm_id, pointer_shm_offset);
+ }
+}
+
+void Hint(GLenum target, GLenum mode) {
+ gles2::cmds::Hint* c = GetCmdSpace<gles2::cmds::Hint>();
+ if (c) {
+ c->Init(target, mode);
+ }
+}
+
+void IsBuffer(GLuint buffer,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsBuffer* c = GetCmdSpace<gles2::cmds::IsBuffer>();
+ if (c) {
+ c->Init(buffer, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsEnabled(GLenum cap, uint32_t result_shm_id, uint32_t result_shm_offset) {
+ gles2::cmds::IsEnabled* c = GetCmdSpace<gles2::cmds::IsEnabled>();
+ if (c) {
+ c->Init(cap, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsFramebuffer(GLuint framebuffer,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsFramebuffer* c = GetCmdSpace<gles2::cmds::IsFramebuffer>();
+ if (c) {
+ c->Init(framebuffer, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsProgram(GLuint program,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsProgram* c = GetCmdSpace<gles2::cmds::IsProgram>();
+ if (c) {
+ c->Init(program, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsRenderbuffer(GLuint renderbuffer,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsRenderbuffer* c = GetCmdSpace<gles2::cmds::IsRenderbuffer>();
+ if (c) {
+ c->Init(renderbuffer, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsShader(GLuint shader,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsShader* c = GetCmdSpace<gles2::cmds::IsShader>();
+ if (c) {
+ c->Init(shader, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsTexture(GLuint texture,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsTexture* c = GetCmdSpace<gles2::cmds::IsTexture>();
+ if (c) {
+ c->Init(texture, result_shm_id, result_shm_offset);
+ }
+}
+
+void LineWidth(GLfloat width) {
+ gles2::cmds::LineWidth* c = GetCmdSpace<gles2::cmds::LineWidth>();
+ if (c) {
+ c->Init(width);
+ }
+}
+
+void LinkProgram(GLuint program) {
+ gles2::cmds::LinkProgram* c = GetCmdSpace<gles2::cmds::LinkProgram>();
+ if (c) {
+ c->Init(program);
+ }
+}
+
+void PixelStorei(GLenum pname, GLint param) {
+ gles2::cmds::PixelStorei* c = GetCmdSpace<gles2::cmds::PixelStorei>();
+ if (c) {
+ c->Init(pname, param);
+ }
+}
+
+void PolygonOffset(GLfloat factor, GLfloat units) {
+ gles2::cmds::PolygonOffset* c = GetCmdSpace<gles2::cmds::PolygonOffset>();
+ if (c) {
+ c->Init(factor, units);
+ }
+}
+
+void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ uint32_t pixels_shm_id,
+ uint32_t pixels_shm_offset,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset,
+ GLboolean async) {
+ gles2::cmds::ReadPixels* c = GetCmdSpace<gles2::cmds::ReadPixels>();
+ if (c) {
+ c->Init(x,
+ y,
+ width,
+ height,
+ format,
+ type,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ async);
+ }
+}
+
+void ReleaseShaderCompiler() {
+ gles2::cmds::ReleaseShaderCompiler* c =
+ GetCmdSpace<gles2::cmds::ReleaseShaderCompiler>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::RenderbufferStorage* c =
+ GetCmdSpace<gles2::cmds::RenderbufferStorage>();
+ if (c) {
+ c->Init(target, internalformat, width, height);
+ }
+}
+
+void SampleCoverage(GLclampf value, GLboolean invert) {
+ gles2::cmds::SampleCoverage* c = GetCmdSpace<gles2::cmds::SampleCoverage>();
+ if (c) {
+ c->Init(value, invert);
+ }
+}
+
+void Scissor(GLint x, GLint y, GLsizei width, GLsizei height) {
+ gles2::cmds::Scissor* c = GetCmdSpace<gles2::cmds::Scissor>();
+ if (c) {
+ c->Init(x, y, width, height);
+ }
+}
+
+void ShaderBinary(GLsizei n,
+ uint32_t shaders_shm_id,
+ uint32_t shaders_shm_offset,
+ GLenum binaryformat,
+ uint32_t binary_shm_id,
+ uint32_t binary_shm_offset,
+ GLsizei length) {
+ gles2::cmds::ShaderBinary* c = GetCmdSpace<gles2::cmds::ShaderBinary>();
+ if (c) {
+ c->Init(n,
+ shaders_shm_id,
+ shaders_shm_offset,
+ binaryformat,
+ binary_shm_id,
+ binary_shm_offset,
+ length);
+ }
+}
+
+void ShaderSourceBucket(GLuint shader, uint32_t data_bucket_id) {
+ gles2::cmds::ShaderSourceBucket* c =
+ GetCmdSpace<gles2::cmds::ShaderSourceBucket>();
+ if (c) {
+ c->Init(shader, data_bucket_id);
+ }
+}
+
+void StencilFunc(GLenum func, GLint ref, GLuint mask) {
+ gles2::cmds::StencilFunc* c = GetCmdSpace<gles2::cmds::StencilFunc>();
+ if (c) {
+ c->Init(func, ref, mask);
+ }
+}
+
+void StencilFuncSeparate(GLenum face, GLenum func, GLint ref, GLuint mask) {
+ gles2::cmds::StencilFuncSeparate* c =
+ GetCmdSpace<gles2::cmds::StencilFuncSeparate>();
+ if (c) {
+ c->Init(face, func, ref, mask);
+ }
+}
+
+void StencilMask(GLuint mask) {
+ gles2::cmds::StencilMask* c = GetCmdSpace<gles2::cmds::StencilMask>();
+ if (c) {
+ c->Init(mask);
+ }
+}
+
+void StencilMaskSeparate(GLenum face, GLuint mask) {
+ gles2::cmds::StencilMaskSeparate* c =
+ GetCmdSpace<gles2::cmds::StencilMaskSeparate>();
+ if (c) {
+ c->Init(face, mask);
+ }
+}
+
+void StencilOp(GLenum fail, GLenum zfail, GLenum zpass) {
+ gles2::cmds::StencilOp* c = GetCmdSpace<gles2::cmds::StencilOp>();
+ if (c) {
+ c->Init(fail, zfail, zpass);
+ }
+}
+
+void StencilOpSeparate(GLenum face, GLenum fail, GLenum zfail, GLenum zpass) {
+ gles2::cmds::StencilOpSeparate* c =
+ GetCmdSpace<gles2::cmds::StencilOpSeparate>();
+ if (c) {
+ c->Init(face, fail, zfail, zpass);
+ }
+}
+
+void TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ uint32_t pixels_shm_id,
+ uint32_t pixels_shm_offset) {
+ gles2::cmds::TexImage2D* c = GetCmdSpace<gles2::cmds::TexImage2D>();
+ if (c) {
+ c->Init(target,
+ level,
+ internalformat,
+ width,
+ height,
+ format,
+ type,
+ pixels_shm_id,
+ pixels_shm_offset);
+ }
+}
+
+void TexParameterf(GLenum target, GLenum pname, GLfloat param) {
+ gles2::cmds::TexParameterf* c = GetCmdSpace<gles2::cmds::TexParameterf>();
+ if (c) {
+ c->Init(target, pname, param);
+ }
+}
+
+void TexParameterfvImmediate(GLenum target,
+ GLenum pname,
+ const GLfloat* params) {
+ const uint32_t size = gles2::cmds::TexParameterfvImmediate::ComputeSize();
+ gles2::cmds::TexParameterfvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::TexParameterfvImmediate>(size);
+ if (c) {
+ c->Init(target, pname, params);
+ }
+}
+
+void TexParameteri(GLenum target, GLenum pname, GLint param) {
+ gles2::cmds::TexParameteri* c = GetCmdSpace<gles2::cmds::TexParameteri>();
+ if (c) {
+ c->Init(target, pname, param);
+ }
+}
+
+void TexParameterivImmediate(GLenum target, GLenum pname, const GLint* params) {
+ const uint32_t size = gles2::cmds::TexParameterivImmediate::ComputeSize();
+ gles2::cmds::TexParameterivImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::TexParameterivImmediate>(size);
+ if (c) {
+ c->Init(target, pname, params);
+ }
+}
+
+void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ uint32_t pixels_shm_id,
+ uint32_t pixels_shm_offset,
+ GLboolean internal) {
+ gles2::cmds::TexSubImage2D* c = GetCmdSpace<gles2::cmds::TexSubImage2D>();
+ if (c) {
+ c->Init(target,
+ level,
+ xoffset,
+ yoffset,
+ width,
+ height,
+ format,
+ type,
+ pixels_shm_id,
+ pixels_shm_offset,
+ internal);
+ }
+}
+
+void Uniform1f(GLint location, GLfloat x) {
+ gles2::cmds::Uniform1f* c = GetCmdSpace<gles2::cmds::Uniform1f>();
+ if (c) {
+ c->Init(location, x);
+ }
+}
+
+void Uniform1fvImmediate(GLint location, GLsizei count, const GLfloat* v) {
+ const uint32_t size = gles2::cmds::Uniform1fvImmediate::ComputeSize(count);
+ gles2::cmds::Uniform1fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform1fvImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform1i(GLint location, GLint x) {
+ gles2::cmds::Uniform1i* c = GetCmdSpace<gles2::cmds::Uniform1i>();
+ if (c) {
+ c->Init(location, x);
+ }
+}
+
+void Uniform1ivImmediate(GLint location, GLsizei count, const GLint* v) {
+ const uint32_t size = gles2::cmds::Uniform1ivImmediate::ComputeSize(count);
+ gles2::cmds::Uniform1ivImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform1ivImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform2f(GLint location, GLfloat x, GLfloat y) {
+ gles2::cmds::Uniform2f* c = GetCmdSpace<gles2::cmds::Uniform2f>();
+ if (c) {
+ c->Init(location, x, y);
+ }
+}
+
+void Uniform2fvImmediate(GLint location, GLsizei count, const GLfloat* v) {
+ const uint32_t size = gles2::cmds::Uniform2fvImmediate::ComputeSize(count);
+ gles2::cmds::Uniform2fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform2fvImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform2i(GLint location, GLint x, GLint y) {
+ gles2::cmds::Uniform2i* c = GetCmdSpace<gles2::cmds::Uniform2i>();
+ if (c) {
+ c->Init(location, x, y);
+ }
+}
+
+void Uniform2ivImmediate(GLint location, GLsizei count, const GLint* v) {
+ const uint32_t size = gles2::cmds::Uniform2ivImmediate::ComputeSize(count);
+ gles2::cmds::Uniform2ivImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform2ivImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform3f(GLint location, GLfloat x, GLfloat y, GLfloat z) {
+ gles2::cmds::Uniform3f* c = GetCmdSpace<gles2::cmds::Uniform3f>();
+ if (c) {
+ c->Init(location, x, y, z);
+ }
+}
+
+void Uniform3fvImmediate(GLint location, GLsizei count, const GLfloat* v) {
+ const uint32_t size = gles2::cmds::Uniform3fvImmediate::ComputeSize(count);
+ gles2::cmds::Uniform3fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform3fvImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform3i(GLint location, GLint x, GLint y, GLint z) {
+ gles2::cmds::Uniform3i* c = GetCmdSpace<gles2::cmds::Uniform3i>();
+ if (c) {
+ c->Init(location, x, y, z);
+ }
+}
+
+void Uniform3ivImmediate(GLint location, GLsizei count, const GLint* v) {
+ const uint32_t size = gles2::cmds::Uniform3ivImmediate::ComputeSize(count);
+ gles2::cmds::Uniform3ivImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform3ivImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform4f(GLint location, GLfloat x, GLfloat y, GLfloat z, GLfloat w) {
+ gles2::cmds::Uniform4f* c = GetCmdSpace<gles2::cmds::Uniform4f>();
+ if (c) {
+ c->Init(location, x, y, z, w);
+ }
+}
+
+void Uniform4fvImmediate(GLint location, GLsizei count, const GLfloat* v) {
+ const uint32_t size = gles2::cmds::Uniform4fvImmediate::ComputeSize(count);
+ gles2::cmds::Uniform4fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform4fvImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform4i(GLint location, GLint x, GLint y, GLint z, GLint w) {
+ gles2::cmds::Uniform4i* c = GetCmdSpace<gles2::cmds::Uniform4i>();
+ if (c) {
+ c->Init(location, x, y, z, w);
+ }
+}
+
+void Uniform4ivImmediate(GLint location, GLsizei count, const GLint* v) {
+ const uint32_t size = gles2::cmds::Uniform4ivImmediate::ComputeSize(count);
+ gles2::cmds::Uniform4ivImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform4ivImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void UniformMatrix2fvImmediate(GLint location,
+ GLsizei count,
+ const GLfloat* value) {
+ const uint32_t size =
+ gles2::cmds::UniformMatrix2fvImmediate::ComputeSize(count);
+ gles2::cmds::UniformMatrix2fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::UniformMatrix2fvImmediate>(
+ size);
+ if (c) {
+ c->Init(location, count, value);
+ }
+}
+
+void UniformMatrix3fvImmediate(GLint location,
+ GLsizei count,
+ const GLfloat* value) {
+ const uint32_t size =
+ gles2::cmds::UniformMatrix3fvImmediate::ComputeSize(count);
+ gles2::cmds::UniformMatrix3fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::UniformMatrix3fvImmediate>(
+ size);
+ if (c) {
+ c->Init(location, count, value);
+ }
+}
+
+void UniformMatrix4fvImmediate(GLint location,
+ GLsizei count,
+ const GLfloat* value) {
+ const uint32_t size =
+ gles2::cmds::UniformMatrix4fvImmediate::ComputeSize(count);
+ gles2::cmds::UniformMatrix4fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::UniformMatrix4fvImmediate>(
+ size);
+ if (c) {
+ c->Init(location, count, value);
+ }
+}
+
+void UseProgram(GLuint program) {
+ gles2::cmds::UseProgram* c = GetCmdSpace<gles2::cmds::UseProgram>();
+ if (c) {
+ c->Init(program);
+ }
+}
+
+void ValidateProgram(GLuint program) {
+ gles2::cmds::ValidateProgram* c = GetCmdSpace<gles2::cmds::ValidateProgram>();
+ if (c) {
+ c->Init(program);
+ }
+}
+
+void VertexAttrib1f(GLuint indx, GLfloat x) {
+ gles2::cmds::VertexAttrib1f* c = GetCmdSpace<gles2::cmds::VertexAttrib1f>();
+ if (c) {
+ c->Init(indx, x);
+ }
+}
+
+void VertexAttrib1fvImmediate(GLuint indx, const GLfloat* values) {
+ const uint32_t size = gles2::cmds::VertexAttrib1fvImmediate::ComputeSize();
+ gles2::cmds::VertexAttrib1fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::VertexAttrib1fvImmediate>(
+ size);
+ if (c) {
+ c->Init(indx, values);
+ }
+}
+
+void VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) {
+ gles2::cmds::VertexAttrib2f* c = GetCmdSpace<gles2::cmds::VertexAttrib2f>();
+ if (c) {
+ c->Init(indx, x, y);
+ }
+}
+
+void VertexAttrib2fvImmediate(GLuint indx, const GLfloat* values) {
+ const uint32_t size = gles2::cmds::VertexAttrib2fvImmediate::ComputeSize();
+ gles2::cmds::VertexAttrib2fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::VertexAttrib2fvImmediate>(
+ size);
+ if (c) {
+ c->Init(indx, values);
+ }
+}
+
+void VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z) {
+ gles2::cmds::VertexAttrib3f* c = GetCmdSpace<gles2::cmds::VertexAttrib3f>();
+ if (c) {
+ c->Init(indx, x, y, z);
+ }
+}
+
+void VertexAttrib3fvImmediate(GLuint indx, const GLfloat* values) {
+ const uint32_t size = gles2::cmds::VertexAttrib3fvImmediate::ComputeSize();
+ gles2::cmds::VertexAttrib3fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::VertexAttrib3fvImmediate>(
+ size);
+ if (c) {
+ c->Init(indx, values);
+ }
+}
+
+void VertexAttrib4f(GLuint indx, GLfloat x, GLfloat y, GLfloat z, GLfloat w) {
+ gles2::cmds::VertexAttrib4f* c = GetCmdSpace<gles2::cmds::VertexAttrib4f>();
+ if (c) {
+ c->Init(indx, x, y, z, w);
+ }
+}
+
+void VertexAttrib4fvImmediate(GLuint indx, const GLfloat* values) {
+ const uint32_t size = gles2::cmds::VertexAttrib4fvImmediate::ComputeSize();
+ gles2::cmds::VertexAttrib4fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::VertexAttrib4fvImmediate>(
+ size);
+ if (c) {
+ c->Init(indx, values);
+ }
+}
+
+void VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ GLuint offset) {
+ gles2::cmds::VertexAttribPointer* c =
+ GetCmdSpace<gles2::cmds::VertexAttribPointer>();
+ if (c) {
+ c->Init(indx, size, type, normalized, stride, offset);
+ }
+}
+
+void Viewport(GLint x, GLint y, GLsizei width, GLsizei height) {
+ gles2::cmds::Viewport* c = GetCmdSpace<gles2::cmds::Viewport>();
+ if (c) {
+ c->Init(x, y, width, height);
+ }
+}
+
+void BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) {
+ gles2::cmds::BlitFramebufferCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::BlitFramebufferCHROMIUM>();
+ if (c) {
+ c->Init(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ }
+}
+
+void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::RenderbufferStorageMultisampleCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::RenderbufferStorageMultisampleCHROMIUM>();
+ if (c) {
+ c->Init(target, samples, internalformat, width, height);
+ }
+}
+
+void RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::RenderbufferStorageMultisampleEXT* c =
+ GetCmdSpace<gles2::cmds::RenderbufferStorageMultisampleEXT>();
+ if (c) {
+ c->Init(target, samples, internalformat, width, height);
+ }
+}
+
+void FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLsizei samples) {
+ gles2::cmds::FramebufferTexture2DMultisampleEXT* c =
+ GetCmdSpace<gles2::cmds::FramebufferTexture2DMultisampleEXT>();
+ if (c) {
+ c->Init(target, attachment, textarget, texture, samples);
+ }
+}
+
+void TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::TexStorage2DEXT* c = GetCmdSpace<gles2::cmds::TexStorage2DEXT>();
+ if (c) {
+ c->Init(target, levels, internalFormat, width, height);
+ }
+}
+
+void GenQueriesEXTImmediate(GLsizei n, GLuint* queries) {
+ const uint32_t size = gles2::cmds::GenQueriesEXTImmediate::ComputeSize(n);
+ gles2::cmds::GenQueriesEXTImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenQueriesEXTImmediate>(size);
+ if (c) {
+ c->Init(n, queries);
+ }
+}
+
+void DeleteQueriesEXTImmediate(GLsizei n, const GLuint* queries) {
+ const uint32_t size = gles2::cmds::DeleteQueriesEXTImmediate::ComputeSize(n);
+ gles2::cmds::DeleteQueriesEXTImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DeleteQueriesEXTImmediate>(
+ size);
+ if (c) {
+ c->Init(n, queries);
+ }
+}
+
+void BeginQueryEXT(GLenum target,
+ GLuint id,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ gles2::cmds::BeginQueryEXT* c = GetCmdSpace<gles2::cmds::BeginQueryEXT>();
+ if (c) {
+ c->Init(target, id, sync_data_shm_id, sync_data_shm_offset);
+ }
+}
+
+void EndQueryEXT(GLenum target, GLuint submit_count) {
+ gles2::cmds::EndQueryEXT* c = GetCmdSpace<gles2::cmds::EndQueryEXT>();
+ if (c) {
+ c->Init(target, submit_count);
+ }
+}
+
+void InsertEventMarkerEXT(GLuint bucket_id) {
+ gles2::cmds::InsertEventMarkerEXT* c =
+ GetCmdSpace<gles2::cmds::InsertEventMarkerEXT>();
+ if (c) {
+ c->Init(bucket_id);
+ }
+}
+
+void PushGroupMarkerEXT(GLuint bucket_id) {
+ gles2::cmds::PushGroupMarkerEXT* c =
+ GetCmdSpace<gles2::cmds::PushGroupMarkerEXT>();
+ if (c) {
+ c->Init(bucket_id);
+ }
+}
+
+void PopGroupMarkerEXT() {
+ gles2::cmds::PopGroupMarkerEXT* c =
+ GetCmdSpace<gles2::cmds::PopGroupMarkerEXT>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void GenVertexArraysOESImmediate(GLsizei n, GLuint* arrays) {
+ const uint32_t size =
+ gles2::cmds::GenVertexArraysOESImmediate::ComputeSize(n);
+ gles2::cmds::GenVertexArraysOESImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenVertexArraysOESImmediate>(
+ size);
+ if (c) {
+ c->Init(n, arrays);
+ }
+}
+
+void DeleteVertexArraysOESImmediate(GLsizei n, const GLuint* arrays) {
+ const uint32_t size =
+ gles2::cmds::DeleteVertexArraysOESImmediate::ComputeSize(n);
+ gles2::cmds::DeleteVertexArraysOESImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::DeleteVertexArraysOESImmediate>(size);
+ if (c) {
+ c->Init(n, arrays);
+ }
+}
+
+void IsVertexArrayOES(GLuint array,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsVertexArrayOES* c =
+ GetCmdSpace<gles2::cmds::IsVertexArrayOES>();
+ if (c) {
+ c->Init(array, result_shm_id, result_shm_offset);
+ }
+}
+
+void BindVertexArrayOES(GLuint array) {
+ gles2::cmds::BindVertexArrayOES* c =
+ GetCmdSpace<gles2::cmds::BindVertexArrayOES>();
+ if (c) {
+ c->Init(array);
+ }
+}
+
+void SwapBuffers() {
+ gles2::cmds::SwapBuffers* c = GetCmdSpace<gles2::cmds::SwapBuffers>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::GetMaxValueInBufferCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::GetMaxValueInBufferCHROMIUM>();
+ if (c) {
+ c->Init(buffer_id, count, type, offset, result_shm_id, result_shm_offset);
+ }
+}
+
+void EnableFeatureCHROMIUM(GLuint bucket_id,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::EnableFeatureCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::EnableFeatureCHROMIUM>();
+ if (c) {
+ c->Init(bucket_id, result_shm_id, result_shm_offset);
+ }
+}
+
+void ResizeCHROMIUM(GLuint width, GLuint height, GLfloat scale_factor) {
+ gles2::cmds::ResizeCHROMIUM* c = GetCmdSpace<gles2::cmds::ResizeCHROMIUM>();
+ if (c) {
+ c->Init(width, height, scale_factor);
+ }
+}
+
+void GetRequestableExtensionsCHROMIUM(uint32_t bucket_id) {
+ gles2::cmds::GetRequestableExtensionsCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::GetRequestableExtensionsCHROMIUM>();
+ if (c) {
+ c->Init(bucket_id);
+ }
+}
+
+void RequestExtensionCHROMIUM(uint32_t bucket_id) {
+ gles2::cmds::RequestExtensionCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::RequestExtensionCHROMIUM>();
+ if (c) {
+ c->Init(bucket_id);
+ }
+}
+
+void GetMultipleIntegervCHROMIUM(uint32_t pnames_shm_id,
+ uint32_t pnames_shm_offset,
+ GLuint count,
+ uint32_t results_shm_id,
+ uint32_t results_shm_offset,
+ GLsizeiptr size) {
+ gles2::cmds::GetMultipleIntegervCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::GetMultipleIntegervCHROMIUM>();
+ if (c) {
+ c->Init(pnames_shm_id,
+ pnames_shm_offset,
+ count,
+ results_shm_id,
+ results_shm_offset,
+ size);
+ }
+}
+
+void GetProgramInfoCHROMIUM(GLuint program, uint32_t bucket_id) {
+ gles2::cmds::GetProgramInfoCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::GetProgramInfoCHROMIUM>();
+ if (c) {
+ c->Init(program, bucket_id);
+ }
+}
+
+void GetTranslatedShaderSourceANGLE(GLuint shader, uint32_t bucket_id) {
+ gles2::cmds::GetTranslatedShaderSourceANGLE* c =
+ GetCmdSpace<gles2::cmds::GetTranslatedShaderSourceANGLE>();
+ if (c) {
+ c->Init(shader, bucket_id);
+ }
+}
+
+void PostSubBufferCHROMIUM(GLint x, GLint y, GLint width, GLint height) {
+ gles2::cmds::PostSubBufferCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::PostSubBufferCHROMIUM>();
+ if (c) {
+ c->Init(x, y, width, height);
+ }
+}
+
+void TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) {
+ gles2::cmds::TexImageIOSurface2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::TexImageIOSurface2DCHROMIUM>();
+ if (c) {
+ c->Init(target, width, height, ioSurfaceId, plane);
+ }
+}
+
+void CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) {
+ gles2::cmds::CopyTextureCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::CopyTextureCHROMIUM>();
+ if (c) {
+ c->Init(target, source_id, dest_id, level, internalformat, dest_type);
+ }
+}
+
+void DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) {
+ gles2::cmds::DrawArraysInstancedANGLE* c =
+ GetCmdSpace<gles2::cmds::DrawArraysInstancedANGLE>();
+ if (c) {
+ c->Init(mode, first, count, primcount);
+ }
+}
+
+void DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ GLuint index_offset,
+ GLsizei primcount) {
+ gles2::cmds::DrawElementsInstancedANGLE* c =
+ GetCmdSpace<gles2::cmds::DrawElementsInstancedANGLE>();
+ if (c) {
+ c->Init(mode, count, type, index_offset, primcount);
+ }
+}
+
+void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) {
+ gles2::cmds::VertexAttribDivisorANGLE* c =
+ GetCmdSpace<gles2::cmds::VertexAttribDivisorANGLE>();
+ if (c) {
+ c->Init(index, divisor);
+ }
+}
+
+void ProduceTextureCHROMIUMImmediate(GLenum target, const GLbyte* mailbox) {
+ const uint32_t size =
+ gles2::cmds::ProduceTextureCHROMIUMImmediate::ComputeSize();
+ gles2::cmds::ProduceTextureCHROMIUMImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::ProduceTextureCHROMIUMImmediate>(size);
+ if (c) {
+ c->Init(target, mailbox);
+ }
+}
+
+void ProduceTextureDirectCHROMIUMImmediate(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) {
+ const uint32_t size =
+ gles2::cmds::ProduceTextureDirectCHROMIUMImmediate::ComputeSize();
+ gles2::cmds::ProduceTextureDirectCHROMIUMImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::ProduceTextureDirectCHROMIUMImmediate>(size);
+ if (c) {
+ c->Init(texture, target, mailbox);
+ }
+}
+
+void ConsumeTextureCHROMIUMImmediate(GLenum target, const GLbyte* mailbox) {
+ const uint32_t size =
+ gles2::cmds::ConsumeTextureCHROMIUMImmediate::ComputeSize();
+ gles2::cmds::ConsumeTextureCHROMIUMImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::ConsumeTextureCHROMIUMImmediate>(size);
+ if (c) {
+ c->Init(target, mailbox);
+ }
+}
+
+void BindUniformLocationCHROMIUMBucket(GLuint program,
+ GLint location,
+ uint32_t name_bucket_id) {
+ gles2::cmds::BindUniformLocationCHROMIUMBucket* c =
+ GetCmdSpace<gles2::cmds::BindUniformLocationCHROMIUMBucket>();
+ if (c) {
+ c->Init(program, location, name_bucket_id);
+ }
+}
+
+void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) {
+ gles2::cmds::BindTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::BindTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init(target, imageId);
+ }
+}
+
+void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) {
+ gles2::cmds::ReleaseTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::ReleaseTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init(target, imageId);
+ }
+}
+
+void TraceBeginCHROMIUM(GLuint bucket_id) {
+ gles2::cmds::TraceBeginCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::TraceBeginCHROMIUM>();
+ if (c) {
+ c->Init(bucket_id);
+ }
+}
+
+void TraceEndCHROMIUM() {
+ gles2::cmds::TraceEndCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::TraceEndCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset,
+ uint32_t async_upload_token,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ gles2::cmds::AsyncTexSubImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::AsyncTexSubImage2DCHROMIUM>();
+ if (c) {
+ c->Init(target,
+ level,
+ xoffset,
+ yoffset,
+ width,
+ height,
+ format,
+ type,
+ data_shm_id,
+ data_shm_offset,
+ async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
+ }
+}
+
+void AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ uint32_t pixels_shm_id,
+ uint32_t pixels_shm_offset,
+ uint32_t async_upload_token,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ gles2::cmds::AsyncTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::AsyncTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init(target,
+ level,
+ internalformat,
+ width,
+ height,
+ format,
+ type,
+ pixels_shm_id,
+ pixels_shm_offset,
+ async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
+ }
+}
+
+void WaitAsyncTexImage2DCHROMIUM(GLenum target) {
+ gles2::cmds::WaitAsyncTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::WaitAsyncTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init(target);
+ }
+}
+
+void WaitAllAsyncTexImage2DCHROMIUM() {
+ gles2::cmds::WaitAllAsyncTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::WaitAllAsyncTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void DiscardFramebufferEXTImmediate(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) {
+ const uint32_t size =
+ gles2::cmds::DiscardFramebufferEXTImmediate::ComputeSize(count);
+ gles2::cmds::DiscardFramebufferEXTImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::DiscardFramebufferEXTImmediate>(size);
+ if (c) {
+ c->Init(target, count, attachments);
+ }
+}
+
+void LoseContextCHROMIUM(GLenum current, GLenum other) {
+ gles2::cmds::LoseContextCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::LoseContextCHROMIUM>();
+ if (c) {
+ c->Init(current, other);
+ }
+}
+
+void WaitSyncPointCHROMIUM(GLuint sync_point) {
+ gles2::cmds::WaitSyncPointCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::WaitSyncPointCHROMIUM>();
+ if (c) {
+ c->Init(sync_point);
+ }
+}
+
+void DrawBuffersEXTImmediate(GLsizei count, const GLenum* bufs) {
+ const uint32_t size =
+ gles2::cmds::DrawBuffersEXTImmediate::ComputeSize(count);
+ gles2::cmds::DrawBuffersEXTImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DrawBuffersEXTImmediate>(size);
+ if (c) {
+ c->Init(count, bufs);
+ }
+}
+
+void DiscardBackbufferCHROMIUM() {
+ gles2::cmds::DiscardBackbufferCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::DiscardBackbufferCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) {
+ gles2::cmds::ScheduleOverlayPlaneCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::ScheduleOverlayPlaneCHROMIUM>();
+ if (c) {
+ c->Init(plane_z_order,
+ plane_transform,
+ overlay_texture_id,
+ bounds_x,
+ bounds_y,
+ bounds_width,
+ bounds_height,
+ uv_x,
+ uv_y,
+ uv_width,
+ uv_height);
+ }
+}
+
+void MatrixLoadfCHROMIUMImmediate(GLenum matrixMode, const GLfloat* m) {
+ const uint32_t size =
+ gles2::cmds::MatrixLoadfCHROMIUMImmediate::ComputeSize();
+ gles2::cmds::MatrixLoadfCHROMIUMImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::MatrixLoadfCHROMIUMImmediate>(
+ size);
+ if (c) {
+ c->Init(matrixMode, m);
+ }
+}
+
+void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
+ gles2::cmds::MatrixLoadIdentityCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::MatrixLoadIdentityCHROMIUM>();
+ if (c) {
+ c->Init(matrixMode);
+ }
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_impl_export.h b/gpu/command_buffer/client/gles2_impl_export.h
new file mode 100644
index 0000000..ee63565
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_impl_export.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPL_EXPORT_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPL_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(GLES2_IMPL_IMPLEMENTATION)
+#define GLES2_IMPL_EXPORT __declspec(dllexport)
+#else
+#define GLES2_IMPL_EXPORT __declspec(dllimport)
+#endif // defined(GLES2_IMPL_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(GLES2_IMPL_IMPLEMENTATION)
+#define GLES2_IMPL_EXPORT __attribute__((visibility("default")))
+#else
+#define GLES2_IMPL_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define GLES2_IMPL_EXPORT
+#endif
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPL_EXPORT_H_
diff --git a/gpu/command_buffer/client/gles2_implementation.cc b/gpu/command_buffer/client/gles2_implementation.cc
new file mode 100644
index 0000000..aabfa45
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation.cc
@@ -0,0 +1,4190 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A class to emulate GLES2 over command buffers.
+
+#include "gpu/command_buffer/client/gles2_implementation.h"
+
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+#include <algorithm>
+#include <limits>
+#include <map>
+#include <queue>
+#include <set>
+#include <sstream>
+#include <string>
+#include "base/bind.h"
+#include "gpu/command_buffer/client/buffer_tracker.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/client/gpu_memory_buffer_tracker.h"
+#include "gpu/command_buffer/client/program_info_manager.h"
+#include "gpu/command_buffer/client/query_tracker.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/client/vertex_array_object_manager.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/trace_event.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+
+#if defined(__native_client__) && !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+#define GLES2_SUPPORT_CLIENT_SIDE_ARRAYS
+#endif
+
+#if defined(GPU_CLIENT_DEBUG)
+#include "base/command_line.h"
+#include "gpu/command_buffer/client/gpu_switches.h"
+#endif
+
+namespace gpu {
+namespace gles2 {
+
+// A 32-bit and 64-bit compatible way of converting a pointer to a GLuint.
+static GLuint ToGLuint(const void* ptr) {
+ return static_cast<GLuint>(reinterpret_cast<size_t>(ptr));
+}
+
+#if !defined(_MSC_VER)
+const size_t GLES2Implementation::kMaxSizeOfSimpleResult;
+const unsigned int GLES2Implementation::kStartingOffset;
+#endif
+
+GLES2Implementation::GLStaticState::GLStaticState() {
+}
+
+GLES2Implementation::GLStaticState::~GLStaticState() {
+}
+
+GLES2Implementation::GLStaticState::IntState::IntState()
+ : max_combined_texture_image_units(0),
+ max_cube_map_texture_size(0),
+ max_fragment_uniform_vectors(0),
+ max_renderbuffer_size(0),
+ max_texture_image_units(0),
+ max_texture_size(0),
+ max_varying_vectors(0),
+ max_vertex_attribs(0),
+ max_vertex_texture_image_units(0),
+ max_vertex_uniform_vectors(0),
+ num_compressed_texture_formats(0),
+ num_shader_binary_formats(0),
+ bind_generates_resource_chromium(0) {}
+
+GLES2Implementation::SingleThreadChecker::SingleThreadChecker(
+ GLES2Implementation* gles2_implementation)
+ : gles2_implementation_(gles2_implementation) {
+ CHECK_EQ(0, gles2_implementation_->use_count_);
+ ++gles2_implementation_->use_count_;
+}
+
+GLES2Implementation::SingleThreadChecker::~SingleThreadChecker() {
+ --gles2_implementation_->use_count_;
+ CHECK_EQ(0, gles2_implementation_->use_count_);
+}
+
+GLES2Implementation::GLES2Implementation(
+ GLES2CmdHelper* helper,
+ ShareGroup* share_group,
+ TransferBufferInterface* transfer_buffer,
+ bool bind_generates_resource,
+ bool lose_context_when_out_of_memory,
+ GpuControl* gpu_control)
+ : helper_(helper),
+ transfer_buffer_(transfer_buffer),
+ angle_pack_reverse_row_order_status_(kUnknownExtensionStatus),
+ chromium_framebuffer_multisample_(kUnknownExtensionStatus),
+ pack_alignment_(4),
+ unpack_alignment_(4),
+ unpack_flip_y_(false),
+ unpack_row_length_(0),
+ unpack_skip_rows_(0),
+ unpack_skip_pixels_(0),
+ pack_reverse_row_order_(false),
+ active_texture_unit_(0),
+ bound_framebuffer_(0),
+ bound_read_framebuffer_(0),
+ bound_renderbuffer_(0),
+ current_program_(0),
+ bound_array_buffer_id_(0),
+ bound_pixel_pack_transfer_buffer_id_(0),
+ bound_pixel_unpack_transfer_buffer_id_(0),
+ async_upload_token_(0),
+ async_upload_sync_(NULL),
+ async_upload_sync_shm_id_(0),
+ async_upload_sync_shm_offset_(0),
+ error_bits_(0),
+ debug_(false),
+ lose_context_when_out_of_memory_(lose_context_when_out_of_memory),
+ use_count_(0),
+ error_message_callback_(NULL),
+ gpu_control_(gpu_control),
+ capabilities_(gpu_control->GetCapabilities()),
+ weak_ptr_factory_(this) {
+ DCHECK(helper);
+ DCHECK(transfer_buffer);
+ DCHECK(gpu_control);
+
+ std::stringstream ss;
+ ss << std::hex << this;
+ this_in_hex_ = ss.str();
+
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ debug_ = CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUClientLogging);
+ });
+
+ share_group_ =
+ (share_group ? share_group : new ShareGroup(bind_generates_resource));
+ DCHECK(share_group_->bind_generates_resource() == bind_generates_resource);
+
+ memset(&reserved_ids_, 0, sizeof(reserved_ids_));
+}
+
+bool GLES2Implementation::Initialize(
+ unsigned int starting_transfer_buffer_size,
+ unsigned int min_transfer_buffer_size,
+ unsigned int max_transfer_buffer_size,
+ unsigned int mapped_memory_limit) {
+ TRACE_EVENT0("gpu", "GLES2Implementation::Initialize");
+ DCHECK_GE(starting_transfer_buffer_size, min_transfer_buffer_size);
+ DCHECK_LE(starting_transfer_buffer_size, max_transfer_buffer_size);
+ DCHECK_GE(min_transfer_buffer_size, kStartingOffset);
+
+ if (!transfer_buffer_->Initialize(
+ starting_transfer_buffer_size,
+ kStartingOffset,
+ min_transfer_buffer_size,
+ max_transfer_buffer_size,
+ kAlignment,
+ kSizeToFlush)) {
+ return false;
+ }
+
+ mapped_memory_.reset(
+ new MappedMemoryManager(
+ helper_,
+ base::Bind(&GLES2Implementation::PollAsyncUploads,
+                     // The mapped memory manager is owned by |this| here, and
+                     // since it's destroyed before we destroy ourselves, we
+                     // don't need extra safety measures for this closure.
+ base::Unretained(this)),
+ mapped_memory_limit));
+
+ unsigned chunk_size = 2 * 1024 * 1024;
+ if (mapped_memory_limit != kNoLimit) {
+    // Use smaller chunks if the client is very memory conscious.
+ chunk_size = std::min(mapped_memory_limit / 4, chunk_size);
+ }
+ mapped_memory_->set_chunk_size_multiple(chunk_size);
+
+ if (!QueryAndCacheStaticState())
+ return false;
+
+ util_.set_num_compressed_texture_formats(
+ static_state_.int_state.num_compressed_texture_formats);
+ util_.set_num_shader_binary_formats(
+ static_state_.int_state.num_shader_binary_formats);
+
+ texture_units_.reset(
+ new TextureUnit[
+ static_state_.int_state.max_combined_texture_image_units]);
+
+ query_tracker_.reset(new QueryTracker(mapped_memory_.get()));
+ buffer_tracker_.reset(new BufferTracker(mapped_memory_.get()));
+ gpu_memory_buffer_tracker_.reset(new GpuMemoryBufferTracker(gpu_control_));
+
+ query_id_allocator_.reset(new IdAllocator());
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
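+  // Reserve buffer ids that the implementation uses internally to emulate
+  // client-side vertex arrays.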
+ GetIdHandler(id_namespaces::kBuffers)->MakeIds(
+ this, kClientSideArrayId, arraysize(reserved_ids_), &reserved_ids_[0]);
+#endif
+
+ vertex_array_object_manager_.reset(new VertexArrayObjectManager(
+ static_state_.int_state.max_vertex_attribs,
+ reserved_ids_[0],
+ reserved_ids_[1]));
+
+ // GL_BIND_GENERATES_RESOURCE_CHROMIUM state must be the same
+ // on Client & Service.
+ if (static_state_.int_state.bind_generates_resource_chromium !=
+ (share_group_->bind_generates_resource() ? 1 : 0)) {
+ SetGLError(GL_INVALID_OPERATION,
+ "Initialize",
+ "Service bind_generates_resource mismatch.");
+ return false;
+ }
+
+ return true;
+}
+
+bool GLES2Implementation::QueryAndCacheStaticState() {
+ TRACE_EVENT0("gpu", "GLES2Implementation::QueryAndCacheStaticState");
+  // Set up a query for multiple GetIntegerv's.
+ static const GLenum pnames[] = {
+ GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS,
+ GL_MAX_CUBE_MAP_TEXTURE_SIZE,
+ GL_MAX_FRAGMENT_UNIFORM_VECTORS,
+ GL_MAX_RENDERBUFFER_SIZE,
+ GL_MAX_TEXTURE_IMAGE_UNITS,
+ GL_MAX_TEXTURE_SIZE,
+ GL_MAX_VARYING_VECTORS,
+ GL_MAX_VERTEX_ATTRIBS,
+ GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS,
+ GL_MAX_VERTEX_UNIFORM_VECTORS,
+ GL_NUM_COMPRESSED_TEXTURE_FORMATS,
+ GL_NUM_SHADER_BINARY_FORMATS,
+ GL_BIND_GENERATES_RESOURCE_CHROMIUM,
+ };
+
+ GetMultipleIntegervState integerv_state(
+ pnames, arraysize(pnames),
+ &static_state_.int_state.max_combined_texture_image_units,
+ sizeof(static_state_.int_state));
+ if (!GetMultipleIntegervSetup(&integerv_state)) {
+ return false;
+ }
+
+  // Set up a query for multiple GetShaderPrecisionFormat's.
+ static const GLenum precision_params[][2] = {
+ { GL_VERTEX_SHADER, GL_LOW_INT },
+ { GL_VERTEX_SHADER, GL_MEDIUM_INT },
+ { GL_VERTEX_SHADER, GL_HIGH_INT },
+ { GL_VERTEX_SHADER, GL_LOW_FLOAT },
+ { GL_VERTEX_SHADER, GL_MEDIUM_FLOAT },
+ { GL_VERTEX_SHADER, GL_HIGH_FLOAT },
+ { GL_FRAGMENT_SHADER, GL_LOW_INT },
+ { GL_FRAGMENT_SHADER, GL_MEDIUM_INT },
+ { GL_FRAGMENT_SHADER, GL_HIGH_INT },
+ { GL_FRAGMENT_SHADER, GL_LOW_FLOAT },
+ { GL_FRAGMENT_SHADER, GL_MEDIUM_FLOAT },
+ { GL_FRAGMENT_SHADER, GL_HIGH_FLOAT },
+ };
+
+ GetAllShaderPrecisionFormatsState precision_state(
+ precision_params, arraysize(precision_params));
+ GetAllShaderPrecisionFormatsSetup(&precision_state);
+
+ // Allocate and partition transfer buffer for all requests
+ void* buffer = transfer_buffer_->Alloc(
+ integerv_state.transfer_buffer_size_needed +
+ precision_state.transfer_buffer_size_needed);
+ if (!buffer) {
+ SetGLError(GL_OUT_OF_MEMORY, "QueryAndCacheStaticState",
+ "Transfer buffer allocation failed.");
+ return false;
+ }
+ integerv_state.buffer = buffer;
+ precision_state.results_buffer =
+ static_cast<char*>(buffer) + integerv_state.transfer_buffer_size_needed;
+
+ // Make all the requests and wait once for all the results.
+ GetMultipleIntegervRequest(&integerv_state);
+ GetAllShaderPrecisionFormatsRequest(&precision_state);
+ WaitForCmd();
+ GetMultipleIntegervOnCompleted(&integerv_state);
+ GetAllShaderPrecisionFormatsOnCompleted(&precision_state);
+
+ // TODO(gman): We should be able to free without a token.
+ transfer_buffer_->FreePendingToken(buffer, helper_->InsertToken());
+ CheckGLError();
+
+ return true;
+}
+
+GLES2Implementation::~GLES2Implementation() {
+  // Make sure the queries are finished, otherwise we'll delete the
+  // shared memory (mapped_memory_) which will free the memory used
+  // by the queries. The GPU process, when validating that the memory is
+  // still shared, will fail and abort (i.e., it will stop running).
+ WaitForCmd();
+ query_tracker_.reset();
+
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ DeleteBuffers(arraysize(reserved_ids_), &reserved_ids_[0]);
+#endif
+
+ // Release any per-context data in share group.
+ share_group_->FreeContext(this);
+
+ buffer_tracker_.reset();
+
+ FreeAllAsyncUploadBuffers();
+
+ if (async_upload_sync_) {
+ mapped_memory_->Free(async_upload_sync_);
+ async_upload_sync_ = NULL;
+ }
+
+  // Make sure the commands make it to the service.
+ WaitForCmd();
+}
+
+GLES2CmdHelper* GLES2Implementation::helper() const {
+ return helper_;
+}
+
+IdHandlerInterface* GLES2Implementation::GetIdHandler(int namespace_id) const {
+ return share_group_->GetIdHandler(namespace_id);
+}
+
+IdAllocator* GLES2Implementation::GetIdAllocator(int namespace_id) const {
+ if (namespace_id == id_namespaces::kQueries)
+ return query_id_allocator_.get();
+ NOTREACHED();
+ return NULL;
+}
+
+void* GLES2Implementation::GetResultBuffer() {
+ return transfer_buffer_->GetResultBuffer();
+}
+
+int32 GLES2Implementation::GetResultShmId() {
+ return transfer_buffer_->GetShmId();
+}
+
+uint32 GLES2Implementation::GetResultShmOffset() {
+ return transfer_buffer_->GetResultOffset();
+}
+
+void GLES2Implementation::FreeUnusedSharedMemory() {
+ mapped_memory_->FreeUnused();
+}
+
+void GLES2Implementation::FreeEverything() {
+ FreeAllAsyncUploadBuffers();
+ WaitForCmd();
+ query_tracker_->Shrink();
+ FreeUnusedSharedMemory();
+ transfer_buffer_->Free();
+ helper_->FreeRingBuffer();
+}
+
+void GLES2Implementation::RunIfContextNotLost(const base::Closure& callback) {
+ if (!helper_->IsContextLost())
+ callback.Run();
+}
+
+void GLES2Implementation::SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) {
+ gpu_control_->SignalSyncPoint(
+ sync_point,
+ base::Bind(&GLES2Implementation::RunIfContextNotLost,
+ weak_ptr_factory_.GetWeakPtr(),
+ callback));
+}
+
+void GLES2Implementation::SignalQuery(uint32 query,
+ const base::Closure& callback) {
+ // Flush previously entered commands to ensure ordering with any
+ // glBeginQueryEXT() calls that may have been put into the context.
+ ShallowFlushCHROMIUM();
+ gpu_control_->SignalQuery(
+ query,
+ base::Bind(&GLES2Implementation::RunIfContextNotLost,
+ weak_ptr_factory_.GetWeakPtr(),
+ callback));
+}
+
+void GLES2Implementation::SetSurfaceVisible(bool visible) {
+ TRACE_EVENT1(
+ "gpu", "GLES2Implementation::SetSurfaceVisible", "visible", visible);
+ // TODO(piman): This probably should be ShallowFlushCHROMIUM().
+ Flush();
+ gpu_control_->SetSurfaceVisible(visible);
+ if (!visible)
+ FreeEverything();
+}
+
+void GLES2Implementation::WaitForCmd() {
+ TRACE_EVENT0("gpu", "GLES2::WaitForCmd");
+ helper_->CommandBufferHelper::Finish();
+}
+
+bool GLES2Implementation::IsExtensionAvailable(const char* ext) {
+ const char* extensions =
+ reinterpret_cast<const char*>(GetStringHelper(GL_EXTENSIONS));
+ if (!extensions)
+ return false;
+
+ int length = strlen(ext);
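+  // Walk the space-separated extension string one token at a time, looking
+  // for an exact match.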
+ while (true) {
+ int n = strcspn(extensions, " ");
+ if (n == length && 0 == strncmp(ext, extensions, length)) {
+ return true;
+ }
+ if ('\0' == extensions[n]) {
+ return false;
+ }
+ extensions += n + 1;
+ }
+}
+
+bool GLES2Implementation::IsExtensionAvailableHelper(
+ const char* extension, ExtensionStatus* status) {
+ switch (*status) {
+ case kAvailableExtensionStatus:
+ return true;
+ case kUnavailableExtensionStatus:
+ return false;
+ default: {
+ bool available = IsExtensionAvailable(extension);
+ *status = available ? kAvailableExtensionStatus :
+ kUnavailableExtensionStatus;
+ return available;
+ }
+ }
+}
+
+bool GLES2Implementation::IsAnglePackReverseRowOrderAvailable() {
+ return IsExtensionAvailableHelper(
+ "GL_ANGLE_pack_reverse_row_order",
+ &angle_pack_reverse_row_order_status_);
+}
+
+bool GLES2Implementation::IsChromiumFramebufferMultisampleAvailable() {
+ return IsExtensionAvailableHelper(
+ "GL_CHROMIUM_framebuffer_multisample",
+ &chromium_framebuffer_multisample_);
+}
+
+const std::string& GLES2Implementation::GetLogPrefix() const {
+ const std::string& prefix(debug_marker_manager_.GetMarker());
+ return prefix.empty() ? this_in_hex_ : prefix;
+}
+
+GLenum GLES2Implementation::GetError() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetError()");
+ GLenum err = GetGLError();
+ GPU_CLIENT_LOG("returned " << GLES2Util::GetStringError(err));
+ return err;
+}
+
+GLenum GLES2Implementation::GetClientSideGLError() {
+ if (error_bits_ == 0) {
+ return GL_NO_ERROR;
+ }
+
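+  // Return the lowest-order pending error bit as a GLenum and clear it, so
+  // repeated calls report the synthesized errors one at a time.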
+ GLenum error = GL_NO_ERROR;
+ for (uint32 mask = 1; mask != 0; mask = mask << 1) {
+ if ((error_bits_ & mask) != 0) {
+ error = GLES2Util::GLErrorBitToGLError(mask);
+ break;
+ }
+ }
+ error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
+ return error;
+}
+
+GLenum GLES2Implementation::GetGLError() {
+ TRACE_EVENT0("gpu", "GLES2::GetGLError");
+ // Check the GL error first, then our wrapped error.
+ typedef cmds::GetError::Result Result;
+ Result* result = GetResultAs<Result*>();
+ // If we couldn't allocate a result the context is lost.
+ if (!result) {
+ return GL_NO_ERROR;
+ }
+ *result = GL_NO_ERROR;
+ helper_->GetError(GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLenum error = *result;
+ if (error == GL_NO_ERROR) {
+ error = GetClientSideGLError();
+ } else {
+    // There was an error; clear the corresponding wrapped error.
+ error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
+ }
+ return error;
+}
+
+#if defined(GL_CLIENT_FAIL_GL_ERRORS)
+void GLES2Implementation::FailGLError(GLenum error) {
+ if (error != GL_NO_ERROR) {
+ NOTREACHED() << "Error";
+ }
+}
+// NOTE: Calling GetGLError overwrites data in the result buffer.
+void GLES2Implementation::CheckGLError() {
+ FailGLError(GetGLError());
+}
+#endif  // defined(GL_CLIENT_FAIL_GL_ERRORS)
+
+void GLES2Implementation::SetGLError(
+ GLenum error, const char* function_name, const char* msg) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] Client Synthesized Error: "
+ << GLES2Util::GetStringError(error) << ": "
+ << function_name << ": " << msg);
+ FailGLError(error);
+ if (msg) {
+ last_error_ = msg;
+ }
+ if (error_message_callback_) {
+ std::string temp(GLES2Util::GetStringError(error) + " : " +
+ function_name + ": " + (msg ? msg : ""));
+ error_message_callback_->OnErrorMessage(temp.c_str(), 0);
+ }
+ error_bits_ |= GLES2Util::GLErrorToErrorBit(error);
+
+ if (error == GL_OUT_OF_MEMORY && lose_context_when_out_of_memory_) {
+ helper_->LoseContextCHROMIUM(GL_GUILTY_CONTEXT_RESET_ARB,
+ GL_UNKNOWN_CONTEXT_RESET_ARB);
+ }
+}
+
+void GLES2Implementation::SetGLErrorInvalidEnum(
+ const char* function_name, GLenum value, const char* label) {
+ SetGLError(GL_INVALID_ENUM, function_name,
+ (std::string(label) + " was " +
+ GLES2Util::GetStringEnum(value)).c_str());
+}
+
+bool GLES2Implementation::GetBucketContents(uint32 bucket_id,
+ std::vector<int8>* data) {
+ TRACE_EVENT0("gpu", "GLES2::GetBucketContents");
+ DCHECK(data);
+ const uint32 kStartSize = 32 * 1024;
+ ScopedTransferBufferPtr buffer(kStartSize, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return false;
+ }
+ typedef cmd::GetBucketStart::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return false;
+ }
+ *result = 0;
+ helper_->GetBucketStart(
+ bucket_id, GetResultShmId(), GetResultShmOffset(),
+ buffer.size(), buffer.shm_id(), buffer.offset());
+ WaitForCmd();
+ uint32 size = *result;
+ data->resize(size);
+ if (size > 0u) {
+ uint32 offset = 0;
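+    // Copy the bucket out in transfer-buffer-sized chunks, re-requesting
+    // data from the service whenever the scoped buffer has been released.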
+ while (size) {
+ if (!buffer.valid()) {
+ buffer.Reset(size);
+ if (!buffer.valid()) {
+ return false;
+ }
+ helper_->GetBucketData(
+ bucket_id, offset, buffer.size(), buffer.shm_id(), buffer.offset());
+ WaitForCmd();
+ }
+ uint32 size_to_copy = std::min(size, buffer.size());
+ memcpy(&(*data)[offset], buffer.address(), size_to_copy);
+ offset += size_to_copy;
+ size -= size_to_copy;
+ buffer.Release();
+ }
+    // Free the bucket. This is not required, but it does free up the memory,
+    // and we don't have to wait for the result, so from the client's
+    // perspective it's cheap.
+ helper_->SetBucketSize(bucket_id, 0);
+ }
+ return true;
+}
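+
+// Example: for a 100 KB bucket the initial 32 KB transfer buffer is filled by
+// GetBucketStart along with the total size; the loop above then issues
+// GetBucketData round trips for the rest (roughly three more if each Reset()
+// hands back another 32 KB) before freeing the bucket with a zero size.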
+
+void GLES2Implementation::SetBucketContents(
+ uint32 bucket_id, const void* data, size_t size) {
+ DCHECK(data);
+ helper_->SetBucketSize(bucket_id, size);
+ if (size > 0u) {
+ uint32 offset = 0;
+ while (size) {
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+ memcpy(buffer.address(), static_cast<const int8*>(data) + offset,
+ buffer.size());
+ helper_->SetBucketData(
+ bucket_id, offset, buffer.size(), buffer.shm_id(), buffer.offset());
+ offset += buffer.size();
+ size -= buffer.size();
+ }
+ }
+}
+
+void GLES2Implementation::SetBucketAsCString(
+ uint32 bucket_id, const char* str) {
+ // NOTE: strings are passed NULL terminated. That means the empty
+ // string will have a size of 1 and a NULL string will have a size of 0.
+ if (str) {
+ SetBucketContents(bucket_id, str, strlen(str) + 1);
+ } else {
+ helper_->SetBucketSize(bucket_id, 0);
+ }
+}
+
+bool GLES2Implementation::GetBucketAsString(
+ uint32 bucket_id, std::string* str) {
+ DCHECK(str);
+ std::vector<int8> data;
+ // NOTE: strings are passed NULL terminated. That means the empty
+ // string will have a size of 1 and a NULL string will have a size of 0.
+ if (!GetBucketContents(bucket_id, &data)) {
+ return false;
+ }
+ if (data.empty()) {
+ return false;
+ }
+ str->assign(&data[0], &data[0] + data.size() - 1);
+ return true;
+}
+
+void GLES2Implementation::SetBucketAsString(
+ uint32 bucket_id, const std::string& str) {
+ // NOTE: strings are passed NULL terminated. That means the empty
+ // string will have a size of 1 and a NULL string will have a size of 0.
+ SetBucketContents(bucket_id, str.c_str(), str.size() + 1);
+}
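+
+// Example of the size convention noted above: SetBucketAsCString(id, "abc")
+// stores 4 bytes ("abc" plus the terminating NULL), SetBucketAsCString(id, "")
+// stores 1 byte, and SetBucketAsCString(id, NULL) sets the bucket size to 0,
+// which GetBucketAsString then reports as failure.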
+
+void GLES2Implementation::Disable(GLenum cap) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDisable("
+ << GLES2Util::GetStringCapability(cap) << ")");
+ bool changed = false;
+ if (!state_.SetCapabilityState(cap, false, &changed) || changed) {
+ helper_->Disable(cap);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::Enable(GLenum cap) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnable("
+ << GLES2Util::GetStringCapability(cap) << ")");
+ bool changed = false;
+ if (!state_.SetCapabilityState(cap, true, &changed) || changed) {
+ helper_->Enable(cap);
+ }
+ CheckGLError();
+}
+
+GLboolean GLES2Implementation::IsEnabled(GLenum cap) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsEnabled("
+ << GLES2Util::GetStringCapability(cap) << ")");
+ bool state = false;
+ if (!state_.GetEnabled(cap, &state)) {
+ typedef cmds::IsEnabled::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsEnabled(cap, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ state = (*result) != 0;
+ }
+
+ GPU_CLIENT_LOG("returned " << state);
+ CheckGLError();
+ return state;
+}
+
+bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
+ switch (pname) {
+ case GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS:
+ *params = static_state_.int_state.max_combined_texture_image_units;
+ return true;
+ case GL_MAX_CUBE_MAP_TEXTURE_SIZE:
+ *params = static_state_.int_state.max_cube_map_texture_size;
+ return true;
+ case GL_MAX_FRAGMENT_UNIFORM_VECTORS:
+ *params = static_state_.int_state.max_fragment_uniform_vectors;
+ return true;
+ case GL_MAX_RENDERBUFFER_SIZE:
+ *params = static_state_.int_state.max_renderbuffer_size;
+ return true;
+ case GL_MAX_TEXTURE_IMAGE_UNITS:
+ *params = static_state_.int_state.max_texture_image_units;
+ return true;
+ case GL_MAX_TEXTURE_SIZE:
+ *params = static_state_.int_state.max_texture_size;
+ return true;
+ case GL_MAX_VARYING_VECTORS:
+ *params = static_state_.int_state.max_varying_vectors;
+ return true;
+ case GL_MAX_VERTEX_ATTRIBS:
+ *params = static_state_.int_state.max_vertex_attribs;
+ return true;
+ case GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS:
+ *params = static_state_.int_state.max_vertex_texture_image_units;
+ return true;
+ case GL_MAX_VERTEX_UNIFORM_VECTORS:
+ *params = static_state_.int_state.max_vertex_uniform_vectors;
+ return true;
+ case GL_NUM_COMPRESSED_TEXTURE_FORMATS:
+ *params = static_state_.int_state.num_compressed_texture_formats;
+ return true;
+ case GL_NUM_SHADER_BINARY_FORMATS:
+ *params = static_state_.int_state.num_shader_binary_formats;
+ return true;
+ case GL_ARRAY_BUFFER_BINDING:
+ if (share_group_->bind_generates_resource()) {
+ *params = bound_array_buffer_id_;
+ return true;
+ }
+ return false;
+ case GL_ELEMENT_ARRAY_BUFFER_BINDING:
+ if (share_group_->bind_generates_resource()) {
+ *params =
+ vertex_array_object_manager_->bound_element_array_buffer();
+ return true;
+ }
+ return false;
+ case GL_PIXEL_PACK_TRANSFER_BUFFER_BINDING_CHROMIUM:
+ *params = bound_pixel_pack_transfer_buffer_id_;
+ return true;
+ case GL_PIXEL_UNPACK_TRANSFER_BUFFER_BINDING_CHROMIUM:
+ *params = bound_pixel_unpack_transfer_buffer_id_;
+ return true;
+ case GL_ACTIVE_TEXTURE:
+ *params = active_texture_unit_ + GL_TEXTURE0;
+ return true;
+ case GL_TEXTURE_BINDING_2D:
+ if (share_group_->bind_generates_resource()) {
+ *params = texture_units_[active_texture_unit_].bound_texture_2d;
+ return true;
+ }
+ return false;
+ case GL_TEXTURE_BINDING_CUBE_MAP:
+ if (share_group_->bind_generates_resource()) {
+ *params = texture_units_[active_texture_unit_].bound_texture_cube_map;
+ return true;
+ }
+ return false;
+ case GL_TEXTURE_BINDING_EXTERNAL_OES:
+ if (share_group_->bind_generates_resource()) {
+ *params =
+ texture_units_[active_texture_unit_].bound_texture_external_oes;
+ return true;
+ }
+ return false;
+ case GL_FRAMEBUFFER_BINDING:
+ if (share_group_->bind_generates_resource()) {
+ *params = bound_framebuffer_;
+ return true;
+ }
+ return false;
+ case GL_READ_FRAMEBUFFER_BINDING:
+ if (IsChromiumFramebufferMultisampleAvailable() &&
+ share_group_->bind_generates_resource()) {
+ *params = bound_read_framebuffer_;
+ return true;
+ }
+ return false;
+ case GL_RENDERBUFFER_BINDING:
+ if (share_group_->bind_generates_resource()) {
+ *params = bound_renderbuffer_;
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
+bool GLES2Implementation::GetBooleanvHelper(GLenum pname, GLboolean* params) {
+ // TODO(gman): Make this handle pnames that return more than 1 value.
+ GLint value;
+ if (!GetHelper(pname, &value)) {
+ return false;
+ }
+ *params = static_cast<GLboolean>(value);
+ return true;
+}
+
+bool GLES2Implementation::GetFloatvHelper(GLenum pname, GLfloat* params) {
+ // TODO(gman): Make this handle pnames that return more than 1 value.
+ GLint value;
+ if (!GetHelper(pname, &value)) {
+ return false;
+ }
+ *params = static_cast<GLfloat>(value);
+ return true;
+}
+
+bool GLES2Implementation::GetIntegervHelper(GLenum pname, GLint* params) {
+ return GetHelper(pname, params);
+}
+
+GLuint GLES2Implementation::GetMaxValueInBufferCHROMIUMHelper(
+ GLuint buffer_id, GLsizei count, GLenum type, GLuint offset) {
+ typedef cmds::GetMaxValueInBufferCHROMIUM::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return 0;
+ }
+ *result = 0;
+ helper_->GetMaxValueInBufferCHROMIUM(
+ buffer_id, count, type, offset, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ return *result;
+}
+
+GLuint GLES2Implementation::GetMaxValueInBufferCHROMIUM(
+ GLuint buffer_id, GLsizei count, GLenum type, GLuint offset) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetMaxValueInBufferCHROMIUM("
+ << buffer_id << ", " << count << ", "
+ << GLES2Util::GetStringGetMaxIndexType(type)
+ << ", " << offset << ")");
+ GLuint result = GetMaxValueInBufferCHROMIUMHelper(
+ buffer_id, count, type, offset);
+ GPU_CLIENT_LOG("returned " << result);
+ CheckGLError();
+ return result;
+}
+
+void GLES2Implementation::RestoreElementAndArrayBuffers(bool restore) {
+ if (restore) {
+ RestoreArrayBuffer(restore);
+ // Restore the element array binding.
+ // We only need to restore it if it wasn't a client side array.
+ if (vertex_array_object_manager_->bound_element_array_buffer() == 0) {
+ helper_->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+ }
+ }
+}
+
+void GLES2Implementation::RestoreArrayBuffer(bool restore) {
+ if (restore) {
+ // Restore the user's current binding.
+ helper_->BindBuffer(GL_ARRAY_BUFFER, bound_array_buffer_id_);
+ }
+}
+
+void GLES2Implementation::DrawElements(
+ GLenum mode, GLsizei count, GLenum type, const void* indices) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawElements("
+ << GLES2Util::GetStringDrawMode(mode) << ", "
+ << count << ", "
+ << GLES2Util::GetStringIndexType(type) << ", "
+ << static_cast<const void*>(indices) << ")");
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDrawElements", "count less than 0.");
+ return;
+ }
+ if (count == 0) {
+ return;
+ }
+ if (vertex_array_object_manager_->bound_element_array_buffer() != 0 &&
+ !ValidateOffset("glDrawElements", reinterpret_cast<GLintptr>(indices))) {
+ return;
+ }
+ GLuint offset = 0;
+ bool simulated = false;
+ if (!vertex_array_object_manager_->SetupSimulatedIndexAndClientSideBuffers(
+ "glDrawElements", this, helper_, count, type, 0, indices,
+ &offset, &simulated)) {
+ return;
+ }
+ helper_->DrawElements(mode, count, type, offset);
+ RestoreElementAndArrayBuffers(simulated);
+ CheckGLError();
+}
+
+void GLES2Implementation::Flush() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFlush()");
+ // Insert the cmd to call glFlush
+ helper_->Flush();
+ // Flush our command buffer
+ // (tell the service to execute up to the flush cmd.)
+ helper_->CommandBufferHelper::Flush();
+}
+
+void GLES2Implementation::ShallowFlushCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glShallowFlushCHROMIUM()");
+ // Flush our command buffer
+ // (tell the service to execute up to the flush cmd.)
+ helper_->CommandBufferHelper::Flush();
+ // TODO(piman): Add the FreeEverything() logic here.
+}
+
+void GLES2Implementation::Finish() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ FinishHelper();
+}
+
+void GLES2Implementation::ShallowFinishCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2::ShallowFinishCHROMIUM");
+ // Flush our command buffer (tell the service to execute up to the flush cmd
+ // and don't return until it completes).
+ helper_->CommandBufferHelper::Finish();
+}
+
+void GLES2Implementation::FinishHelper() {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFinish()");
+ TRACE_EVENT0("gpu", "GLES2::Finish");
+ // Insert the cmd to call glFinish
+ helper_->Finish();
+ // Finish our command buffer
+ // (tell the service to execute up to the Finish cmd and wait for it to
+ // execute.)
+ helper_->CommandBufferHelper::Finish();
+}
+
+void GLES2Implementation::SwapBuffers() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glSwapBuffers()");
+ // TODO(piman): Strictly speaking we'd want to insert the token after the
+ // swap, but the state update with the updated token might not have happened
+ // by the time the SwapBuffer callback gets called, forcing us to synchronize
+ // with the GPU process more than needed. So instead, make it happen before.
+ // All it means is that we could be slightly looser on the kMaxSwapBuffers
+ // semantics if the client doesn't use the callback mechanism, and by chance
+ // the scheduler yields between the InsertToken and the SwapBuffers.
+ swap_buffers_tokens_.push(helper_->InsertToken());
+ helper_->SwapBuffers();
+ helper_->CommandBufferHelper::Flush();
+ // Wait if we added too many swap buffers. Add 1 to kMaxSwapBuffers to
+ // compensate for TODO above.
+ if (swap_buffers_tokens_.size() > kMaxSwapBuffers + 1) {
+ helper_->WaitForToken(swap_buffers_tokens_.front());
+ swap_buffers_tokens_.pop();
+ }
+}
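+
+// Example of the throttling above, assuming kMaxSwapBuffers is 2: up to three
+// swaps may be outstanding; on the fourth SwapBuffers call the queue size
+// exceeds kMaxSwapBuffers + 1, so we block on the token inserted before the
+// oldest swap and then drop it from the queue.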
+
+void GLES2Implementation::BindAttribLocation(
+ GLuint program, GLuint index, const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindAttribLocation("
+ << program << ", " << index << ", " << name << ")");
+ SetBucketAsString(kResultBucketId, name);
+ helper_->BindAttribLocationBucket(program, index, kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ CheckGLError();
+}
+
+void GLES2Implementation::BindUniformLocationCHROMIUM(
+ GLuint program, GLint location, const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindUniformLocationCHROMIUM("
+ << program << ", " << location << ", " << name << ")");
+ SetBucketAsString(kResultBucketId, name);
+ helper_->BindUniformLocationCHROMIUMBucket(
+ program, location, kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ CheckGLError();
+}
+
+void GLES2Implementation::GetVertexAttribPointerv(
+ GLuint index, GLenum pname, void** ptr) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetVertexAttribPointer("
+ << index << ", " << GLES2Util::GetStringVertexPointer(pname) << ", "
+ << static_cast<void*>(ptr) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK(int32 num_results = 1);
+ if (!vertex_array_object_manager_->GetAttribPointer(index, pname, ptr)) {
+ TRACE_EVENT0("gpu", "GLES2::GetVertexAttribPointerv");
+ typedef cmds::GetVertexAttribPointerv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetVertexAttribPointerv(
+ index, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(ptr);
+ GPU_CLIENT_LOG_CODE_BLOCK(num_results = result->GetNumResults());
+ }
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < num_results; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << ptr[i]);
+ }
+ });
+ CheckGLError();
+}
+
+bool GLES2Implementation::DeleteProgramHelper(GLuint program) {
+ if (!GetIdHandler(id_namespaces::kProgramsAndShaders)->FreeIds(
+ this, 1, &program, &GLES2Implementation::DeleteProgramStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteProgram", "id not created by this context.");
+ return false;
+ }
+ if (program == current_program_) {
+ current_program_ = 0;
+ }
+ return true;
+}
+
+void GLES2Implementation::DeleteProgramStub(
+ GLsizei n, const GLuint* programs) {
+ DCHECK_EQ(1, n);
+ share_group_->program_info_manager()->DeleteInfo(programs[0]);
+ helper_->DeleteProgram(programs[0]);
+}
+
+bool GLES2Implementation::DeleteShaderHelper(GLuint shader) {
+ if (!GetIdHandler(id_namespaces::kProgramsAndShaders)->FreeIds(
+ this, 1, &shader, &GLES2Implementation::DeleteShaderStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteShader", "id not created by this context.");
+ return false;
+ }
+ return true;
+}
+
+void GLES2Implementation::DeleteShaderStub(
+ GLsizei n, const GLuint* shaders) {
+ DCHECK_EQ(1, n);
+ share_group_->program_info_manager()->DeleteInfo(shaders[0]);
+ helper_->DeleteShader(shaders[0]);
+}
+
+
+GLint GLES2Implementation::GetAttribLocationHelper(
+ GLuint program, const char* name) {
+ typedef cmds::GetAttribLocation::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return -1;
+ }
+ *result = -1;
+ SetBucketAsCString(kResultBucketId, name);
+ helper_->GetAttribLocation(
+ program, kResultBucketId, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ helper_->SetBucketSize(kResultBucketId, 0);
+ return *result;
+}
+
+GLint GLES2Implementation::GetAttribLocation(
+ GLuint program, const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetAttribLocation(" << program
+ << ", " << name << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetAttribLocation");
+ GLint loc = share_group_->program_info_manager()->GetAttribLocation(
+ this, program, name);
+ GPU_CLIENT_LOG("returned " << loc);
+ CheckGLError();
+ return loc;
+}
+
+GLint GLES2Implementation::GetUniformLocationHelper(
+ GLuint program, const char* name) {
+ typedef cmds::GetUniformLocation::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return -1;
+ }
+ *result = -1;
+ SetBucketAsCString(kResultBucketId, name);
+ helper_->GetUniformLocation(program, kResultBucketId,
+ GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ helper_->SetBucketSize(kResultBucketId, 0);
+ return *result;
+}
+
+GLint GLES2Implementation::GetUniformLocation(
+ GLuint program, const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetUniformLocation(" << program
+ << ", " << name << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetUniformLocation");
+ GLint loc = share_group_->program_info_manager()->GetUniformLocation(
+ this, program, name);
+ GPU_CLIENT_LOG("returned " << loc);
+ CheckGLError();
+ return loc;
+}
+
+bool GLES2Implementation::GetProgramivHelper(
+ GLuint program, GLenum pname, GLint* params) {
+ bool got_value = share_group_->program_info_manager()->GetProgramiv(
+ this, program, pname, params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ if (got_value) {
+ GPU_CLIENT_LOG(" 0: " << *params);
+ }
+ });
+ return got_value;
+}
+
+void GLES2Implementation::LinkProgram(GLuint program) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glLinkProgram(" << program << ")");
+ helper_->LinkProgram(program);
+ share_group_->program_info_manager()->CreateInfo(program);
+ CheckGLError();
+}
+
+void GLES2Implementation::ShaderBinary(
+ GLsizei n, const GLuint* shaders, GLenum binaryformat, const void* binary,
+ GLsizei length) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glShaderBinary(" << n << ", "
+ << static_cast<const void*>(shaders) << ", "
+ << GLES2Util::GetStringEnum(binaryformat) << ", "
+ << static_cast<const void*>(binary) << ", "
+ << length << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glShaderBinary", "n < 0.");
+ return;
+ }
+ if (length < 0) {
+ SetGLError(GL_INVALID_VALUE, "glShaderBinary", "length < 0.");
+ return;
+ }
+ // TODO(gman): ShaderBinary should use buckets.
+ unsigned int shader_id_size = n * sizeof(*shaders);
+ ScopedTransferBufferArray<GLint> buffer(
+ shader_id_size + length, helper_, transfer_buffer_);
+ if (!buffer.valid() || buffer.num_elements() != shader_id_size + length) {
+ SetGLError(GL_OUT_OF_MEMORY, "glShaderBinary", "out of memory.");
+ return;
+ }
+ void* shader_ids = buffer.elements();
+ void* shader_data = buffer.elements() + shader_id_size;
+ memcpy(shader_ids, shaders, shader_id_size);
+ memcpy(shader_data, binary, length);
+ helper_->ShaderBinary(
+ n,
+ buffer.shm_id(),
+ buffer.offset(),
+ binaryformat,
+ buffer.shm_id(),
+ buffer.offset() + shader_id_size,
+ length);
+ CheckGLError();
+}
+
+void GLES2Implementation::PixelStorei(GLenum pname, GLint param) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPixelStorei("
+ << GLES2Util::GetStringPixelStore(pname) << ", "
+ << param << ")");
+ switch (pname) {
+ case GL_PACK_ALIGNMENT:
+ pack_alignment_ = param;
+ break;
+ case GL_UNPACK_ALIGNMENT:
+ unpack_alignment_ = param;
+ break;
+ case GL_UNPACK_ROW_LENGTH_EXT:
+ unpack_row_length_ = param;
+ return;
+ case GL_UNPACK_SKIP_ROWS_EXT:
+ unpack_skip_rows_ = param;
+ return;
+ case GL_UNPACK_SKIP_PIXELS_EXT:
+ unpack_skip_pixels_ = param;
+ return;
+ case GL_UNPACK_FLIP_Y_CHROMIUM:
+ unpack_flip_y_ = (param != 0);
+ break;
+ case GL_PACK_REVERSE_ROW_ORDER_ANGLE:
+ pack_reverse_row_order_ =
+ IsAnglePackReverseRowOrderAvailable() ? (param != 0) : false;
+ break;
+ default:
+ break;
+ }
+ helper_->PixelStorei(pname, param);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttribPointer(
+ GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride,
+ const void* ptr) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttribPointer("
+ << index << ", "
+ << size << ", "
+ << GLES2Util::GetStringVertexAttribType(type) << ", "
+ << GLES2Util::GetStringBool(normalized) << ", "
+ << stride << ", "
+ << static_cast<const void*>(ptr) << ")");
+ // Record the info on the client side.
+ if (!vertex_array_object_manager_->SetAttribPointer(
+ bound_array_buffer_id_, index, size, type, normalized, stride, ptr)) {
+ SetGLError(GL_INVALID_OPERATION, "glVertexAttribPointer",
+ "client side arrays are not allowed in vertex array objects.");
+ return;
+ }
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ if (bound_array_buffer_id_ != 0) {
+ // Only report NON client side buffers to the service.
+ if (!ValidateOffset("glVertexAttribPointer",
+ reinterpret_cast<GLintptr>(ptr))) {
+ return;
+ }
+ helper_->VertexAttribPointer(index, size, type, normalized, stride,
+ ToGLuint(ptr));
+ }
+#else // !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ if (!ValidateOffset("glVertexAttribPointer",
+ reinterpret_cast<GLintptr>(ptr))) {
+ return;
+ }
+ helper_->VertexAttribPointer(index, size, type, normalized, stride,
+ ToGLuint(ptr));
+#endif // !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttribDivisorANGLE(
+ GLuint index, GLuint divisor) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttribDivisorANGLE("
+ << index << ", "
+ << divisor << ") ");
+ // Record the info on the client side.
+ vertex_array_object_manager_->SetAttribDivisor(index, divisor);
+ helper_->VertexAttribDivisorANGLE(index, divisor);
+ CheckGLError();
+}
+
+void GLES2Implementation::ShaderSource(
+ GLuint shader,
+ GLsizei count,
+ const GLchar* const* source,
+ const GLint* length) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glShaderSource("
+ << shader << ", " << count << ", "
+ << static_cast<const void*>(source) << ", "
+ << static_cast<const void*>(length) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (source[ii]) {
+ if (length && length[ii] >= 0) {
+ std::string str(source[ii], length[ii]);
+ GPU_CLIENT_LOG(" " << ii << ": ---\n" << str << "\n---");
+ } else {
+ GPU_CLIENT_LOG(" " << ii << ": ---\n" << source[ii] << "\n---");
+ }
+ } else {
+ GPU_CLIENT_LOG(" " << ii << ": NULL");
+ }
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glShaderSource", "count < 0");
+ return;
+ }
+ if (shader == 0) {
+ SetGLError(GL_INVALID_VALUE, "glShaderSource", "shader == 0");
+ return;
+ }
+
+ // Compute the total size.
+ uint32 total_size = 1;
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (source[ii]) {
+ total_size += (length && length[ii] >= 0) ?
+ static_cast<size_t>(length[ii]) : strlen(source[ii]);
+ }
+ }
+
+ // Concatenate all the strings into a bucket on the service.
+ helper_->SetBucketSize(kResultBucketId, total_size);
+ uint32 offset = 0;
+ for (GLsizei ii = 0; ii <= count; ++ii) {
+ const char* src = ii < count ? source[ii] : "";
+ if (src) {
+ uint32 size = ii < count ?
+ ((length && length[ii] >= 0) ? static_cast<size_t>(length[ii]) :
+ strlen(src)) : 1;
+ while (size) {
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+ memcpy(buffer.address(), src, buffer.size());
+ helper_->SetBucketData(kResultBucketId, offset, buffer.size(),
+ buffer.shm_id(), buffer.offset());
+ offset += buffer.size();
+ src += buffer.size();
+ size -= buffer.size();
+ }
+ }
+ }
+
+ DCHECK_EQ(total_size, offset);
+
+ helper_->ShaderSourceBucket(shader, kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ CheckGLError();
+}
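+
+// Example of the bucket layout built above: with count == 2, sources "ab" and
+// "cd" and no explicit lengths, total_size is 1 + 2 + 2 = 5; the loop copies
+// "ab", then "cd", and the final ii == count pass copies the single
+// terminating NULL, so offset ends up equal to total_size.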
+
+void GLES2Implementation::BufferDataHelper(
+ GLenum target, GLsizeiptr size, const void* data, GLenum usage) {
+ if (!ValidateSize("glBufferData", size))
+ return;
+
+ GLuint buffer_id;
+ if (GetBoundPixelTransferBuffer(target, "glBufferData", &buffer_id)) {
+ if (!buffer_id) {
+ return;
+ }
+
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
+ if (buffer)
+ RemoveTransferBuffer(buffer);
+
+ // Create new buffer.
+ buffer = buffer_tracker_->CreateBuffer(buffer_id, size);
+ DCHECK(buffer);
+ if (buffer->address() && data)
+ memcpy(buffer->address(), data, size);
+ return;
+ }
+
+ if (size == 0) {
+ return;
+ }
+
+ // If there is no data just send BufferData
+ if (!data) {
+ helper_->BufferData(target, size, 0, 0, usage);
+ return;
+ }
+
+ // See if we can send all at once.
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+
+ if (buffer.size() >= static_cast<unsigned int>(size)) {
+ memcpy(buffer.address(), data, size);
+ helper_->BufferData(
+ target,
+ size,
+ buffer.shm_id(),
+ buffer.offset(),
+ usage);
+ return;
+ }
+
+ // Make the buffer with BufferData then send via BufferSubData
+ helper_->BufferData(target, size, 0, 0, usage);
+ BufferSubDataHelperImpl(target, 0, size, data, &buffer);
+ CheckGLError();
+}
+
+void GLES2Implementation::BufferData(
+ GLenum target, GLsizeiptr size, const void* data, GLenum usage) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBufferData("
+ << GLES2Util::GetStringBufferTarget(target) << ", "
+ << size << ", "
+ << static_cast<const void*>(data) << ", "
+ << GLES2Util::GetStringBufferUsage(usage) << ")");
+ BufferDataHelper(target, size, data, usage);
+ CheckGLError();
+}
+
+void GLES2Implementation::BufferSubDataHelper(
+ GLenum target, GLintptr offset, GLsizeiptr size, const void* data) {
+ if (size == 0) {
+ return;
+ }
+
+ if (!ValidateSize("glBufferSubData", size) ||
+ !ValidateOffset("glBufferSubData", offset)) {
+ return;
+ }
+
+ GLuint buffer_id;
+ if (GetBoundPixelTransferBuffer(target, "glBufferSubData", &buffer_id)) {
+ if (!buffer_id) {
+ return;
+ }
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
+ if (!buffer) {
+ SetGLError(GL_INVALID_VALUE, "glBufferSubData", "unknown buffer");
+ return;
+ }
+
+ int32 end = 0;
+ int32 buffer_size = buffer->size();
+ if (!SafeAddInt32(offset, size, &end) || end > buffer_size) {
+ SetGLError(GL_INVALID_VALUE, "glBufferSubData", "out of range");
+ return;
+ }
+
+ if (buffer->address() && data)
+ memcpy(static_cast<uint8*>(buffer->address()) + offset, data, size);
+ return;
+ }
+
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ BufferSubDataHelperImpl(target, offset, size, data, &buffer);
+}
+
+void GLES2Implementation::BufferSubDataHelperImpl(
+ GLenum target, GLintptr offset, GLsizeiptr size, const void* data,
+ ScopedTransferBufferPtr* buffer) {
+ DCHECK(buffer);
+ DCHECK_GT(size, 0);
+
+ const int8* source = static_cast<const int8*>(data);
+ while (size) {
+ if (!buffer->valid() || buffer->size() == 0) {
+ buffer->Reset(size);
+ if (!buffer->valid()) {
+ return;
+ }
+ }
+ memcpy(buffer->address(), source, buffer->size());
+ helper_->BufferSubData(
+ target, offset, buffer->size(), buffer->shm_id(), buffer->offset());
+ offset += buffer->size();
+ source += buffer->size();
+ size -= buffer->size();
+ buffer->Release();
+ }
+}
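+
+// Example of the chunking above: uploading 1 MB of data when the transfer
+// buffer can only hand out 256 KB at a time (an illustrative size) results in
+// four BufferSubData commands, each covering the next quarter of the data at
+// an increasing offset.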
+
+void GLES2Implementation::BufferSubData(
+ GLenum target, GLintptr offset, GLsizeiptr size, const void* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBufferSubData("
+ << GLES2Util::GetStringBufferTarget(target) << ", "
+ << offset << ", " << size << ", "
+ << static_cast<const void*>(data) << ")");
+ BufferSubDataHelper(target, offset, size, data);
+ CheckGLError();
+}
+
+void GLES2Implementation::RemoveTransferBuffer(BufferTracker::Buffer* buffer) {
+ int32 token = buffer->last_usage_token();
+ uint32 async_token = buffer->last_async_upload_token();
+
+ if (async_token) {
+ if (HasAsyncUploadTokenPassed(async_token)) {
+ buffer_tracker_->Free(buffer);
+ } else {
+ detached_async_upload_memory_.push_back(
+ std::make_pair(buffer->address(), async_token));
+ buffer_tracker_->Unmanage(buffer);
+ }
+ } else if (token) {
+ if (helper_->HasTokenPassed(token))
+ buffer_tracker_->Free(buffer);
+ else
+ buffer_tracker_->FreePendingToken(buffer, token);
+ } else {
+ buffer_tracker_->Free(buffer);
+ }
+
+ buffer_tracker_->RemoveBuffer(buffer->id());
+}
+
+bool GLES2Implementation::GetBoundPixelTransferBuffer(
+ GLenum target,
+ const char* function_name,
+ GLuint* buffer_id) {
+ *buffer_id = 0;
+
+ switch (target) {
+ case GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM:
+ *buffer_id = bound_pixel_pack_transfer_buffer_id_;
+ break;
+ case GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM:
+ *buffer_id = bound_pixel_unpack_transfer_buffer_id_;
+ break;
+ default:
+ // Unknown target
+ return false;
+ }
+ if (!*buffer_id) {
+ SetGLError(GL_INVALID_OPERATION, function_name, "no buffer bound");
+ }
+ return true;
+}
+
+BufferTracker::Buffer*
+GLES2Implementation::GetBoundPixelUnpackTransferBufferIfValid(
+ GLuint buffer_id,
+ const char* function_name,
+ GLuint offset, GLsizei size) {
+ DCHECK(buffer_id);
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
+ if (!buffer) {
+ SetGLError(GL_INVALID_OPERATION, function_name, "invalid buffer");
+ return NULL;
+ }
+ if (buffer->mapped()) {
+ SetGLError(GL_INVALID_OPERATION, function_name, "buffer mapped");
+ return NULL;
+ }
+ if ((buffer->size() - offset) < static_cast<GLuint>(size)) {
+ SetGLError(GL_INVALID_VALUE, function_name, "unpack size too large");
+ return NULL;
+ }
+ return buffer;
+}
+
+void GLES2Implementation::CompressedTexImage2D(
+ GLenum target, GLint level, GLenum internalformat, GLsizei width,
+ GLsizei height, GLint border, GLsizei image_size, const void* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCompressedTexImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << GLES2Util::GetStringCompressedTextureFormat(internalformat) << ", "
+ << width << ", " << height << ", " << border << ", "
+ << image_size << ", "
+ << static_cast<const void*>(data) << ")");
+ if (width < 0 || height < 0 || level < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCompressedTexImage2D", "dimension < 0");
+ return;
+ }
+ if (border != 0) {
+ SetGLError(GL_INVALID_VALUE, "glCompressedTexImage2D", "border != 0");
+ return;
+ }
+ if (height == 0 || width == 0) {
+ return;
+ }
+ // If there's a pixel unpack buffer bound use it when issuing
+ // CompressedTexImage2D.
+ if (bound_pixel_unpack_transfer_buffer_id_) {
+ GLuint offset = ToGLuint(data);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glCompressedTexImage2D", offset, image_size);
+ if (buffer && buffer->shm_id() != -1) {
+ helper_->CompressedTexImage2D(
+ target, level, internalformat, width, height, image_size,
+ buffer->shm_id(), buffer->shm_offset() + offset);
+ buffer->set_last_usage_token(helper_->InsertToken());
+ }
+ return;
+ }
+ SetBucketContents(kResultBucketId, data, image_size);
+ helper_->CompressedTexImage2DBucket(
+ target, level, internalformat, width, height, kResultBucketId);
+ // Free the bucket. This is not required, but it does free up the memory,
+ // and we don't have to wait for the result, so from the client's
+ // perspective it's cheap.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ CheckGLError();
+}
+
+void GLES2Implementation::CompressedTexSubImage2D(
+ GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format, GLsizei image_size, const void* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCompressedTexSubImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << xoffset << ", " << yoffset << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringCompressedTextureFormat(format) << ", "
+ << image_size << ", "
+ << static_cast<const void*>(data) << ")");
+ if (width < 0 || height < 0 || level < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCompressedTexSubImage2D", "dimension < 0");
+ return;
+ }
+ // If there's a pixel unpack buffer bound use it when issuing
+ // CompressedTexSubImage2D.
+ if (bound_pixel_unpack_transfer_buffer_id_) {
+ GLuint offset = ToGLuint(data);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glCompressedTexSubImage2D", offset, image_size);
+ if (buffer && buffer->shm_id() != -1) {
+ helper_->CompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, image_size,
+ buffer->shm_id(), buffer->shm_offset() + offset);
+ buffer->set_last_usage_token(helper_->InsertToken());
+ CheckGLError();
+ }
+ return;
+ }
+ SetBucketContents(kResultBucketId, data, image_size);
+ helper_->CompressedTexSubImage2DBucket(
+ target, level, xoffset, yoffset, width, height, format, kResultBucketId);
+ // Free the bucket. This is not required, but it does free up the memory,
+ // and we don't have to wait for the result, so from the client's
+ // perspective it's cheap.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ CheckGLError();
+}
+
+namespace {
+
+void CopyRectToBuffer(
+ const void* pixels,
+ uint32 height,
+ uint32 unpadded_row_size,
+ uint32 pixels_padded_row_size,
+ bool flip_y,
+ void* buffer,
+ uint32 buffer_padded_row_size) {
+ const int8* source = static_cast<const int8*>(pixels);
+ int8* dest = static_cast<int8*>(buffer);
+ if (flip_y || pixels_padded_row_size != buffer_padded_row_size) {
+ if (flip_y) {
+ dest += buffer_padded_row_size * (height - 1);
+ }
+ // The last row is copied unpadded at the end.
+ for (; height > 1; --height) {
+ memcpy(dest, source, buffer_padded_row_size);
+ if (flip_y) {
+ dest -= buffer_padded_row_size;
+ } else {
+ dest += buffer_padded_row_size;
+ }
+ source += pixels_padded_row_size;
+ }
+ memcpy(dest, source, unpadded_row_size);
+ } else {
+ uint32 size = (height - 1) * pixels_padded_row_size + unpadded_row_size;
+ memcpy(dest, source, size);
+ }
+}
+
+} // anonymous namespace
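+
+// Example for CopyRectToBuffer above: with height == 3, padded row sizes of 12
+// bytes, an unpadded row size of 9 bytes and flip_y == true, source row 0 is
+// written to the last destination row, source row 1 to the middle row, and the
+// final memcpy places source row 2, unpadded, at the start of the buffer.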
+
+void GLES2Implementation::TexImage2D(
+ GLenum target, GLint level, GLint internalformat, GLsizei width,
+ GLsizei height, GLint border, GLenum format, GLenum type,
+ const void* pixels) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << GLES2Util::GetStringTextureInternalFormat(internalformat) << ", "
+ << width << ", " << height << ", " << border << ", "
+ << GLES2Util::GetStringTextureFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << static_cast<const void*>(pixels) << ")");
+ if (level < 0 || height < 0 || width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "dimension < 0");
+ return;
+ }
+ if (border != 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "border != 0");
+ return;
+ }
+ uint32 size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, unpack_alignment_, &size,
+ &unpadded_row_size, &padded_row_size)) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "image size too large");
+ return;
+ }
+
+ // If there's a pixel unpack buffer bound use it when issuing TexImage2D.
+ if (bound_pixel_unpack_transfer_buffer_id_) {
+ GLuint offset = ToGLuint(pixels);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glTexImage2D", offset, size);
+ if (buffer && buffer->shm_id() != -1) {
+ helper_->TexImage2D(
+ target, level, internalformat, width, height, format, type,
+ buffer->shm_id(), buffer->shm_offset() + offset);
+ buffer->set_last_usage_token(helper_->InsertToken());
+ CheckGLError();
+ }
+ return;
+ }
+
+ // If there's no data just issue TexImage2D
+ if (!pixels) {
+ helper_->TexImage2D(
+ target, level, internalformat, width, height, format, type,
+ 0, 0);
+ CheckGLError();
+ return;
+ }
+
+ // compute the advance bytes per row for the src pixels
+ uint32 src_padded_row_size;
+ if (unpack_row_length_ > 0) {
+ if (!GLES2Util::ComputeImagePaddedRowSize(
+ unpack_row_length_, format, type, unpack_alignment_,
+ &src_padded_row_size)) {
+ SetGLError(
+ GL_INVALID_VALUE, "glTexImage2D", "unpack row length too large");
+ return;
+ }
+ } else {
+ src_padded_row_size = padded_row_size;
+ }
+
+ // advance pixels pointer past the skip rows and skip pixels
+ pixels = reinterpret_cast<const int8*>(pixels) +
+ unpack_skip_rows_ * src_padded_row_size;
+ if (unpack_skip_pixels_) {
+ uint32 group_size = GLES2Util::ComputeImageGroupSize(format, type);
+ pixels = reinterpret_cast<const int8*>(pixels) +
+ unpack_skip_pixels_ * group_size;
+ }
+
+ // Check if we can send it all at once.
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+
+ if (buffer.size() >= size) {
+ CopyRectToBuffer(
+ pixels, height, unpadded_row_size, src_padded_row_size, unpack_flip_y_,
+ buffer.address(), padded_row_size);
+ helper_->TexImage2D(
+ target, level, internalformat, width, height, format, type,
+ buffer.shm_id(), buffer.offset());
+ CheckGLError();
+ return;
+ }
+
+ // No, so send it using TexSubImage2D.
+ helper_->TexImage2D(
+ target, level, internalformat, width, height, format, type,
+ 0, 0);
+ TexSubImage2DImpl(
+ target, level, 0, 0, width, height, format, type, unpadded_row_size,
+ pixels, src_padded_row_size, GL_TRUE, &buffer, padded_row_size);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexSubImage2D(
+ GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format, GLenum type, const void* pixels) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexSubImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << xoffset << ", " << yoffset << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringTextureFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << static_cast<const void*>(pixels) << ")");
+
+ if (level < 0 || height < 0 || width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexSubImage2D", "dimension < 0");
+ return;
+ }
+ if (height == 0 || width == 0) {
+ return;
+ }
+
+ uint32 temp_size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, unpack_alignment_, &temp_size,
+ &unpadded_row_size, &padded_row_size)) {
+ SetGLError(GL_INVALID_VALUE, "glTexSubImage2D", "size to large");
+ return;
+ }
+
+ // If there's a pixel unpack buffer bound use it when issuing TexSubImage2D.
+ if (bound_pixel_unpack_transfer_buffer_id_) {
+ GLuint offset = ToGLuint(pixels);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glTexSubImage2D", offset, temp_size);
+ if (buffer && buffer->shm_id() != -1) {
+ helper_->TexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type,
+ buffer->shm_id(), buffer->shm_offset() + offset, false);
+ buffer->set_last_usage_token(helper_->InsertToken());
+ CheckGLError();
+ }
+ return;
+ }
+
+ // compute the advance bytes per row for the src pixels
+ uint32 src_padded_row_size;
+ if (unpack_row_length_ > 0) {
+ if (!GLES2Util::ComputeImagePaddedRowSize(
+ unpack_row_length_, format, type, unpack_alignment_,
+ &src_padded_row_size)) {
+ SetGLError(
+ GL_INVALID_VALUE, "glTexImage2D", "unpack row length too large");
+ return;
+ }
+ } else {
+ src_padded_row_size = padded_row_size;
+ }
+
+ // advance pixels pointer past the skip rows and skip pixels
+ pixels = reinterpret_cast<const int8*>(pixels) +
+ unpack_skip_rows_ * src_padded_row_size;
+ if (unpack_skip_pixels_) {
+ uint32 group_size = GLES2Util::ComputeImageGroupSize(format, type);
+ pixels = reinterpret_cast<const int8*>(pixels) +
+ unpack_skip_pixels_ * group_size;
+ }
+
+ ScopedTransferBufferPtr buffer(temp_size, helper_, transfer_buffer_);
+ TexSubImage2DImpl(
+ target, level, xoffset, yoffset, width, height, format, type,
+ unpadded_row_size, pixels, src_padded_row_size, GL_FALSE, &buffer,
+ padded_row_size);
+ CheckGLError();
+}
+
+static GLint ComputeNumRowsThatFitInBuffer(
+ uint32 padded_row_size, uint32 unpadded_row_size,
+ unsigned int size) {
+ DCHECK_GE(unpadded_row_size, 0u);
+ if (padded_row_size == 0) {
+ return 1;
+ }
+ GLint num_rows = size / padded_row_size;
+ return num_rows + (size - num_rows * padded_row_size) / unpadded_row_size;
+}
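+
+// Example: with a padded row size of 16 bytes, an unpadded row size of 12
+// bytes and a 60 byte buffer, 60 / 16 gives 3 full padded rows and the 12
+// remaining bytes hold one more unpadded (final) row, so 4 is returned.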
+
+void GLES2Implementation::TexSubImage2DImpl(
+ GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format, GLenum type, uint32 unpadded_row_size,
+ const void* pixels, uint32 pixels_padded_row_size, GLboolean internal,
+ ScopedTransferBufferPtr* buffer, uint32 buffer_padded_row_size) {
+ DCHECK(buffer);
+ DCHECK_GE(level, 0);
+ DCHECK_GT(height, 0);
+ DCHECK_GT(width, 0);
+
+ const int8* source = reinterpret_cast<const int8*>(pixels);
+ GLint original_yoffset = yoffset;
+ // Transfer by rows.
+ while (height) {
+ unsigned int desired_size =
+ buffer_padded_row_size * (height - 1) + unpadded_row_size;
+ if (!buffer->valid() || buffer->size() == 0) {
+ buffer->Reset(desired_size);
+ if (!buffer->valid()) {
+ return;
+ }
+ }
+
+ GLint num_rows = ComputeNumRowsThatFitInBuffer(
+ buffer_padded_row_size, unpadded_row_size, buffer->size());
+ num_rows = std::min(num_rows, height);
+ CopyRectToBuffer(
+ source, num_rows, unpadded_row_size, pixels_padded_row_size,
+ unpack_flip_y_, buffer->address(), buffer_padded_row_size);
+ GLint y = unpack_flip_y_ ? original_yoffset + height - num_rows : yoffset;
+ helper_->TexSubImage2D(
+ target, level, xoffset, y, width, num_rows, format, type,
+ buffer->shm_id(), buffer->offset(), internal);
+ buffer->Release();
+ yoffset += num_rows;
+ source += num_rows * pixels_padded_row_size;
+ height -= num_rows;
+ }
+}
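+
+// Example of the flip_y offset math above: uploading an 8 row region at
+// yoffset 0 in chunks of, say, 3, 3 and 2 rows issues TexSubImage2D at y = 5,
+// y = 2 and y = 0 respectively, so each chunk lands in the vertically
+// mirrored part of the destination rectangle.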
+
+bool GLES2Implementation::GetActiveAttribHelper(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size,
+ GLenum* type, char* name) {
+ // Clear the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ typedef cmds::GetActiveAttrib::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return false;
+ }
+ // Set as failed so if the command fails we'll recover.
+ result->success = false;
+ helper_->GetActiveAttrib(program, index, kResultBucketId,
+ GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ if (result->success) {
+ if (size) {
+ *size = result->size;
+ }
+ if (type) {
+ *type = result->type;
+ }
+ if (length || name) {
+ std::vector<int8> str;
+ GetBucketContents(kResultBucketId, &str);
+ GLsizei max_size = std::min(static_cast<size_t>(bufsize) - 1,
+ std::max(static_cast<size_t>(0),
+ str.size() - 1));
+ if (length) {
+ *length = max_size;
+ }
+ if (name && bufsize > 0) {
+ memcpy(name, &str[0], max_size);
+ name[max_size] = '\0';
+ }
+ }
+ }
+ return result->success != 0;
+}
+
+void GLES2Implementation::GetActiveAttrib(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size,
+ GLenum* type, char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetActiveAttrib("
+ << program << ", " << index << ", " << bufsize << ", "
+ << static_cast<const void*>(length) << ", "
+ << static_cast<const void*>(size) << ", "
+ << static_cast<const void*>(type) << ", "
+ << static_cast<const void*>(name) << ")");
+ if (bufsize < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGetActiveAttrib", "bufsize < 0");
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetActiveAttrib");
+ bool success = share_group_->program_info_manager()->GetActiveAttrib(
+ this, program, index, bufsize, length, size, type, name);
+ if (success) {
+ if (size) {
+ GPU_CLIENT_LOG(" size: " << *size);
+ }
+ if (type) {
+ GPU_CLIENT_LOG(" type: " << GLES2Util::GetStringEnum(*type));
+ }
+ if (name) {
+ GPU_CLIENT_LOG(" name: " << name);
+ }
+ }
+ CheckGLError();
+}
+
+bool GLES2Implementation::GetActiveUniformHelper(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size,
+ GLenum* type, char* name) {
+ // Clear the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ typedef cmds::GetActiveUniform::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return false;
+ }
+ // Set as failed so if the command fails we'll recover.
+ result->success = false;
+ helper_->GetActiveUniform(program, index, kResultBucketId,
+ GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ if (result->success) {
+ if (size) {
+ *size = result->size;
+ }
+ if (type) {
+ *type = result->type;
+ }
+ if (length || name) {
+ std::vector<int8> str;
+ GetBucketContents(kResultBucketId, &str);
+ GLsizei max_size = std::min(static_cast<size_t>(bufsize) - 1,
+ std::max(static_cast<size_t>(0),
+ str.size() - 1));
+ if (length) {
+ *length = max_size;
+ }
+ if (name && bufsize > 0) {
+ memcpy(name, &str[0], max_size);
+ name[max_size] = '\0';
+ }
+ }
+ }
+ return result->success != 0;
+}
+
+void GLES2Implementation::GetActiveUniform(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size,
+ GLenum* type, char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetActiveUniform("
+ << program << ", " << index << ", " << bufsize << ", "
+ << static_cast<const void*>(length) << ", "
+ << static_cast<const void*>(size) << ", "
+ << static_cast<const void*>(type) << ", "
+ << static_cast<const void*>(name) << ")");
+ if (bufsize < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGetActiveUniform", "bufsize < 0");
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetActiveUniform");
+ bool success = share_group_->program_info_manager()->GetActiveUniform(
+ this, program, index, bufsize, length, size, type, name);
+ if (success) {
+ if (size) {
+ GPU_CLIENT_LOG(" size: " << *size);
+ }
+ if (type) {
+ GPU_CLIENT_LOG(" type: " << GLES2Util::GetStringEnum(*type));
+ }
+ if (name) {
+ GPU_CLIENT_LOG(" name: " << name);
+ }
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::GetAttachedShaders(
+ GLuint program, GLsizei maxcount, GLsizei* count, GLuint* shaders) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetAttachedShaders("
+ << program << ", " << maxcount << ", "
+ << static_cast<const void*>(count) << ", "
+ << static_cast<const void*>(shaders) << ")");
+ if (maxcount < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGetAttachedShaders", "maxcount < 0");
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetAttachedShaders");
+ typedef cmds::GetAttachedShaders::Result Result;
+ uint32 size = Result::ComputeSize(maxcount);
+ Result* result = static_cast<Result*>(transfer_buffer_->Alloc(size));
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetAttachedShaders(
+ program,
+ transfer_buffer_->GetShmId(),
+ transfer_buffer_->GetOffset(result),
+ size);
+ int32 token = helper_->InsertToken();
+ WaitForCmd();
+ if (count) {
+ *count = result->GetNumResults();
+ }
+ result->CopyResult(shaders);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ transfer_buffer_->FreePendingToken(result, token);
+ CheckGLError();
+}
+
+void GLES2Implementation::GetShaderPrecisionFormat(
+ GLenum shadertype, GLenum precisiontype, GLint* range, GLint* precision) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetShaderPrecisionFormat("
+ << GLES2Util::GetStringShaderType(shadertype) << ", "
+ << GLES2Util::GetStringShaderPrecision(precisiontype) << ", "
+ << static_cast<const void*>(range) << ", "
+ << static_cast<const void*>(precision) << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetShaderPrecisionFormat");
+ typedef cmds::GetShaderPrecisionFormat::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+
+ GLStaticState::ShaderPrecisionKey key(shadertype, precisiontype);
+ GLStaticState::ShaderPrecisionMap::iterator i =
+ static_state_.shader_precisions.find(key);
+ if (i != static_state_.shader_precisions.end()) {
+ *result = i->second;
+ } else {
+ result->success = false;
+ helper_->GetShaderPrecisionFormat(
+ shadertype, precisiontype, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ if (result->success)
+ static_state_.shader_precisions[key] = *result;
+ }
+
+ if (result->success) {
+ if (range) {
+ range[0] = result->min_range;
+ range[1] = result->max_range;
+ GPU_CLIENT_LOG(" min_range: " << range[0]);
+ GPU_CLIENT_LOG(" min_range: " << range[1]);
+ }
+ if (precision) {
+ precision[0] = result->precision;
+ GPU_CLIENT_LOG(" min_range: " << precision[0]);
+ }
+ }
+ CheckGLError();
+}
+
+const GLubyte* GLES2Implementation::GetStringHelper(GLenum name) {
+ const char* result = NULL;
+ // Clears the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetString(name, kResultBucketId);
+ std::string str;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ // Adds extensions implemented on client side only.
+ switch (name) {
+ case GL_EXTENSIONS:
+ str += std::string(str.empty() ? "" : " ") +
+ "GL_CHROMIUM_flipy "
+ "GL_EXT_unpack_subimage "
+ "GL_CHROMIUM_map_sub";
+ if (capabilities_.image)
+ str += " GL_CHROMIUM_image GL_CHROMIUM_gpu_memory_buffer_image";
+ if (capabilities_.future_sync_points)
+ str += " GL_CHROMIUM_future_sync_point";
+ break;
+ default:
+ break;
+ }
+
+ // Because of WebGL the extensions can change. We have to cache each unique
+ // result since we don't know when the client will stop referring to a
+ // previous one it queries.
+ GLStringMap::iterator it = gl_strings_.find(name);
+ if (it == gl_strings_.end()) {
+ std::set<std::string> strings;
+ std::pair<GLStringMap::iterator, bool> insert_result =
+ gl_strings_.insert(std::make_pair(name, strings));
+ DCHECK(insert_result.second);
+ it = insert_result.first;
+ }
+ std::set<std::string>& string_set = it->second;
+ std::set<std::string>::const_iterator sit = string_set.find(str);
+ if (sit != string_set.end()) {
+ result = sit->c_str();
+ } else {
+ std::pair<std::set<std::string>::const_iterator, bool> insert_result =
+ string_set.insert(str);
+ DCHECK(insert_result.second);
+ result = insert_result.first->c_str();
+ }
+ }
+ return reinterpret_cast<const GLubyte*>(result);
+}
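+
+// Example of the caching above: two glGetString(GL_EXTENSIONS) calls that see
+// the same extension string get the same cached pointer back; if the string
+// later changes, a new entry is added to the per-name set and pointers handed
+// out for the old value stay valid.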
+
+const GLubyte* GLES2Implementation::GetString(GLenum name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetString("
+ << GLES2Util::GetStringStringType(name) << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetString");
+ const GLubyte* result = GetStringHelper(name);
+ GPU_CLIENT_LOG(" returned " << reinterpret_cast<const char*>(result));
+ CheckGLError();
+ return result;
+}
+
+void GLES2Implementation::GetUniformfv(
+ GLuint program, GLint location, GLfloat* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetUniformfv("
+ << program << ", " << location << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetUniformfv");
+ typedef cmds::GetUniformfv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetUniformfv(
+ program, location, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GetUniformiv(
+ GLuint program, GLint location, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetUniformiv("
+ << program << ", " << location << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetUniformiv");
+ typedef cmds::GetUniformiv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetUniformiv(
+ program, location, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::ReadPixels(
+ GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format,
+ GLenum type, void* pixels) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glReadPixels("
+ << xoffset << ", " << yoffset << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringReadPixelFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << static_cast<const void*>(pixels) << ")");
+ if (width < 0 || height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions < 0");
+ return;
+ }
+ if (width == 0 || height == 0) {
+ return;
+ }
+
+ // glReadPixels pads the size of each row of pixels by an amount specified by
+ // glPixelStorei. We have to take that into account in two ways: the pixels
+ // returned from the ReadPixels command will include that padding, and when we
+ // copy the results to the user's buffer we must not write over those padding
+ // bytes but leave them as they are.
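+ // For example, reading rows of width 3 in GL_RGB / GL_UNSIGNED_BYTE with
+ // GL_PACK_ALIGNMENT at its default of 4 gives an unpadded row size of 9
+ // bytes and a padded row size of 12 bytes; the extra 3 bytes per row must be
+ // skipped when copying into the caller's memory.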
+
+ TRACE_EVENT0("gpu", "GLES2::ReadPixels");
+ typedef cmds::ReadPixels::Result Result;
+
+ int8* dest = reinterpret_cast<int8*>(pixels);
+ uint32 temp_size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, 2, format, type, pack_alignment_, &temp_size, &unpadded_row_size,
+ &padded_row_size)) {
+ SetGLError(GL_INVALID_VALUE, "glReadPixels", "size too large.");
+ return;
+ }
+
+ if (bound_pixel_pack_transfer_buffer_id_) {
+ GLuint offset = ToGLuint(pixels);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_pack_transfer_buffer_id_,
+ "glReadPixels", offset, padded_row_size * height);
+ if (buffer && buffer->shm_id() != -1) {
+ helper_->ReadPixels(xoffset, yoffset, width, height, format, type,
+ buffer->shm_id(), buffer->shm_offset(),
+ 0, 0, true);
+ CheckGLError();
+ }
+ return;
+ }
+
+ if (!pixels) {
+ SetGLError(GL_INVALID_OPERATION, "glReadPixels", "pixels = NULL");
+ return;
+ }
+
+ // Transfer by rows, copying as many rows per pass as fit in the transfer
+ // buffer.
+ while (height) {
+ GLsizei desired_size = padded_row_size * (height - 1) + unpadded_row_size;
+ ScopedTransferBufferPtr buffer(desired_size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+ GLint num_rows = ComputeNumRowsThatFitInBuffer(
+ padded_row_size, unpadded_row_size, buffer.size());
+ num_rows = std::min(num_rows, height);
+ // NOTE: We must look up the address of the result area AFTER allocation
+ // of the transfer buffer since the transfer buffer may be reallocated.
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ *result = 0; // mark as failed.
+ helper_->ReadPixels(
+ xoffset, yoffset, width, num_rows, format, type,
+ buffer.shm_id(), buffer.offset(),
+ GetResultShmId(), GetResultShmOffset(),
+ false);
+ WaitForCmd();
+ if (*result != 0) {
+ // when doing a y-flip we have to iterate through top-to-bottom chunks
+ // of the dst. The service side handles reversing the rows within a
+ // chunk.
+ int8* rows_dst;
+ if (pack_reverse_row_order_) {
+ rows_dst = dest + (height - num_rows) * padded_row_size;
+ } else {
+ rows_dst = dest;
+ }
+ // We have to copy 1 row at a time to avoid writing pad bytes.
+ const int8* src = static_cast<const int8*>(buffer.address());
+ for (GLint yy = 0; yy < num_rows; ++yy) {
+ memcpy(rows_dst, src, unpadded_row_size);
+ rows_dst += padded_row_size;
+ src += padded_row_size;
+ }
+ if (!pack_reverse_row_order_) {
+ dest = rows_dst;
+ }
+ }
+ // If it was not marked as successful, exit.
+ if (*result == 0) {
+ return;
+ }
+ yoffset += num_rows;
+ height -= num_rows;
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::ActiveTexture(GLenum texture) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glActiveTexture("
+ << GLES2Util::GetStringEnum(texture) << ")");
+ GLuint texture_index = texture - GL_TEXTURE0;
+ if (texture_index >= static_cast<GLuint>(
+ static_state_.int_state.max_combined_texture_image_units)) {
+ SetGLErrorInvalidEnum(
+ "glActiveTexture", texture, "texture");
+ return;
+ }
+
+ active_texture_unit_ = texture_index;
+ helper_->ActiveTexture(texture);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenBuffersHelper(
+ GLsizei /* n */, const GLuint* /* buffers */) {
+}
+
+void GLES2Implementation::GenFramebuffersHelper(
+ GLsizei /* n */, const GLuint* /* framebuffers */) {
+}
+
+void GLES2Implementation::GenRenderbuffersHelper(
+ GLsizei /* n */, const GLuint* /* renderbuffers */) {
+}
+
+void GLES2Implementation::GenTexturesHelper(
+ GLsizei /* n */, const GLuint* /* textures */) {
+}
+
+void GLES2Implementation::GenVertexArraysOESHelper(
+ GLsizei n, const GLuint* arrays) {
+ vertex_array_object_manager_->GenVertexArrays(n, arrays);
+}
+
+void GLES2Implementation::GenQueriesEXTHelper(
+ GLsizei /* n */, const GLuint* /* queries */) {
+}
+
+// NOTE #1: On old versions of OpenGL, calling glBindXXX with an unused id
+// generates a new resource. On newer versions of OpenGL it does not. The code
+// related to binding below will need to change if we switch to the new OpenGL
+// model. Specifically, it assumes a bind will succeed, which is always true in
+// the old model but possibly not true in the new model if another context has
+// deleted the resource.
+
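+// For example, under the old model the following is valid even though the id
+// was never returned by glGenBuffers (illustrative sketch only; gl is assumed
+// to be a valid GLES2Interface*):
+//
+//   gl->BindBuffer(GL_ARRAY_BUFFER, 123u);  // 123 becomes a live buffer.
+//   gl->BufferData(GL_ARRAY_BUFFER, 16, NULL, GL_STATIC_DRAW);
+//
+// Under the new model that bind could fail if another context in the share
+// group had deleted the resource, which is what the TODOs below refer to.
+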
+bool GLES2Implementation::BindBufferHelper(
+ GLenum target, GLuint buffer_id) {
+ // TODO(gman): See note #1 above.
+ bool changed = false;
+ switch (target) {
+ case GL_ARRAY_BUFFER:
+ if (bound_array_buffer_id_ != buffer_id) {
+ bound_array_buffer_id_ = buffer_id;
+ changed = true;
+ }
+ break;
+ case GL_ELEMENT_ARRAY_BUFFER:
+ changed = vertex_array_object_manager_->BindElementArray(buffer_id);
+ break;
+ case GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM:
+ bound_pixel_pack_transfer_buffer_id_ = buffer_id;
+ break;
+ case GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM:
+ bound_pixel_unpack_transfer_buffer_id_ = buffer_id;
+ break;
+ default:
+ changed = true;
+ break;
+ }
+  // TODO(gman): There's a bug here. If the target is invalid the ID will not
+  // be used even though it's marked as used here.
+ GetIdHandler(id_namespaces::kBuffers)->MarkAsUsedForBind(buffer_id);
+ return changed;
+}
+
+bool GLES2Implementation::BindFramebufferHelper(
+ GLenum target, GLuint framebuffer) {
+ // TODO(gman): See note #1 above.
+ bool changed = false;
+ switch (target) {
+ case GL_FRAMEBUFFER:
+ if (bound_framebuffer_ != framebuffer ||
+ bound_read_framebuffer_ != framebuffer) {
+ bound_framebuffer_ = framebuffer;
+ bound_read_framebuffer_ = framebuffer;
+ changed = true;
+ }
+ break;
+ case GL_READ_FRAMEBUFFER:
+ if (!IsChromiumFramebufferMultisampleAvailable()) {
+ SetGLErrorInvalidEnum("glBindFramebuffer", target, "target");
+ return false;
+ }
+ if (bound_read_framebuffer_ != framebuffer) {
+ bound_read_framebuffer_ = framebuffer;
+ changed = true;
+ }
+ break;
+ case GL_DRAW_FRAMEBUFFER:
+ if (!IsChromiumFramebufferMultisampleAvailable()) {
+ SetGLErrorInvalidEnum("glBindFramebuffer", target, "target");
+ return false;
+ }
+ if (bound_framebuffer_ != framebuffer) {
+ bound_framebuffer_ = framebuffer;
+ changed = true;
+ }
+ break;
+ default:
+ SetGLErrorInvalidEnum("glBindFramebuffer", target, "target");
+ return false;
+ }
+ GetIdHandler(id_namespaces::kFramebuffers)->MarkAsUsedForBind(framebuffer);
+ return changed;
+}
+
+bool GLES2Implementation::BindRenderbufferHelper(
+ GLenum target, GLuint renderbuffer) {
+ // TODO(gman): See note #1 above.
+ bool changed = false;
+ switch (target) {
+ case GL_RENDERBUFFER:
+ if (bound_renderbuffer_ != renderbuffer) {
+ bound_renderbuffer_ = renderbuffer;
+ changed = true;
+ }
+ break;
+ default:
+ changed = true;
+ break;
+ }
+  // TODO(gman): There's a bug here. If the target is invalid the ID will not
+  // be used even though it's marked as used here.
+ GetIdHandler(id_namespaces::kRenderbuffers)->MarkAsUsedForBind(renderbuffer);
+ return changed;
+}
+
+bool GLES2Implementation::BindTextureHelper(GLenum target, GLuint texture) {
+ // TODO(gman): See note #1 above.
+ // TODO(gman): Change this to false once we figure out why it's failing
+ // on daisy.
+ bool changed = true;
+ TextureUnit& unit = texture_units_[active_texture_unit_];
+ switch (target) {
+ case GL_TEXTURE_2D:
+ if (unit.bound_texture_2d != texture) {
+ unit.bound_texture_2d = texture;
+ changed = true;
+ }
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ if (unit.bound_texture_cube_map != texture) {
+ unit.bound_texture_cube_map = texture;
+ changed = true;
+ }
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ if (unit.bound_texture_external_oes != texture) {
+ unit.bound_texture_external_oes = texture;
+ changed = true;
+ }
+ break;
+ default:
+ changed = true;
+ break;
+ }
+  // TODO(gman): There's a bug here. If the target is invalid the ID will not
+  // be used even though it's marked as used here.
+ GetIdHandler(id_namespaces::kTextures)->MarkAsUsedForBind(texture);
+ return changed;
+}
+
+bool GLES2Implementation::BindVertexArrayOESHelper(GLuint array) {
+ // TODO(gman): See note #1 above.
+ bool changed = false;
+ if (!vertex_array_object_manager_->BindVertexArray(array, &changed)) {
+ SetGLError(
+ GL_INVALID_OPERATION, "glBindVertexArrayOES",
+ "id was not generated with glGenVertexArrayOES");
+ }
+  // Unlike the other BindXXXHelpers we don't call MarkAsUsedForBind
+  // because, unlike other resources, VertexArrayObject ids must be
+  // generated by GenVertexArrays. Binding a random id will not
+  // generate a new object.
+ return changed;
+}
+
+bool GLES2Implementation::UseProgramHelper(GLuint program) {
+ bool changed = false;
+ if (current_program_ != program) {
+ current_program_ = program;
+ changed = true;
+ }
+ return changed;
+}
+
+bool GLES2Implementation::IsBufferReservedId(GLuint id) {
+ return vertex_array_object_manager_->IsReservedId(id);
+}
+
+void GLES2Implementation::DeleteBuffersHelper(
+ GLsizei n, const GLuint* buffers) {
+ if (!GetIdHandler(id_namespaces::kBuffers)->FreeIds(
+ this, n, buffers, &GLES2Implementation::DeleteBuffersStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteBuffers", "id not created by this context.");
+ return;
+ }
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (buffers[ii] == bound_array_buffer_id_) {
+ bound_array_buffer_id_ = 0;
+ }
+ vertex_array_object_manager_->UnbindBuffer(buffers[ii]);
+
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffers[ii]);
+ if (buffer)
+ RemoveTransferBuffer(buffer);
+
+ if (buffers[ii] == bound_pixel_unpack_transfer_buffer_id_) {
+ bound_pixel_unpack_transfer_buffer_id_ = 0;
+ }
+ }
+}
+
+void GLES2Implementation::DeleteBuffersStub(
+ GLsizei n, const GLuint* buffers) {
+ helper_->DeleteBuffersImmediate(n, buffers);
+}
+
+
+void GLES2Implementation::DeleteFramebuffersHelper(
+ GLsizei n, const GLuint* framebuffers) {
+ if (!GetIdHandler(id_namespaces::kFramebuffers)->FreeIds(
+ this, n, framebuffers, &GLES2Implementation::DeleteFramebuffersStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteFramebuffers", "id not created by this context.");
+ return;
+ }
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (framebuffers[ii] == bound_framebuffer_) {
+ bound_framebuffer_ = 0;
+ }
+ if (framebuffers[ii] == bound_read_framebuffer_) {
+ bound_read_framebuffer_ = 0;
+ }
+ }
+}
+
+void GLES2Implementation::DeleteFramebuffersStub(
+ GLsizei n, const GLuint* framebuffers) {
+ helper_->DeleteFramebuffersImmediate(n, framebuffers);
+}
+
+void GLES2Implementation::DeleteRenderbuffersHelper(
+ GLsizei n, const GLuint* renderbuffers) {
+ if (!GetIdHandler(id_namespaces::kRenderbuffers)->FreeIds(
+ this, n, renderbuffers, &GLES2Implementation::DeleteRenderbuffersStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteRenderbuffers", "id not created by this context.");
+ return;
+ }
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (renderbuffers[ii] == bound_renderbuffer_) {
+ bound_renderbuffer_ = 0;
+ }
+ }
+}
+
+void GLES2Implementation::DeleteRenderbuffersStub(
+ GLsizei n, const GLuint* renderbuffers) {
+ helper_->DeleteRenderbuffersImmediate(n, renderbuffers);
+}
+
+void GLES2Implementation::DeleteTexturesHelper(
+ GLsizei n, const GLuint* textures) {
+ if (!GetIdHandler(id_namespaces::kTextures)->FreeIds(
+ this, n, textures, &GLES2Implementation::DeleteTexturesStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteTextures", "id not created by this context.");
+ return;
+ }
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ for (GLint tt = 0;
+ tt < static_state_.int_state.max_combined_texture_image_units;
+ ++tt) {
+ TextureUnit& unit = texture_units_[tt];
+ if (textures[ii] == unit.bound_texture_2d) {
+ unit.bound_texture_2d = 0;
+ }
+ if (textures[ii] == unit.bound_texture_cube_map) {
+ unit.bound_texture_cube_map = 0;
+ }
+ if (textures[ii] == unit.bound_texture_external_oes) {
+ unit.bound_texture_external_oes = 0;
+ }
+ }
+ }
+}
+
+void GLES2Implementation::DeleteVertexArraysOESHelper(
+ GLsizei n, const GLuint* arrays) {
+ vertex_array_object_manager_->DeleteVertexArrays(n, arrays);
+ if (!GetIdHandler(id_namespaces::kVertexArrays)->FreeIds(
+ this, n, arrays, &GLES2Implementation::DeleteVertexArraysOESStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteVertexArraysOES", "id not created by this context.");
+ return;
+ }
+}
+
+void GLES2Implementation::DeleteVertexArraysOESStub(
+ GLsizei n, const GLuint* arrays) {
+ helper_->DeleteVertexArraysOESImmediate(n, arrays);
+}
+
+void GLES2Implementation::DeleteTexturesStub(
+ GLsizei n, const GLuint* textures) {
+ helper_->DeleteTexturesImmediate(n, textures);
+}
+
+void GLES2Implementation::DisableVertexAttribArray(GLuint index) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glDisableVertexAttribArray(" << index << ")");
+ vertex_array_object_manager_->SetAttribEnable(index, false);
+ helper_->DisableVertexAttribArray(index);
+ CheckGLError();
+}
+
+void GLES2Implementation::EnableVertexAttribArray(GLuint index) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnableVertexAttribArray("
+ << index << ")");
+ vertex_array_object_manager_->SetAttribEnable(index, true);
+ helper_->EnableVertexAttribArray(index);
+ CheckGLError();
+}
+
+void GLES2Implementation::DrawArrays(GLenum mode, GLint first, GLsizei count) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawArrays("
+ << GLES2Util::GetStringDrawMode(mode) << ", "
+ << first << ", " << count << ")");
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDrawArrays", "count < 0");
+ return;
+ }
+ bool simulated = false;
+ if (!vertex_array_object_manager_->SetupSimulatedClientSideBuffers(
+ "glDrawArrays", this, helper_, first + count, 0, &simulated)) {
+ return;
+ }
+ helper_->DrawArrays(mode, first, count);
+ RestoreArrayBuffer(simulated);
+ CheckGLError();
+}
+
+void GLES2Implementation::GetVertexAttribfv(
+ GLuint index, GLenum pname, GLfloat* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetVertexAttribfv("
+ << index << ", "
+ << GLES2Util::GetStringVertexAttribute(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ uint32 value = 0;
+ if (vertex_array_object_manager_->GetVertexAttrib(index, pname, &value)) {
+ *params = static_cast<float>(value);
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetVertexAttribfv");
+ typedef cmds::GetVertexAttribfv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetVertexAttribfv(
+ index, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GetVertexAttribiv(
+ GLuint index, GLenum pname, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetVertexAttribiv("
+ << index << ", "
+ << GLES2Util::GetStringVertexAttribute(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ uint32 value = 0;
+ if (vertex_array_object_manager_->GetVertexAttrib(index, pname, &value)) {
+ *params = value;
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetVertexAttribiv");
+ typedef cmds::GetVertexAttribiv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetVertexAttribiv(
+ index, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::Swap() {
+ SwapBuffers();
+}
+
+void GLES2Implementation::PartialSwapBuffers(const gfx::Rect& sub_buffer) {
+ PostSubBufferCHROMIUM(
+ sub_buffer.x(), sub_buffer.y(), sub_buffer.width(), sub_buffer.height());
+}
+
+static GLenum GetGLESOverlayTransform(gfx::OverlayTransform plane_transform) {
+ switch (plane_transform) {
+ case gfx::OVERLAY_TRANSFORM_INVALID:
+ break;
+ case gfx::OVERLAY_TRANSFORM_NONE:
+ return GL_OVERLAY_TRANSFORM_NONE_CHROMIUM;
+ case gfx::OVERLAY_TRANSFORM_FLIP_HORIZONTAL:
+ return GL_OVERLAY_TRANSFORM_FLIP_HORIZONTAL_CHROMIUM;
+ case gfx::OVERLAY_TRANSFORM_FLIP_VERTICAL:
+ return GL_OVERLAY_TRANSFORM_FLIP_VERTICAL_CHROMIUM;
+ case gfx::OVERLAY_TRANSFORM_ROTATE_90:
+ return GL_OVERLAY_TRANSFORM_ROTATE_90_CHROMIUM;
+ case gfx::OVERLAY_TRANSFORM_ROTATE_180:
+ return GL_OVERLAY_TRANSFORM_ROTATE_180_CHROMIUM;
+ case gfx::OVERLAY_TRANSFORM_ROTATE_270:
+ return GL_OVERLAY_TRANSFORM_ROTATE_270_CHROMIUM;
+ }
+ NOTREACHED();
+ return GL_OVERLAY_TRANSFORM_NONE_CHROMIUM;
+}
+
+void GLES2Implementation::ScheduleOverlayPlane(
+ int plane_z_order,
+ gfx::OverlayTransform plane_transform,
+ unsigned overlay_texture_id,
+ const gfx::Rect& display_bounds,
+ const gfx::RectF& uv_rect) {
+ ScheduleOverlayPlaneCHROMIUM(plane_z_order,
+ GetGLESOverlayTransform(plane_transform),
+ overlay_texture_id,
+ display_bounds.x(),
+ display_bounds.y(),
+ display_bounds.width(),
+ display_bounds.height(),
+ uv_rect.x(),
+ uv_rect.y(),
+ uv_rect.width(),
+ uv_rect.height());
+}
+
+GLboolean GLES2Implementation::EnableFeatureCHROMIUM(
+ const char* feature) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnableFeatureCHROMIUM("
+ << feature << ")");
+ TRACE_EVENT0("gpu", "GLES2::EnableFeatureCHROMIUM");
+ typedef cmds::EnableFeatureCHROMIUM::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return false;
+ }
+ *result = 0;
+ SetBucketAsCString(kResultBucketId, feature);
+ helper_->EnableFeatureCHROMIUM(
+ kResultBucketId, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ helper_->SetBucketSize(kResultBucketId, 0);
+ GPU_CLIENT_LOG(" returned " << GLES2Util::GetStringBool(*result));
+ return *result;
+}
+
+void* GLES2Implementation::MapBufferSubDataCHROMIUM(
+ GLuint target, GLintptr offset, GLsizeiptr size, GLenum access) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMapBufferSubDataCHROMIUM("
+ << target << ", " << offset << ", " << size << ", "
+ << GLES2Util::GetStringEnum(access) << ")");
+ // NOTE: target is NOT checked because the service will check it
+ // and we don't know what targets are valid.
+ if (access != GL_WRITE_ONLY) {
+ SetGLErrorInvalidEnum(
+ "glMapBufferSubDataCHROMIUM", access, "access");
+ return NULL;
+ }
+ if (!ValidateSize("glMapBufferSubDataCHROMIUM", size) ||
+ !ValidateOffset("glMapBufferSubDataCHROMIUM", offset)) {
+ return NULL;
+ }
+
+ int32 shm_id;
+ unsigned int shm_offset;
+ void* mem = mapped_memory_->Alloc(size, &shm_id, &shm_offset);
+ if (!mem) {
+ SetGLError(GL_OUT_OF_MEMORY, "glMapBufferSubDataCHROMIUM", "out of memory");
+ return NULL;
+ }
+
+ std::pair<MappedBufferMap::iterator, bool> result =
+ mapped_buffers_.insert(std::make_pair(
+ mem,
+ MappedBuffer(
+ access, shm_id, mem, shm_offset, target, offset, size)));
+ DCHECK(result.second);
+ GPU_CLIENT_LOG(" returned " << mem);
+ return mem;
+}
+
+void GLES2Implementation::UnmapBufferSubDataCHROMIUM(const void* mem) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glUnmapBufferSubDataCHROMIUM(" << mem << ")");
+ MappedBufferMap::iterator it = mapped_buffers_.find(mem);
+ if (it == mapped_buffers_.end()) {
+ SetGLError(
+ GL_INVALID_VALUE, "UnmapBufferSubDataCHROMIUM", "buffer not mapped");
+ return;
+ }
+ const MappedBuffer& mb = it->second;
+ helper_->BufferSubData(
+ mb.target, mb.offset, mb.size, mb.shm_id, mb.shm_offset);
+ mapped_memory_->FreePendingToken(mb.shm_memory, helper_->InsertToken());
+ mapped_buffers_.erase(it);
+ CheckGLError();
+}
+
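+// Illustrative use of the MapBufferSubData pair above (a sketch, not part of
+// the implementation; assumes a valid GLES2Interface* gl with a buffer of at
+// least 64 bytes bound to GL_ARRAY_BUFFER):
+//
+//   void* ptr = gl->MapBufferSubDataCHROMIUM(
+//       GL_ARRAY_BUFFER, 0, 64, GL_WRITE_ONLY);
+//   if (ptr) {
+//     memset(ptr, 0, 64);                    // Fill the client-side copy.
+//     gl->UnmapBufferSubDataCHROMIUM(ptr);   // Issues the BufferSubData.
+//   }
+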
+void* GLES2Implementation::MapTexSubImage2DCHROMIUM(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMapTexSubImage2DCHROMIUM("
+ << target << ", " << level << ", "
+ << xoffset << ", " << yoffset << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringTextureFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << GLES2Util::GetStringEnum(access) << ")");
+ if (access != GL_WRITE_ONLY) {
+ SetGLErrorInvalidEnum(
+ "glMapTexSubImage2DCHROMIUM", access, "access");
+ return NULL;
+ }
+ // NOTE: target is NOT checked because the service will check it
+ // and we don't know what targets are valid.
+ if (level < 0 || xoffset < 0 || yoffset < 0 || width < 0 || height < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glMapTexSubImage2DCHROMIUM", "bad dimensions");
+ return NULL;
+ }
+ uint32 size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, unpack_alignment_, &size, NULL, NULL)) {
+ SetGLError(
+ GL_INVALID_VALUE, "glMapTexSubImage2DCHROMIUM", "image size too large");
+ return NULL;
+ }
+ int32 shm_id;
+ unsigned int shm_offset;
+ void* mem = mapped_memory_->Alloc(size, &shm_id, &shm_offset);
+ if (!mem) {
+ SetGLError(GL_OUT_OF_MEMORY, "glMapTexSubImage2DCHROMIUM", "out of memory");
+ return NULL;
+ }
+
+ std::pair<MappedTextureMap::iterator, bool> result =
+ mapped_textures_.insert(std::make_pair(
+ mem,
+ MappedTexture(
+ access, shm_id, mem, shm_offset,
+ target, level, xoffset, yoffset, width, height, format, type)));
+ DCHECK(result.second);
+ GPU_CLIENT_LOG(" returned " << mem);
+ return mem;
+}
+
+void GLES2Implementation::UnmapTexSubImage2DCHROMIUM(const void* mem) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glUnmapTexSubImage2DCHROMIUM(" << mem << ")");
+ MappedTextureMap::iterator it = mapped_textures_.find(mem);
+ if (it == mapped_textures_.end()) {
+ SetGLError(
+ GL_INVALID_VALUE, "UnmapTexSubImage2DCHROMIUM", "texture not mapped");
+ return;
+ }
+ const MappedTexture& mt = it->second;
+ helper_->TexSubImage2D(
+ mt.target, mt.level, mt.xoffset, mt.yoffset, mt.width, mt.height,
+ mt.format, mt.type, mt.shm_id, mt.shm_offset, GL_FALSE);
+ mapped_memory_->FreePendingToken(mt.shm_memory, helper_->InsertToken());
+ mapped_textures_.erase(it);
+ CheckGLError();
+}
+
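+// Illustrative use of the MapTexSubImage2D pair above (a sketch, not part of
+// the implementation; assumes a valid GLES2Interface* gl and a GL_TEXTURE_2D
+// with level-0 storage of at least 16x16 RGBA):
+//
+//   void* ptr = gl->MapTexSubImage2DCHROMIUM(
+//       GL_TEXTURE_2D, 0, 0, 0, 16, 16, GL_RGBA, GL_UNSIGNED_BYTE,
+//       GL_WRITE_ONLY);
+//   if (ptr) {
+//     memset(ptr, 0xFF, 16 * 16 * 4);         // Write the pixels...
+//     gl->UnmapTexSubImage2DCHROMIUM(ptr);    // ...then issue TexSubImage2D.
+//   }
+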
+void GLES2Implementation::ResizeCHROMIUM(GLuint width, GLuint height,
+ float scale_factor) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glResizeCHROMIUM("
+ << width << ", " << height << ", " << scale_factor << ")");
+ helper_->ResizeCHROMIUM(width, height, scale_factor);
+ CheckGLError();
+}
+
+const GLchar* GLES2Implementation::GetRequestableExtensionsCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glGetRequestableExtensionsCHROMIUM()");
+ TRACE_EVENT0("gpu",
+ "GLES2Implementation::GetRequestableExtensionsCHROMIUM()");
+ const char* result = NULL;
+ // Clear the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetRequestableExtensionsCHROMIUM(kResultBucketId);
+ std::string str;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+    // The set of requestable extensions shrinks as we enable
+    // them. Because we don't know when the client will stop referring
+    // to a previously returned string (see GetString), we need to cache
+    // the unique results.
+ std::set<std::string>::const_iterator sit =
+ requestable_extensions_set_.find(str);
+ if (sit != requestable_extensions_set_.end()) {
+ result = sit->c_str();
+ } else {
+ std::pair<std::set<std::string>::const_iterator, bool> insert_result =
+ requestable_extensions_set_.insert(str);
+ DCHECK(insert_result.second);
+ result = insert_result.first->c_str();
+ }
+ }
+ GPU_CLIENT_LOG(" returned " << result);
+ return reinterpret_cast<const GLchar*>(result);
+}
+
+// TODO(gman): Remove this command. It's here for WebGL but is incompatible
+// with VirtualGL contexts.
+void GLES2Implementation::RequestExtensionCHROMIUM(const char* extension) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glRequestExtensionCHROMIUM("
+ << extension << ")");
+ SetBucketAsCString(kResultBucketId, extension);
+ helper_->RequestExtensionCHROMIUM(kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+
+ struct ExtensionCheck {
+ const char* extension;
+ ExtensionStatus* status;
+ };
+ const ExtensionCheck checks[] = {
+ {
+ "GL_ANGLE_pack_reverse_row_order",
+ &angle_pack_reverse_row_order_status_,
+ },
+ {
+ "GL_CHROMIUM_framebuffer_multisample",
+ &chromium_framebuffer_multisample_,
+ },
+ };
+ const size_t kNumChecks = sizeof(checks)/sizeof(checks[0]);
+ for (size_t ii = 0; ii < kNumChecks; ++ii) {
+ const ExtensionCheck& check = checks[ii];
+ if (*check.status == kUnavailableExtensionStatus &&
+ !strcmp(extension, check.extension)) {
+ *check.status = kUnknownExtensionStatus;
+ }
+ }
+}
+
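+// Illustrative flow for the two calls above (a sketch, not part of the
+// implementation; assumes a valid GLES2Interface* gl):
+//
+//   const GLchar* requestable = gl->GetRequestableExtensionsCHROMIUM();
+//   if (strstr(requestable, "GL_CHROMIUM_framebuffer_multisample")) {
+//     gl->RequestExtensionCHROMIUM("GL_CHROMIUM_framebuffer_multisample");
+//     // A later glGetString(GL_EXTENSIONS) may now include the extension.
+//   }
+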
+void GLES2Implementation::RateLimitOffscreenContextCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glRateLimitOffscreenCHROMIUM()");
+ // Wait if this would add too many rate limit tokens.
+ if (rate_limit_tokens_.size() == kMaxSwapBuffers) {
+ helper_->WaitForToken(rate_limit_tokens_.front());
+ rate_limit_tokens_.pop();
+ }
+ rate_limit_tokens_.push(helper_->InsertToken());
+}
+
+void GLES2Implementation::GetMultipleIntegervCHROMIUM(
+ const GLenum* pnames, GLuint count, GLint* results, GLsizeiptr size) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetMultipleIntegervCHROMIUM("
+ << static_cast<const void*>(pnames) << ", "
+ << count << ", " << results << ", "
+ << size << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLuint i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(
+ " " << i << ": " << GLES2Util::GetStringGLState(pnames[i]));
+ }
+ });
+ DCHECK(size >= 0 && FitInt32NonNegative<GLsizeiptr>(size));
+
+ GetMultipleIntegervState state(pnames, count, results, size);
+ if (!GetMultipleIntegervSetup(&state)) {
+ return;
+ }
+ state.buffer = transfer_buffer_->Alloc(state.transfer_buffer_size_needed);
+ if (!state.buffer) {
+ SetGLError(GL_OUT_OF_MEMORY, "glGetMultipleIntegervCHROMIUM",
+ "Transfer buffer allocation failed.");
+ return;
+ }
+ GetMultipleIntegervRequest(&state);
+ WaitForCmd();
+ GetMultipleIntegervOnCompleted(&state);
+
+ GPU_CLIENT_LOG(" returned");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int i = 0; i < state.num_results; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << (results[i]));
+ }
+ });
+
+ // TODO(gman): We should be able to free without a token.
+ transfer_buffer_->FreePendingToken(state.buffer, helper_->InsertToken());
+ CheckGLError();
+}
+
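+// Illustrative use of GetMultipleIntegervCHROMIUM (a sketch, not part of the
+// implementation; assumes a valid GLES2Interface* gl). The results array must
+// be zero-initialized and size must match the number of values requested.
+//
+//   const GLenum pnames[] = { GL_ACTIVE_TEXTURE, GL_MAX_TEXTURE_SIZE };
+//   GLint results[2] = { 0, 0 };  // Must be pre-zeroed.
+//   gl->GetMultipleIntegervCHROMIUM(pnames, 2, results, sizeof(results));
+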
+bool GLES2Implementation::GetMultipleIntegervSetup(
+ GetMultipleIntegervState* state) {
+ state->num_results = 0;
+ for (GLuint ii = 0; ii < state->pnames_count; ++ii) {
+ int num = util_.GLGetNumValuesReturned(state->pnames[ii]);
+ if (!num) {
+ SetGLErrorInvalidEnum(
+ "glGetMultipleIntegervCHROMIUM", state->pnames[ii], "pname");
+ return false;
+ }
+ state->num_results += num;
+ }
+ if (static_cast<size_t>(state->results_size) !=
+ state->num_results * sizeof(GLint)) {
+ SetGLError(GL_INVALID_VALUE, "glGetMultipleIntegervCHROMIUM", "bad size");
+ return false;
+ }
+ for (int ii = 0; ii < state->num_results; ++ii) {
+ if (state->results[ii] != 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glGetMultipleIntegervCHROMIUM", "results not set to zero.");
+ return false;
+ }
+ }
+ state->transfer_buffer_size_needed =
+ state->pnames_count * sizeof(state->pnames[0]) +
+ state->num_results * sizeof(state->results[0]);
+ return true;
+}
+
+void GLES2Implementation::GetMultipleIntegervRequest(
+ GetMultipleIntegervState* state) {
+ GLenum* pnames_buffer = static_cast<GLenum*>(state->buffer);
+ state->results_buffer = pnames_buffer + state->pnames_count;
+ memcpy(pnames_buffer, state->pnames, state->pnames_count * sizeof(GLenum));
+ memset(state->results_buffer, 0, state->num_results * sizeof(GLint));
+ helper_->GetMultipleIntegervCHROMIUM(
+ transfer_buffer_->GetShmId(),
+ transfer_buffer_->GetOffset(pnames_buffer),
+ state->pnames_count,
+ transfer_buffer_->GetShmId(),
+ transfer_buffer_->GetOffset(state->results_buffer),
+ state->results_size);
+}
+
+void GLES2Implementation::GetMultipleIntegervOnCompleted(
+ GetMultipleIntegervState* state) {
+  memcpy(state->results, state->results_buffer, state->results_size);
+}
+
+void GLES2Implementation::GetAllShaderPrecisionFormatsSetup(
+ GetAllShaderPrecisionFormatsState* state) {
+ state->transfer_buffer_size_needed =
+ state->precision_params_count *
+ sizeof(cmds::GetShaderPrecisionFormat::Result);
+}
+
+void GLES2Implementation::GetAllShaderPrecisionFormatsRequest(
+ GetAllShaderPrecisionFormatsState* state) {
+ typedef cmds::GetShaderPrecisionFormat::Result Result;
+ Result* result = static_cast<Result*>(state->results_buffer);
+
+ for (int i = 0; i < state->precision_params_count; i++) {
+ result->success = false;
+ helper_->GetShaderPrecisionFormat(state->precision_params[i][0],
+ state->precision_params[i][1],
+ transfer_buffer_->GetShmId(),
+ transfer_buffer_->GetOffset(result));
+ result++;
+ }
+}
+
+void GLES2Implementation::GetAllShaderPrecisionFormatsOnCompleted(
+ GetAllShaderPrecisionFormatsState* state) {
+ typedef cmds::GetShaderPrecisionFormat::Result Result;
+ Result* result = static_cast<Result*>(state->results_buffer);
+
+ for (int i = 0; i < state->precision_params_count; i++) {
+ if (result->success) {
+ const GLStaticState::ShaderPrecisionKey key(
+ state->precision_params[i][0], state->precision_params[i][1]);
+ static_state_.shader_precisions[key] = *result;
+ }
+ result++;
+ }
+}
+
+void GLES2Implementation::GetProgramInfoCHROMIUMHelper(
+ GLuint program, std::vector<int8>* result) {
+ DCHECK(result);
+ // Clear the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetProgramInfoCHROMIUM(program, kResultBucketId);
+ GetBucketContents(kResultBucketId, result);
+}
+
+void GLES2Implementation::GetProgramInfoCHROMIUM(
+ GLuint program, GLsizei bufsize, GLsizei* size, void* info) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ if (bufsize < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glProgramInfoCHROMIUM", "bufsize less than 0.");
+ return;
+ }
+ if (size == NULL) {
+ SetGLError(GL_INVALID_VALUE, "glProgramInfoCHROMIUM", "size is null.");
+ return;
+ }
+  // Make sure they've set size to 0, else the value will be undefined on a
+  // lost context.
+ DCHECK_EQ(0, *size);
+ std::vector<int8> result;
+ GetProgramInfoCHROMIUMHelper(program, &result);
+ if (result.empty()) {
+ return;
+ }
+ *size = result.size();
+ if (!info) {
+ return;
+ }
+ if (static_cast<size_t>(bufsize) < result.size()) {
+ SetGLError(GL_INVALID_OPERATION,
+ "glProgramInfoCHROMIUM", "bufsize is too small for result.");
+ return;
+ }
+ memcpy(info, &result[0], result.size());
+}
+
+GLuint GLES2Implementation::CreateStreamTextureCHROMIUM(GLuint texture) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] CreateStreamTextureCHROMIUM("
+ << texture << ")");
+ TRACE_EVENT0("gpu", "GLES2::CreateStreamTextureCHROMIUM");
+ helper_->CommandBufferHelper::Flush();
+ return gpu_control_->CreateStreamTexture(texture);
+}
+
+void GLES2Implementation::PostSubBufferCHROMIUM(
+ GLint x, GLint y, GLint width, GLint height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] PostSubBufferCHROMIUM("
+ << x << ", " << y << ", " << width << ", " << height << ")");
+ TRACE_EVENT2("gpu", "GLES2::PostSubBufferCHROMIUM",
+ "width", width, "height", height);
+
+ // Same flow control as GLES2Implementation::SwapBuffers (see comments there).
+ swap_buffers_tokens_.push(helper_->InsertToken());
+ helper_->PostSubBufferCHROMIUM(x, y, width, height);
+ helper_->CommandBufferHelper::Flush();
+ if (swap_buffers_tokens_.size() > kMaxSwapBuffers + 1) {
+ helper_->WaitForToken(swap_buffers_tokens_.front());
+ swap_buffers_tokens_.pop();
+ }
+}
+
+void GLES2Implementation::DeleteQueriesEXTHelper(
+ GLsizei n, const GLuint* queries) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ query_tracker_->RemoveQuery(queries[ii]);
+ query_id_allocator_->FreeID(queries[ii]);
+ }
+
+ helper_->DeleteQueriesEXTImmediate(n, queries);
+}
+
+GLboolean GLES2Implementation::IsQueryEXT(GLuint id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] IsQueryEXT(" << id << ")");
+
+  // TODO(gman): To be spec compliant, IDs from other contexts in the same
+  // share group would need to return true here even though queries cannot
+  // actually be shared across contexts.
+ return query_tracker_->GetQuery(id) != NULL;
+}
+
+void GLES2Implementation::BeginQueryEXT(GLenum target, GLuint id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] BeginQueryEXT("
+ << GLES2Util::GetStringQueryTarget(target)
+ << ", " << id << ")");
+
+  // If a query is already in progress on this target, generate
+  // GL_INVALID_OPERATION.
+ QueryMap::iterator it = current_queries_.find(target);
+ if (it != current_queries_.end()) {
+ SetGLError(
+ GL_INVALID_OPERATION, "glBeginQueryEXT", "query already in progress");
+ return;
+ }
+
+  // An id of 0 generates GL_INVALID_OPERATION.
+ if (id == 0) {
+ SetGLError(GL_INVALID_OPERATION, "glBeginQueryEXT", "id is 0");
+ return;
+ }
+
+  // If the id was not generated by glGenQueriesEXT, generate
+  // GL_INVALID_OPERATION.
+ if (!query_id_allocator_->InUse(id)) {
+ SetGLError(GL_INVALID_OPERATION, "glBeginQueryEXT", "invalid id");
+ return;
+ }
+
+  // If the id does not yet have a query object, create one.
+ QueryTracker::Query* query = query_tracker_->GetQuery(id);
+ if (!query) {
+ query = query_tracker_->CreateQuery(id, target);
+ if (!query) {
+ SetGLError(GL_OUT_OF_MEMORY,
+ "glBeginQueryEXT",
+ "transfer buffer allocation failed");
+ return;
+ }
+ } else if (query->target() != target) {
+ SetGLError(
+ GL_INVALID_OPERATION, "glBeginQueryEXT", "target does not match");
+ return;
+ }
+
+ current_queries_[target] = query;
+
+ query->Begin(this);
+ CheckGLError();
+}
+
+void GLES2Implementation::EndQueryEXT(GLenum target) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] EndQueryEXT("
+ << GLES2Util::GetStringQueryTarget(target) << ")");
+ // Don't do anything if the context is lost.
+ if (helper_->IsContextLost()) {
+ return;
+ }
+
+ QueryMap::iterator it = current_queries_.find(target);
+ if (it == current_queries_.end()) {
+ SetGLError(GL_INVALID_OPERATION, "glEndQueryEXT", "no active query");
+ return;
+ }
+
+ QueryTracker::Query* query = it->second;
+ query->End(this);
+ current_queries_.erase(it);
+ CheckGLError();
+}
+
+void GLES2Implementation::GetQueryivEXT(
+ GLenum target, GLenum pname, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] GetQueryivEXT("
+ << GLES2Util::GetStringQueryTarget(target) << ", "
+ << GLES2Util::GetStringQueryParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+
+ if (pname != GL_CURRENT_QUERY_EXT) {
+ SetGLErrorInvalidEnum("glGetQueryivEXT", pname, "pname");
+ return;
+ }
+ QueryMap::iterator it = current_queries_.find(target);
+ if (it != current_queries_.end()) {
+ QueryTracker::Query* query = it->second;
+ *params = query->id();
+ } else {
+ *params = 0;
+ }
+ GPU_CLIENT_LOG(" " << *params);
+ CheckGLError();
+}
+
+void GLES2Implementation::GetQueryObjectuivEXT(
+ GLuint id, GLenum pname, GLuint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] GetQueryivEXT(" << id << ", "
+ << GLES2Util::GetStringQueryObjectParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+
+ QueryTracker::Query* query = query_tracker_->GetQuery(id);
+ if (!query) {
+ SetGLError(GL_INVALID_OPERATION, "glQueryObjectuivEXT", "unknown query id");
+ return;
+ }
+
+ QueryMap::iterator it = current_queries_.find(query->target());
+ if (it != current_queries_.end()) {
+ SetGLError(
+ GL_INVALID_OPERATION,
+ "glQueryObjectuivEXT", "query active. Did you to call glEndQueryEXT?");
+ return;
+ }
+
+ if (query->NeverUsed()) {
+ SetGLError(
+ GL_INVALID_OPERATION,
+ "glQueryObjectuivEXT", "Never used. Did you call glBeginQueryEXT?");
+ return;
+ }
+
+ switch (pname) {
+ case GL_QUERY_RESULT_EXT:
+ if (!query->CheckResultsAvailable(helper_)) {
+ helper_->WaitForToken(query->token());
+ if (!query->CheckResultsAvailable(helper_)) {
+ FinishHelper();
+ CHECK(query->CheckResultsAvailable(helper_));
+ }
+ }
+ *params = query->GetResult();
+ break;
+ case GL_QUERY_RESULT_AVAILABLE_EXT:
+ *params = query->CheckResultsAvailable(helper_);
+ break;
+ default:
+ SetGLErrorInvalidEnum("glQueryObjectuivEXT", pname, "pname");
+ break;
+ }
+ GPU_CLIENT_LOG(" " << *params);
+ CheckGLError();
+}
+
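+// Illustrative query lifecycle for the functions above (a sketch, not part of
+// the implementation; assumes a valid GLES2Interface* gl):
+//
+//   GLuint query = 0;
+//   gl->GenQueriesEXT(1, &query);
+//   gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, query);
+//   //   ... issue GL commands ...
+//   gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
+//   GLuint available = 0;
+//   gl->GetQueryObjectuivEXT(
+//       query, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+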
+void GLES2Implementation::DrawArraysInstancedANGLE(
+ GLenum mode, GLint first, GLsizei count, GLsizei primcount) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawArraysInstancedANGLE("
+ << GLES2Util::GetStringDrawMode(mode) << ", "
+ << first << ", " << count << ", " << primcount << ")");
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDrawArraysInstancedANGLE", "count < 0");
+ return;
+ }
+ if (primcount < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDrawArraysInstancedANGLE", "primcount < 0");
+ return;
+ }
+ if (primcount == 0) {
+ return;
+ }
+ bool simulated = false;
+ if (!vertex_array_object_manager_->SetupSimulatedClientSideBuffers(
+ "glDrawArraysInstancedANGLE", this, helper_, first + count, primcount,
+ &simulated)) {
+ return;
+ }
+ helper_->DrawArraysInstancedANGLE(mode, first, count, primcount);
+ RestoreArrayBuffer(simulated);
+ CheckGLError();
+}
+
+void GLES2Implementation::DrawElementsInstancedANGLE(
+ GLenum mode, GLsizei count, GLenum type, const void* indices,
+ GLsizei primcount) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawElementsInstancedANGLE("
+ << GLES2Util::GetStringDrawMode(mode) << ", "
+ << count << ", "
+ << GLES2Util::GetStringIndexType(type) << ", "
+ << static_cast<const void*>(indices) << ", "
+ << primcount << ")");
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glDrawElementsInstancedANGLE", "count less than 0.");
+ return;
+ }
+ if (count == 0) {
+ return;
+ }
+ if (primcount < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glDrawElementsInstancedANGLE", "primcount < 0");
+ return;
+ }
+ if (primcount == 0) {
+ return;
+ }
+ if (vertex_array_object_manager_->bound_element_array_buffer() != 0 &&
+ !ValidateOffset("glDrawElementsInstancedANGLE",
+ reinterpret_cast<GLintptr>(indices))) {
+ return;
+ }
+ GLuint offset = 0;
+ bool simulated = false;
+ if (!vertex_array_object_manager_->SetupSimulatedIndexAndClientSideBuffers(
+ "glDrawElementsInstancedANGLE", this, helper_, count, type, primcount,
+ indices, &offset, &simulated)) {
+ return;
+ }
+ helper_->DrawElementsInstancedANGLE(mode, count, type, offset, primcount);
+ RestoreElementAndArrayBuffers(simulated);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenMailboxCHROMIUM(
+ GLbyte* mailbox) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenMailboxCHROMIUM("
+ << static_cast<const void*>(mailbox) << ")");
+ TRACE_EVENT0("gpu", "GLES2::GenMailboxCHROMIUM");
+
+ gpu::Mailbox result = gpu::Mailbox::Generate();
+ memcpy(mailbox, result.name, sizeof(result.name));
+}
+
+void GLES2Implementation::ProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glProduceTextureCHROMIUM("
+ << static_cast<const void*>(data) << ")");
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DCHECK(mailbox.Verify()) << "ProduceTextureCHROMIUM was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+ helper_->ProduceTextureCHROMIUMImmediate(target, data);
+ CheckGLError();
+}
+
+void GLES2Implementation::ProduceTextureDirectCHROMIUM(
+ GLuint texture, GLenum target, const GLbyte* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glProduceTextureDirectCHROMIUM("
+ << static_cast<const void*>(data) << ")");
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DCHECK(mailbox.Verify()) << "ProduceTextureDirectCHROMIUM was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+ helper_->ProduceTextureDirectCHROMIUMImmediate(texture, target, data);
+ CheckGLError();
+}
+
+void GLES2Implementation::ConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glConsumeTextureCHROMIUM("
+ << static_cast<const void*>(data) << ")");
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DCHECK(mailbox.Verify()) << "ConsumeTextureCHROMIUM was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+ helper_->ConsumeTextureCHROMIUMImmediate(target, data);
+ CheckGLError();
+}
+
+GLuint GLES2Implementation::CreateAndConsumeTextureCHROMIUM(
+ GLenum target, const GLbyte* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCreateAndConsumeTextureCHROMIUM("
+ << static_cast<const void*>(data) << ")");
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DCHECK(mailbox.Verify()) << "CreateAndConsumeTextureCHROMIUM was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+ GLuint client_id;
+ GetIdHandler(id_namespaces::kTextures)->MakeIds(this, 0, 1, &client_id);
+ helper_->CreateAndConsumeTextureCHROMIUMImmediate(target,
+ client_id, data);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ CheckGLError();
+ return client_id;
+}
+
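+// Illustrative mailbox round trip between two contexts in the same share
+// group (a sketch, not part of the implementation; gl_a, gl_b and texture_a
+// are assumptions: two valid GLES2Interface pointers and an existing texture
+// id in the producing context):
+//
+//   GLbyte mailbox[GL_MAILBOX_SIZE_CHROMIUM];
+//   gl_a->GenMailboxCHROMIUM(mailbox);
+//   gl_a->ProduceTextureDirectCHROMIUM(texture_a, GL_TEXTURE_2D, mailbox);
+//   // Once the producer's commands are ordered before the consumer's:
+//   GLuint texture_b =
+//       gl_b->CreateAndConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox);
+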
+void GLES2Implementation::PushGroupMarkerEXT(
+ GLsizei length, const GLchar* marker) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPushGroupMarkerEXT("
+ << length << ", " << marker << ")");
+ if (!marker) {
+ marker = "";
+ }
+ SetBucketAsString(
+ kResultBucketId,
+ (length ? std::string(marker, length) : std::string(marker)));
+ helper_->PushGroupMarkerEXT(kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ debug_marker_manager_.PushGroup(
+ length ? std::string(marker, length) : std::string(marker));
+}
+
+void GLES2Implementation::InsertEventMarkerEXT(
+ GLsizei length, const GLchar* marker) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glInsertEventMarkerEXT("
+ << length << ", " << marker << ")");
+ if (!marker) {
+ marker = "";
+ }
+ SetBucketAsString(
+ kResultBucketId,
+ (length ? std::string(marker, length) : std::string(marker)));
+ helper_->InsertEventMarkerEXT(kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ debug_marker_manager_.SetMarker(
+ length ? std::string(marker, length) : std::string(marker));
+}
+
+void GLES2Implementation::PopGroupMarkerEXT() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPopGroupMarkerEXT()");
+ helper_->PopGroupMarkerEXT();
+ debug_marker_manager_.PopGroup();
+}
+
+void GLES2Implementation::TraceBeginCHROMIUM(const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTraceBeginCHROMIUM("
+ << name << ")");
+ if (current_trace_name_.get()) {
+ SetGLError(GL_INVALID_OPERATION, "glTraceBeginCHROMIUM",
+ "trace already running");
+ return;
+ }
+ TRACE_EVENT_COPY_ASYNC_BEGIN0("gpu", name, this);
+ SetBucketAsCString(kResultBucketId, name);
+ helper_->TraceBeginCHROMIUM(kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ current_trace_name_.reset(new std::string(name));
+}
+
+void GLES2Implementation::TraceEndCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTraceEndCHROMIUM(" << ")");
+ if (!current_trace_name_.get()) {
+ SetGLError(GL_INVALID_OPERATION, "glTraceEndCHROMIUM",
+ "missing begin trace");
+ return;
+ }
+ helper_->TraceEndCHROMIUM();
+ TRACE_EVENT_COPY_ASYNC_END0("gpu", current_trace_name_->c_str(), this);
+ current_trace_name_.reset();
+}
+
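+// Illustrative use of the trace markers above (a sketch, not part of the
+// implementation; assumes a valid GLES2Interface* gl). Only one trace may be
+// active at a time.
+//
+//   gl->TraceBeginCHROMIUM("MyPass");
+//   //   ... GL work attributed to "MyPass" in about:tracing ...
+//   gl->TraceEndCHROMIUM();
+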
+void* GLES2Implementation::MapBufferCHROMIUM(GLuint target, GLenum access) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMapBufferCHROMIUM("
+ << target << ", " << GLES2Util::GetStringEnum(access) << ")");
+ switch (target) {
+ case GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM:
+ if (access != GL_READ_ONLY) {
+ SetGLError(GL_INVALID_ENUM, "glMapBufferCHROMIUM", "bad access mode");
+ return NULL;
+ }
+ break;
+ case GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM:
+ if (access != GL_WRITE_ONLY) {
+ SetGLError(GL_INVALID_ENUM, "glMapBufferCHROMIUM", "bad access mode");
+ return NULL;
+ }
+ break;
+ default:
+ SetGLError(
+ GL_INVALID_ENUM, "glMapBufferCHROMIUM", "invalid target");
+ return NULL;
+ }
+ GLuint buffer_id;
+ GetBoundPixelTransferBuffer(target, "glMapBufferCHROMIUM", &buffer_id);
+ if (!buffer_id) {
+ return NULL;
+ }
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
+ if (!buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glMapBufferCHROMIUM", "invalid buffer");
+ return NULL;
+ }
+ if (buffer->mapped()) {
+ SetGLError(GL_INVALID_OPERATION, "glMapBufferCHROMIUM", "already mapped");
+ return NULL;
+ }
+ // Here we wait for previous transfer operations to be finished.
+ // TODO(hubbe): AsyncTex(Sub)Image2dCHROMIUM does not currently work
+ // with this method of synchronization. Until this is fixed,
+ // MapBufferCHROMIUM will not block even if the transfer is not ready
+ // for these calls.
+ if (buffer->last_usage_token()) {
+ helper_->WaitForToken(buffer->last_usage_token());
+ buffer->set_last_usage_token(0);
+ }
+ buffer->set_mapped(true);
+
+ GPU_CLIENT_LOG(" returned " << buffer->address());
+ CheckGLError();
+ return buffer->address();
+}
+
+GLboolean GLES2Implementation::UnmapBufferCHROMIUM(GLuint target) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glUnmapBufferCHROMIUM(" << target << ")");
+ GLuint buffer_id;
+  if (!GetBoundPixelTransferBuffer(
+          target, "glUnmapBufferCHROMIUM", &buffer_id)) {
+ SetGLError(GL_INVALID_ENUM, "glUnmapBufferCHROMIUM", "invalid target");
+ }
+ if (!buffer_id) {
+ return false;
+ }
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
+ if (!buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glUnmapBufferCHROMIUM", "invalid buffer");
+ return false;
+ }
+ if (!buffer->mapped()) {
+ SetGLError(GL_INVALID_OPERATION, "glUnmapBufferCHROMIUM", "not mapped");
+ return false;
+ }
+ buffer->set_mapped(false);
+ CheckGLError();
+ return true;
+}
+
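+// Illustrative pixel-unpack transfer buffer flow (a sketch, not part of the
+// implementation; assumes a valid GLES2Interface* gl and a 256x256 RGBA
+// texture bound to GL_TEXTURE_2D):
+//
+//   GLuint buffer = 0;
+//   gl->GenBuffers(1, &buffer);
+//   gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, buffer);
+//   gl->BufferData(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM,
+//                  256 * 256 * 4, NULL, GL_STREAM_DRAW);
+//   void* ptr = gl->MapBufferCHROMIUM(
+//       GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, GL_WRITE_ONLY);
+//   //   ... fill ptr with 256 * 256 * 4 bytes of pixel data ...
+//   gl->UnmapBufferCHROMIUM(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM);
+//   gl->AsyncTexImage2DCHROMIUM(GL_TEXTURE_2D, 0, GL_RGBA, 256, 256, 0,
+//                               GL_RGBA, GL_UNSIGNED_BYTE, 0);
+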
+bool GLES2Implementation::EnsureAsyncUploadSync() {
+ if (async_upload_sync_)
+ return true;
+
+ int32 shm_id;
+ unsigned int shm_offset;
+ void* mem = mapped_memory_->Alloc(sizeof(AsyncUploadSync),
+ &shm_id,
+ &shm_offset);
+ if (!mem)
+ return false;
+
+ async_upload_sync_shm_id_ = shm_id;
+ async_upload_sync_shm_offset_ = shm_offset;
+ async_upload_sync_ = static_cast<AsyncUploadSync*>(mem);
+ async_upload_sync_->Reset();
+
+ return true;
+}
+
+uint32 GLES2Implementation::NextAsyncUploadToken() {
+ async_upload_token_++;
+ if (async_upload_token_ == 0)
+ async_upload_token_++;
+ return async_upload_token_;
+}
+
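+// Zero is reserved to mean "no upload pending" (see Reset() in
+// EnsureAsyncUploadSync() and the last_async_upload_token on tracked
+// buffers), which is why the counter above skips it when it wraps around.
+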
+void GLES2Implementation::PollAsyncUploads() {
+ if (!async_upload_sync_)
+ return;
+
+ if (helper_->IsContextLost()) {
+ DetachedAsyncUploadMemoryList::iterator it =
+ detached_async_upload_memory_.begin();
+ while (it != detached_async_upload_memory_.end()) {
+ mapped_memory_->Free(it->first);
+ it = detached_async_upload_memory_.erase(it);
+ }
+ return;
+ }
+
+ DetachedAsyncUploadMemoryList::iterator it =
+ detached_async_upload_memory_.begin();
+ while (it != detached_async_upload_memory_.end()) {
+ if (HasAsyncUploadTokenPassed(it->second)) {
+ mapped_memory_->Free(it->first);
+ it = detached_async_upload_memory_.erase(it);
+ } else {
+ break;
+ }
+ }
+}
+
+void GLES2Implementation::FreeAllAsyncUploadBuffers() {
+ // Free all completed unmanaged async uploads buffers.
+ PollAsyncUploads();
+
+ // Synchronously free rest of the unmanaged async upload buffers.
+ if (!detached_async_upload_memory_.empty()) {
+ WaitAllAsyncTexImage2DCHROMIUM();
+ WaitForCmd();
+ PollAsyncUploads();
+ }
+}
+
+void GLES2Implementation::AsyncTexImage2DCHROMIUM(
+ GLenum target, GLint level, GLenum internalformat, GLsizei width,
+ GLsizei height, GLint border, GLenum format, GLenum type,
+ const void* pixels) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << GLES2Util::GetStringTextureInternalFormat(internalformat) << ", "
+ << width << ", " << height << ", " << border << ", "
+ << GLES2Util::GetStringTextureFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << static_cast<const void*>(pixels) << ")");
+ if (level < 0 || height < 0 || width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "dimension < 0");
+ return;
+ }
+ if (border != 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "border != 0");
+ return;
+ }
+ uint32 size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, unpack_alignment_, &size,
+ &unpadded_row_size, &padded_row_size)) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "image size too large");
+ return;
+ }
+
+ // If there's no data/buffer just issue the AsyncTexImage2D
+ if (!pixels && !bound_pixel_unpack_transfer_buffer_id_) {
+ helper_->AsyncTexImage2DCHROMIUM(
+ target, level, internalformat, width, height, format, type,
+ 0, 0, 0, 0, 0);
+ return;
+ }
+
+ if (!EnsureAsyncUploadSync()) {
+ SetGLError(GL_OUT_OF_MEMORY, "glTexImage2D", "out of memory");
+ return;
+ }
+
+ // Otherwise, async uploads require a transfer buffer to be bound.
+ // TODO(hubbe): Make MapBufferCHROMIUM block if someone tries to re-use
+ // the buffer before the transfer is finished. (Currently such
+ // synchronization has to be handled manually.)
+ GLuint offset = ToGLuint(pixels);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glAsyncTexImage2DCHROMIUM", offset, size);
+ if (buffer && buffer->shm_id() != -1) {
+ uint32 async_token = NextAsyncUploadToken();
+ buffer->set_last_async_upload_token(async_token);
+ helper_->AsyncTexImage2DCHROMIUM(
+ target, level, internalformat, width, height, format, type,
+ buffer->shm_id(), buffer->shm_offset() + offset,
+ async_token,
+ async_upload_sync_shm_id_, async_upload_sync_shm_offset_);
+ }
+}
+
+void GLES2Implementation::AsyncTexSubImage2DCHROMIUM(
+ GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format, GLenum type, const void* pixels) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glAsyncTexSubImage2DCHROMIUM("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << xoffset << ", " << yoffset << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringTextureFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << static_cast<const void*>(pixels) << ")");
+ if (level < 0 || height < 0 || width < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glAsyncTexSubImage2DCHROMIUM", "dimension < 0");
+ return;
+ }
+
+ uint32 size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, unpack_alignment_, &size,
+ &unpadded_row_size, &padded_row_size)) {
+ SetGLError(
+        GL_INVALID_VALUE, "glAsyncTexSubImage2DCHROMIUM", "size too large");
+ return;
+ }
+
+ if (!EnsureAsyncUploadSync()) {
+    SetGLError(GL_OUT_OF_MEMORY,
+               "glAsyncTexSubImage2DCHROMIUM", "out of memory");
+ return;
+ }
+
+ // Async uploads require a transfer buffer to be bound.
+ // TODO(hubbe): Make MapBufferCHROMIUM block if someone tries to re-use
+ // the buffer before the transfer is finished. (Currently such
+ // synchronization has to be handled manually.)
+ GLuint offset = ToGLuint(pixels);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glAsyncTexSubImage2DCHROMIUM", offset, size);
+ if (buffer && buffer->shm_id() != -1) {
+ uint32 async_token = NextAsyncUploadToken();
+ buffer->set_last_async_upload_token(async_token);
+ helper_->AsyncTexSubImage2DCHROMIUM(
+ target, level, xoffset, yoffset, width, height, format, type,
+ buffer->shm_id(), buffer->shm_offset() + offset,
+ async_token,
+ async_upload_sync_shm_id_, async_upload_sync_shm_offset_);
+ }
+}
+
+void GLES2Implementation::WaitAsyncTexImage2DCHROMIUM(GLenum target) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glWaitAsyncTexImage2DCHROMIUM("
+ << GLES2Util::GetStringTextureTarget(target) << ")");
+ helper_->WaitAsyncTexImage2DCHROMIUM(target);
+ CheckGLError();
+}
+
+void GLES2Implementation::WaitAllAsyncTexImage2DCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glWaitAllAsyncTexImage2DCHROMIUM()");
+ helper_->WaitAllAsyncTexImage2DCHROMIUM();
+ CheckGLError();
+}
+
+GLuint GLES2Implementation::InsertSyncPointCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glInsertSyncPointCHROMIUM");
+ helper_->CommandBufferHelper::Flush();
+ return gpu_control_->InsertSyncPoint();
+}
+
+GLuint GLES2Implementation::InsertFutureSyncPointCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glInsertFutureSyncPointCHROMIUM");
+ DCHECK(capabilities_.future_sync_points);
+ return gpu_control_->InsertFutureSyncPoint();
+}
+
+void GLES2Implementation::RetireSyncPointCHROMIUM(GLuint sync_point) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glRetireSyncPointCHROMIUM("
+ << sync_point << ")");
+ DCHECK(capabilities_.future_sync_points);
+ helper_->CommandBufferHelper::Flush();
+ gpu_control_->RetireSyncPoint(sync_point);
+}
+
+namespace {
+
+bool ValidImageFormat(GLenum internalformat) {
+ switch (internalformat) {
+ case GL_RGB:
+ case GL_RGBA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool ValidImageUsage(GLenum usage) {
+ switch (usage) {
+ case GL_MAP_CHROMIUM:
+ case GL_SCANOUT_CHROMIUM:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace
+
+GLuint GLES2Implementation::CreateImageCHROMIUMHelper(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ if (width <= 0) {
+ SetGLError(GL_INVALID_VALUE, "glCreateImageCHROMIUM", "width <= 0");
+ return 0;
+ }
+
+ if (height <= 0) {
+ SetGLError(GL_INVALID_VALUE, "glCreateImageCHROMIUM", "height <= 0");
+ return 0;
+ }
+ // Flush the command stream to ensure ordering in case the newly
+ // returned image_id has recently been in use with a different buffer.
+ helper_->CommandBufferHelper::Flush();
+
+ // Create new buffer.
+ GLuint buffer_id = gpu_memory_buffer_tracker_->CreateBuffer(
+ width, height, internalformat, usage);
+ if (buffer_id == 0) {
+ SetGLError(GL_OUT_OF_MEMORY, "glCreateImageCHROMIUM", "out of GPU memory.");
+ return 0;
+ }
+ return buffer_id;
+}
+
+GLuint GLES2Implementation::CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glCreateImageCHROMIUM(" << width << ", "
+ << height << ", "
+ << GLES2Util::GetStringTextureInternalFormat(internalformat) << ", "
+ << GLES2Util::GetStringTextureInternalFormat(usage) << ")");
+ GLuint image_id =
+ CreateImageCHROMIUMHelper(width, height, internalformat, usage);
+ CheckGLError();
+ return image_id;
+}
+
+void GLES2Implementation::DestroyImageCHROMIUMHelper(GLuint image_id) {
+ gfx::GpuMemoryBuffer* gpu_buffer = gpu_memory_buffer_tracker_->GetBuffer(
+ image_id);
+ if (!gpu_buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glDestroyImageCHROMIUM", "invalid image");
+ return;
+ }
+
+ // Flush the command stream to make sure all pending commands
+ // that may refer to the image_id are executed on the service side.
+ helper_->CommandBufferHelper::Flush();
+ gpu_memory_buffer_tracker_->RemoveBuffer(image_id);
+}
+
+void GLES2Implementation::DestroyImageCHROMIUM(GLuint image_id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDestroyImageCHROMIUM("
+ << image_id << ")");
+ DestroyImageCHROMIUMHelper(image_id);
+ CheckGLError();
+}
+
+void GLES2Implementation::UnmapImageCHROMIUMHelper(GLuint image_id) {
+ gfx::GpuMemoryBuffer* gpu_buffer = gpu_memory_buffer_tracker_->GetBuffer(
+ image_id);
+ if (!gpu_buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glUnmapImageCHROMIUM", "invalid image");
+ return;
+ }
+
+ if (!gpu_buffer->IsMapped()) {
+ SetGLError(GL_INVALID_OPERATION, "glUnmapImageCHROMIUM", "not mapped");
+ return;
+ }
+ gpu_buffer->Unmap();
+}
+
+void GLES2Implementation::UnmapImageCHROMIUM(GLuint image_id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUnmapImageCHROMIUM("
+ << image_id << ")");
+
+ UnmapImageCHROMIUMHelper(image_id);
+ CheckGLError();
+}
+
+void* GLES2Implementation::MapImageCHROMIUMHelper(GLuint image_id) {
+ gfx::GpuMemoryBuffer* gpu_buffer = gpu_memory_buffer_tracker_->GetBuffer(
+ image_id);
+ if (!gpu_buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glMapImageCHROMIUM", "invalid image");
+ return NULL;
+ }
+
+ if (gpu_buffer->IsMapped()) {
+ SetGLError(GL_INVALID_OPERATION, "glMapImageCHROMIUM", "already mapped");
+ return NULL;
+ }
+
+ return gpu_buffer->Map();
+}
+
+void* GLES2Implementation::MapImageCHROMIUM(GLuint image_id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMapImageCHROMIUM(" << image_id
+ << ")");
+
+ void* mapped = MapImageCHROMIUMHelper(image_id);
+ CheckGLError();
+ return mapped;
+}
+
+void GLES2Implementation::GetImageParameterivCHROMIUMHelper(
+ GLuint image_id, GLenum pname, GLint* params) {
+ if (pname != GL_IMAGE_ROWBYTES_CHROMIUM) {
+ SetGLError(GL_INVALID_ENUM, "glGetImageParameterivCHROMIUM",
+ "invalid parameter");
+ return;
+ }
+
+ gfx::GpuMemoryBuffer* gpu_buffer = gpu_memory_buffer_tracker_->GetBuffer(
+ image_id);
+ if (!gpu_buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glGetImageParameterivCHROMIUM",
+ "invalid image");
+ return;
+ }
+
+ if (!gpu_buffer->IsMapped()) {
+ SetGLError(
+ GL_INVALID_OPERATION, "glGetImageParameterivCHROMIUM", "not mapped");
+ return;
+ }
+
+ *params = gpu_buffer->GetStride();
+}
+
+void GLES2Implementation::GetImageParameterivCHROMIUM(
+ GLuint image_id, GLenum pname, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glImageParameterivCHROMIUM("
+ << image_id << ", "
+ << GLES2Util::GetStringBufferParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ GetImageParameterivCHROMIUMHelper(image_id, pname, params);
+ CheckGLError();
+}
+
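+// Illustrative CHROMIUM image flow for the helpers above (a sketch, not part
+// of the implementation; assumes a valid GLES2Interface* gl):
+//
+//   GLuint image = gl->CreateImageCHROMIUM(64, 64, GL_RGBA, GL_MAP_CHROMIUM);
+//   void* pixels = gl->MapImageCHROMIUM(image);
+//   GLint row_bytes = 0;
+//   gl->GetImageParameterivCHROMIUM(
+//       image, GL_IMAGE_ROWBYTES_CHROMIUM, &row_bytes);
+//   //   ... write 64 rows of 64 * 4 bytes, advancing by row_bytes ...
+//   gl->UnmapImageCHROMIUM(image);
+//   gl->DestroyImageCHROMIUM(image);
+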
+GLuint GLES2Implementation::CreateGpuMemoryBufferImageCHROMIUMHelper(
+ GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ if (width <= 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glCreateGpuMemoryBufferImageCHROMIUM", "width <= 0");
+ return 0;
+ }
+
+ if (height <= 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glCreateGpuMemoryBufferImageCHROMIUM",
+ "height <= 0");
+ return 0;
+ }
+
+ if (!ValidImageFormat(internalformat)) {
+ SetGLError(GL_INVALID_VALUE,
+ "glCreateGpuMemoryBufferImageCHROMIUM",
+ "invalid format");
+ return 0;
+ }
+
+ if (!ValidImageUsage(usage)) {
+ SetGLError(GL_INVALID_VALUE,
+ "glCreateGpuMemoryBufferImageCHROMIUM",
+ "invalid usage");
+ return 0;
+ }
+
+ // Flush the command stream to ensure ordering in case the newly
+ // returned image_id has recently been in use with a different buffer.
+ helper_->CommandBufferHelper::Flush();
+
+ // Create new buffer.
+ GLuint buffer_id = gpu_memory_buffer_tracker_->CreateBuffer(
+ width,
+ height,
+ internalformat == GL_RGBA ? GL_RGBA8_OES : GL_RGB8_OES,
+ usage);
+ if (buffer_id == 0) {
+ SetGLError(GL_OUT_OF_MEMORY,
+ "glCreateGpuMemoryBufferImageCHROMIUM",
+ "out of GPU memory");
+ return 0;
+ }
+ return buffer_id;
+}
+
+GLuint GLES2Implementation::CreateGpuMemoryBufferImageCHROMIUM(
+ GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glCreateGpuMemoryBufferImageCHROMIUM(" << width
+ << ", " << height << ", "
+ << GLES2Util::GetStringImageInternalFormat(internalformat)
+ << ", " << GLES2Util::GetStringImageUsage(usage) << ")");
+ GLuint image_id = CreateGpuMemoryBufferImageCHROMIUMHelper(
+ width, height, internalformat, usage);
+ CheckGLError();
+ return image_id;
+}
+
+bool GLES2Implementation::ValidateSize(const char* func, GLsizeiptr size) {
+ if (size < 0) {
+ SetGLError(GL_INVALID_VALUE, func, "size < 0");
+ return false;
+ }
+ if (!FitInt32NonNegative<GLsizeiptr>(size)) {
+ SetGLError(GL_INVALID_OPERATION, func, "size more than 32-bit");
+ return false;
+ }
+ return true;
+}
+
+bool GLES2Implementation::ValidateOffset(const char* func, GLintptr offset) {
+ if (offset < 0) {
+ SetGLError(GL_INVALID_VALUE, func, "offset < 0");
+ return false;
+ }
+ if (!FitInt32NonNegative<GLintptr>(offset)) {
+ SetGLError(GL_INVALID_OPERATION, func, "offset more than 32-bit");
+ return false;
+ }
+ return true;
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/gles2_implementation_impl_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/gles2_implementation.h b/gpu/command_buffer/client/gles2_implementation.h
new file mode 100644
index 0000000..84cda7e
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation.h
@@ -0,0 +1,828 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_H_
+
+#include <GLES2/gl2.h>
+
+#include <list>
+#include <map>
+#include <queue>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "gpu/command_buffer/client/buffer_tracker.h"
+#include "gpu/command_buffer/client/client_context_state.h"
+#include "gpu/command_buffer/client/context_support.h"
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/gles2_impl_export.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/command_buffer/client/gpu_memory_buffer_tracker.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/client/query_tracker.h"
+#include "gpu/command_buffer/client/ref_counted.h"
+#include "gpu/command_buffer/client/ring_buffer.h"
+#include "gpu/command_buffer/client/share_group.h"
+#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/command_buffer/common/debug_marker_manager.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+
+#if !defined(NDEBUG) && !defined(__native_client__) && !defined(GLES2_CONFORMANCE_TESTS) // NOLINT
+ #if defined(GLES2_INLINE_OPTIMIZATION)
+    // TODO(gman): Replace with macros that work with inline optimization.
+ #define GPU_CLIENT_SINGLE_THREAD_CHECK()
+ #define GPU_CLIENT_LOG(args)
+ #define GPU_CLIENT_LOG_CODE_BLOCK(code)
+ #define GPU_CLIENT_DCHECK_CODE_BLOCK(code)
+ #else
+ #include "base/logging.h"
+ #define GPU_CLIENT_SINGLE_THREAD_CHECK() SingleThreadChecker checker(this);
+ #define GPU_CLIENT_LOG(args) DLOG_IF(INFO, debug_) << args;
+ #define GPU_CLIENT_LOG_CODE_BLOCK(code) code
+ #define GPU_CLIENT_DCHECK_CODE_BLOCK(code) code
+ #define GPU_CLIENT_DEBUG
+ #endif
+#else
+ #define GPU_CLIENT_SINGLE_THREAD_CHECK()
+ #define GPU_CLIENT_LOG(args)
+ #define GPU_CLIENT_LOG_CODE_BLOCK(code)
+ #define GPU_CLIENT_DCHECK_CODE_BLOCK(code)
+#endif
+
+#if defined(GPU_CLIENT_DEBUG)
+ // Set to 1 to have the client fail when a GL error is generated.
+ // This helps find bugs in the renderer since the debugger stops on the error.
+# if 0
+# define GL_CLIENT_FAIL_GL_ERRORS
+# endif
+#endif
+
+// Check that destination pointers point to initialized memory.
+// When the context is lost, calling a GL function has no effect, so if
+// destination pointers point to uninitialized memory it can often lead to
+// crash bugs. eg.
+//
+// GLsizei len;
+// glGetShaderSource(shader, max_size, &len, buffer);
+// std::string src(buffer, buffer + len); // len can be uninitialized here!!!
+//
+// Because this check is not part of official GL, it happens only in Chrome
+// code, not Pepper.
+//
+// If it were up to us we'd just always write to the destination, but the
+// OpenGL spec defines the behavior of OpenGL functions, not us. :-(
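+//
+// A safer client-side pattern (illustrative only, not required by this
+// header; it reuses the names from the example above) is to initialize the
+// destination before the call:
+//
+//    GLsizei len = 0;
+//    glGetShaderSource(shader, max_size, &len, buffer);
+//    std::string src(buffer, buffer + len);  // len is well-defined even if
+//                                            // the context was lost.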
+#if defined(__native_client__) || defined(GLES2_CONFORMANCE_TESTS)
+ #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v)
+ #define GPU_CLIENT_DCHECK(v)
+#elif defined(GPU_DCHECK)
+ #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) GPU_DCHECK(v)
+ #define GPU_CLIENT_DCHECK(v) GPU_DCHECK(v)
+#elif defined(DCHECK)
+ #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) DCHECK(v)
+ #define GPU_CLIENT_DCHECK(v) DCHECK(v)
+#else
+ #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) ASSERT(v)
+ #define GPU_CLIENT_DCHECK(v) ASSERT(v)
+#endif
+
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(type, ptr) \
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(ptr && \
+ (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
+
+#define GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(type, ptr) \
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(!ptr || \
+ (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
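+
+// Example (illustrative only): given "GLint value = 0;", a call such as
+// GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, &value); passes,
+// while an uninitialized "value" would usually trip the assert above.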
+
+struct GLUniformDefinitionCHROMIUM;
+
+namespace gpu {
+
+class GpuControl;
+class ScopedTransferBufferPtr;
+class TransferBufferInterface;
+
+namespace gles2 {
+
+class ImageFactory;
+class VertexArrayObjectManager;
+
+class GLES2ImplementationErrorMessageCallback {
+ public:
+ virtual ~GLES2ImplementationErrorMessageCallback() { }
+ virtual void OnErrorMessage(const char* msg, int id) = 0;
+};
+
+// This class emulates GLES2 over command buffers. It can be used by a client
+// program so that the program does not need to deal with shared memory and
+// command buffer management. See gl2_lib.h. Note that there is a performance
+// gain to be had by using the GLES2CmdHelper to issue command buffer commands
+// directly, but that entails dealing with shared memory and synchronization
+// issues yourself.
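+//
+// For example, a hypothetical client that already holds a
+// GLES2Implementation* named "gl" (illustrative only; the name is not part of
+// this header) simply issues GL calls through it:
+//
+//    GLuint buffer = 0;
+//    gl->GenBuffers(1, &buffer);
+//    gl->BindBuffer(GL_ARRAY_BUFFER, buffer);
+//    gl->BufferData(GL_ARRAY_BUFFER, size, data, GL_STATIC_DRAW);
+//
+// and the implementation takes care of transfer buffers, shared memory and
+// command serialization.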
+class GLES2_IMPL_EXPORT GLES2Implementation
+ : NON_EXPORTED_BASE(public GLES2Interface),
+ NON_EXPORTED_BASE(public ContextSupport) {
+ public:
+ enum MappedMemoryLimit {
+ kNoLimit = MappedMemoryManager::kNoLimit,
+ };
+
+ // Stores GL state that never changes.
+ struct GLES2_IMPL_EXPORT GLStaticState {
+ GLStaticState();
+ ~GLStaticState();
+
+ struct GLES2_IMPL_EXPORT IntState {
+ IntState();
+ GLint max_combined_texture_image_units;
+ GLint max_cube_map_texture_size;
+ GLint max_fragment_uniform_vectors;
+ GLint max_renderbuffer_size;
+ GLint max_texture_image_units;
+ GLint max_texture_size;
+ GLint max_varying_vectors;
+ GLint max_vertex_attribs;
+ GLint max_vertex_texture_image_units;
+ GLint max_vertex_uniform_vectors;
+ GLint num_compressed_texture_formats;
+ GLint num_shader_binary_formats;
+ GLint bind_generates_resource_chromium;
+ };
+ IntState int_state;
+
+ typedef std::pair<GLenum, GLenum> ShaderPrecisionKey;
+ typedef std::map<ShaderPrecisionKey,
+ cmds::GetShaderPrecisionFormat::Result>
+ ShaderPrecisionMap;
+ ShaderPrecisionMap shader_precisions;
+ };
+
+  // The maximum result size from simple GL get commands.
+ static const size_t kMaxSizeOfSimpleResult = 16 * sizeof(uint32); // NOLINT.
+
+  // Used for testing only. If more things are reserved, add them here.
+ static const unsigned int kStartingOffset = kMaxSizeOfSimpleResult;
+
+ // Size in bytes to issue async flush for transfer buffer.
+ static const unsigned int kSizeToFlush = 256 * 1024;
+
+ // The bucket used for results. Public for testing only.
+ static const uint32 kResultBucketId = 1;
+
+ // Alignment of allocations.
+ static const unsigned int kAlignment = 4;
+
+ // GL names for the buffers used to emulate client side buffers.
+ static const GLuint kClientSideArrayId = 0xFEDCBA98u;
+ static const GLuint kClientSideElementArrayId = 0xFEDCBA99u;
+
+ // Number of swap buffers allowed before waiting.
+ static const size_t kMaxSwapBuffers = 2;
+
+ GLES2Implementation(GLES2CmdHelper* helper,
+ ShareGroup* share_group,
+ TransferBufferInterface* transfer_buffer,
+ bool bind_generates_resource,
+ bool lose_context_when_out_of_memory,
+ GpuControl* gpu_control);
+
+ virtual ~GLES2Implementation();
+
+ bool Initialize(
+ unsigned int starting_transfer_buffer_size,
+ unsigned int min_transfer_buffer_size,
+ unsigned int max_transfer_buffer_size,
+ unsigned int mapped_memory_limit);
+
+ // The GLES2CmdHelper being used by this GLES2Implementation. You can use
+ // this to issue cmds at a lower level for certain kinds of optimization.
+ GLES2CmdHelper* helper() const;
+
+ // Gets client side generated errors.
+ GLenum GetClientSideGLError();
+
+ // Include the auto-generated part of this class. We split this because
+ // it means we can easily edit the non-auto generated parts right here in
+ // this file instead of having to edit some template or the code generator.
+ #include "gpu/command_buffer/client/gles2_implementation_autogen.h"
+
+ virtual void DisableVertexAttribArray(GLuint index) OVERRIDE;
+ virtual void EnableVertexAttribArray(GLuint index) OVERRIDE;
+ virtual void GetVertexAttribfv(
+ GLuint index, GLenum pname, GLfloat* params) OVERRIDE;
+ virtual void GetVertexAttribiv(
+ GLuint index, GLenum pname, GLint* params) OVERRIDE;
+
+ // ContextSupport implementation.
+ virtual void Swap() OVERRIDE;
+ virtual void PartialSwapBuffers(const gfx::Rect& sub_buffer) OVERRIDE;
+ virtual void ScheduleOverlayPlane(int plane_z_order,
+ gfx::OverlayTransform plane_transform,
+ unsigned overlay_texture_id,
+ const gfx::Rect& display_bounds,
+ const gfx::RectF& uv_rect) OVERRIDE;
+ virtual GLuint InsertFutureSyncPointCHROMIUM() OVERRIDE;
+ virtual void RetireSyncPointCHROMIUM(GLuint sync_point) OVERRIDE;
+
+ void GetProgramInfoCHROMIUMHelper(GLuint program, std::vector<int8>* result);
+ GLint GetAttribLocationHelper(GLuint program, const char* name);
+ GLint GetUniformLocationHelper(GLuint program, const char* name);
+ bool GetActiveAttribHelper(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name);
+ bool GetActiveUniformHelper(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name);
+
+ void FreeUnusedSharedMemory();
+ void FreeEverything();
+
+ // ContextSupport implementation.
+ virtual void SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) OVERRIDE;
+ virtual void SignalQuery(uint32 query,
+ const base::Closure& callback) OVERRIDE;
+ virtual void SetSurfaceVisible(bool visible) OVERRIDE;
+
+ void SetErrorMessageCallback(
+ GLES2ImplementationErrorMessageCallback* callback) {
+ error_message_callback_ = callback;
+ }
+
+ ShareGroup* share_group() const {
+ return share_group_.get();
+ }
+
+ const Capabilities& capabilities() const {
+ return capabilities_;
+ }
+
+ GpuControl* gpu_control() {
+ return gpu_control_;
+ }
+
+ ShareGroupContextData* share_group_context_data() {
+ return &share_group_context_data_;
+ }
+
+ private:
+ friend class GLES2ImplementationTest;
+ friend class VertexArrayObjectManager;
+
+ // Used to track whether an extension is available
+ enum ExtensionStatus {
+ kAvailableExtensionStatus,
+ kUnavailableExtensionStatus,
+ kUnknownExtensionStatus
+ };
+
+ // Base class for mapped resources.
+ struct MappedResource {
+ MappedResource(GLenum _access, int _shm_id, void* mem, unsigned int offset)
+ : access(_access),
+ shm_id(_shm_id),
+ shm_memory(mem),
+ shm_offset(offset) {
+ }
+
+ // access mode. Currently only GL_WRITE_ONLY is valid
+ GLenum access;
+
+ // Shared memory ID for buffer.
+ int shm_id;
+
+ // Address of shared memory
+ void* shm_memory;
+
+ // Offset of shared memory
+ unsigned int shm_offset;
+ };
+
+ // Used to track mapped textures.
+ struct MappedTexture : public MappedResource {
+ MappedTexture(
+ GLenum access,
+ int shm_id,
+ void* shm_mem,
+ unsigned int shm_offset,
+ GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type)
+ : MappedResource(access, shm_id, shm_mem, shm_offset),
+ target(_target),
+ level(_level),
+ xoffset(_xoffset),
+ yoffset(_yoffset),
+ width(_width),
+ height(_height),
+ format(_format),
+ type(_type) {
+ }
+
+ // These match the arguments to TexSubImage2D.
+ GLenum target;
+ GLint level;
+ GLint xoffset;
+ GLint yoffset;
+ GLsizei width;
+ GLsizei height;
+ GLenum format;
+ GLenum type;
+ };
+
+ // Used to track mapped buffers.
+ struct MappedBuffer : public MappedResource {
+ MappedBuffer(
+ GLenum access,
+ int shm_id,
+ void* shm_mem,
+ unsigned int shm_offset,
+ GLenum _target,
+ GLintptr _offset,
+ GLsizeiptr _size)
+ : MappedResource(access, shm_id, shm_mem, shm_offset),
+ target(_target),
+ offset(_offset),
+ size(_size) {
+ }
+
+ // These match the arguments to BufferSubData.
+ GLenum target;
+ GLintptr offset;
+ GLsizeiptr size;
+ };
+
+ struct TextureUnit {
+ TextureUnit()
+ : bound_texture_2d(0),
+ bound_texture_cube_map(0),
+ bound_texture_external_oes(0) {}
+
+ // texture currently bound to this unit's GL_TEXTURE_2D with glBindTexture
+ GLuint bound_texture_2d;
+
+ // texture currently bound to this unit's GL_TEXTURE_CUBE_MAP with
+ // glBindTexture
+ GLuint bound_texture_cube_map;
+
+ // texture currently bound to this unit's GL_TEXTURE_EXTERNAL_OES with
+ // glBindTexture
+ GLuint bound_texture_external_oes;
+ };
+
+ // Checks for single threaded access.
+ class SingleThreadChecker {
+ public:
+ explicit SingleThreadChecker(GLES2Implementation* gles2_implementation);
+ ~SingleThreadChecker();
+
+ private:
+ GLES2Implementation* gles2_implementation_;
+ };
+
+  // Returns the result buffer, cast to the requested pointer type.
+ template <typename T>
+ T GetResultAs() {
+ return static_cast<T>(GetResultBuffer());
+ }
+
+ void* GetResultBuffer();
+ int32 GetResultShmId();
+ uint32 GetResultShmOffset();
+
+ bool QueryAndCacheStaticState();
+
+  // Helpers used to batch synchronous GetIntegerv calls with other
+ // synchronous calls.
+ struct GetMultipleIntegervState {
+ GetMultipleIntegervState(const GLenum* pnames, GLuint pnames_count,
+ GLint* results, GLsizeiptr results_size)
+ : pnames(pnames),
+ pnames_count(pnames_count),
+ results(results),
+ results_size(results_size)
+ { }
+ // inputs
+ const GLenum* pnames;
+ GLuint pnames_count;
+ // outputs
+ GLint* results;
+ GLsizeiptr results_size;
+ // transfer buffer
+ int num_results;
+ int transfer_buffer_size_needed;
+ void* buffer;
+ void* results_buffer;
+ };
+ bool GetMultipleIntegervSetup(
+ GetMultipleIntegervState* state);
+ void GetMultipleIntegervRequest(
+ GetMultipleIntegervState* state);
+ void GetMultipleIntegervOnCompleted(
+ GetMultipleIntegervState* state);
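+  // Typical flow (illustrative sketch, not a binding contract): the caller
+  // fills a GetMultipleIntegervState, calls GetMultipleIntegervSetup() to
+  // compute and reserve the needed transfer buffer space, issues the request
+  // with GetMultipleIntegervRequest(), waits for the commands to complete
+  // (possibly batched together with other synchronous calls), and finally
+  // copies the values out with GetMultipleIntegervOnCompleted().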
+
+ // Helpers used to batch synchronous GetShaderPrecision calls with other
+ // synchronous calls.
+ struct GetAllShaderPrecisionFormatsState {
+ GetAllShaderPrecisionFormatsState(
+ const GLenum (*precision_params)[2],
+ int precision_params_count)
+ : precision_params(precision_params),
+ precision_params_count(precision_params_count)
+ { }
+ const GLenum (*precision_params)[2];
+ int precision_params_count;
+ int transfer_buffer_size_needed;
+ void* results_buffer;
+ };
+ void GetAllShaderPrecisionFormatsSetup(
+ GetAllShaderPrecisionFormatsState* state);
+ void GetAllShaderPrecisionFormatsRequest(
+ GetAllShaderPrecisionFormatsState* state);
+ void GetAllShaderPrecisionFormatsOnCompleted(
+ GetAllShaderPrecisionFormatsState* state);
+
+ // Lazily determines if GL_ANGLE_pack_reverse_row_order is available
+ bool IsAnglePackReverseRowOrderAvailable();
+ bool IsChromiumFramebufferMultisampleAvailable();
+
+ bool IsExtensionAvailableHelper(
+ const char* extension, ExtensionStatus* status);
+
+ // Gets the GLError through our wrapper.
+ GLenum GetGLError();
+
+ // Sets our wrapper for the GLError.
+ void SetGLError(GLenum error, const char* function_name, const char* msg);
+ void SetGLErrorInvalidEnum(
+ const char* function_name, GLenum value, const char* label);
+
+ // Returns the last error and clears it. Useful for debugging.
+ const std::string& GetLastError() {
+ return last_error_;
+ }
+
+ // Waits for all commands to execute.
+ void WaitForCmd();
+
+ // TODO(gman): These bucket functions really seem like they belong in
+ // CommandBufferHelper (or maybe BucketHelper?). Unfortunately they need
+  // a transfer buffer to function, which is currently managed by this class.
+
+ // Gets the contents of a bucket.
+ bool GetBucketContents(uint32 bucket_id, std::vector<int8>* data);
+
+ // Sets the contents of a bucket.
+ void SetBucketContents(uint32 bucket_id, const void* data, size_t size);
+
+ // Sets the contents of a bucket as a string.
+ void SetBucketAsCString(uint32 bucket_id, const char* str);
+
+ // Gets the contents of a bucket as a string. Returns false if there is no
+  // string available, which is a separate case from the empty string.
+ bool GetBucketAsString(uint32 bucket_id, std::string* str);
+
+ // Sets the contents of a bucket as a string.
+ void SetBucketAsString(uint32 bucket_id, const std::string& str);
+
+ // Returns true if id is reserved.
+ bool IsBufferReservedId(GLuint id);
+ bool IsFramebufferReservedId(GLuint id) { return false; }
+ bool IsRenderbufferReservedId(GLuint id) { return false; }
+ bool IsTextureReservedId(GLuint id) { return false; }
+ bool IsVertexArrayReservedId(GLuint id) { return false; }
+ bool IsProgramReservedId(GLuint id) { return false; }
+
+  bool BindBufferHelper(GLenum target, GLuint buffer);
+  bool BindFramebufferHelper(GLenum target, GLuint framebuffer);
+  bool BindRenderbufferHelper(GLenum target, GLuint renderbuffer);
+ bool BindTextureHelper(GLenum target, GLuint texture);
+ bool BindVertexArrayOESHelper(GLuint array);
+ bool UseProgramHelper(GLuint program);
+
+ void GenBuffersHelper(GLsizei n, const GLuint* buffers);
+ void GenFramebuffersHelper(GLsizei n, const GLuint* framebuffers);
+ void GenRenderbuffersHelper(GLsizei n, const GLuint* renderbuffers);
+ void GenTexturesHelper(GLsizei n, const GLuint* textures);
+ void GenVertexArraysOESHelper(GLsizei n, const GLuint* arrays);
+ void GenQueriesEXTHelper(GLsizei n, const GLuint* queries);
+
+ void DeleteBuffersHelper(GLsizei n, const GLuint* buffers);
+ void DeleteFramebuffersHelper(GLsizei n, const GLuint* framebuffers);
+ void DeleteRenderbuffersHelper(GLsizei n, const GLuint* renderbuffers);
+ void DeleteTexturesHelper(GLsizei n, const GLuint* textures);
+ bool DeleteProgramHelper(GLuint program);
+ bool DeleteShaderHelper(GLuint shader);
+ void DeleteQueriesEXTHelper(GLsizei n, const GLuint* queries);
+ void DeleteVertexArraysOESHelper(GLsizei n, const GLuint* arrays);
+
+ void DeleteBuffersStub(GLsizei n, const GLuint* buffers);
+ void DeleteFramebuffersStub(GLsizei n, const GLuint* framebuffers);
+ void DeleteRenderbuffersStub(GLsizei n, const GLuint* renderbuffers);
+ void DeleteTexturesStub(GLsizei n, const GLuint* textures);
+ void DeleteProgramStub(GLsizei n, const GLuint* programs);
+ void DeleteShaderStub(GLsizei n, const GLuint* shaders);
+ void DeleteVertexArraysOESStub(GLsizei n, const GLuint* arrays);
+
+ void BufferDataHelper(
+ GLenum target, GLsizeiptr size, const void* data, GLenum usage);
+ void BufferSubDataHelper(
+ GLenum target, GLintptr offset, GLsizeiptr size, const void* data);
+ void BufferSubDataHelperImpl(
+ GLenum target, GLintptr offset, GLsizeiptr size, const void* data,
+ ScopedTransferBufferPtr* buffer);
+
+ GLuint CreateImageCHROMIUMHelper(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage);
+ void DestroyImageCHROMIUMHelper(GLuint image_id);
+ void* MapImageCHROMIUMHelper(GLuint image_id);
+ void UnmapImageCHROMIUMHelper(GLuint image_id);
+ void GetImageParameterivCHROMIUMHelper(
+ GLuint image_id, GLenum pname, GLint* params);
+ GLuint CreateGpuMemoryBufferImageCHROMIUMHelper(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage);
+
+ // Helper for GetVertexAttrib
+ bool GetVertexAttribHelper(GLuint index, GLenum pname, uint32* param);
+
+ GLuint GetMaxValueInBufferCHROMIUMHelper(
+ GLuint buffer_id, GLsizei count, GLenum type, GLuint offset);
+
+ void RestoreElementAndArrayBuffers(bool restore);
+  void RestoreArrayBuffer(bool restore);
+
+ // The pixels pointer should already account for unpack skip rows and skip
+ // pixels.
+ void TexSubImage2DImpl(
+ GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format, GLenum type, uint32 unpadded_row_size,
+ const void* pixels, uint32 pixels_padded_row_size, GLboolean internal,
+ ScopedTransferBufferPtr* buffer, uint32 buffer_padded_row_size);
+
+ // Helpers for query functions.
+ bool GetHelper(GLenum pname, GLint* params);
+ bool GetBooleanvHelper(GLenum pname, GLboolean* params);
+ bool GetBufferParameterivHelper(GLenum target, GLenum pname, GLint* params);
+ bool GetFloatvHelper(GLenum pname, GLfloat* params);
+ bool GetFramebufferAttachmentParameterivHelper(
+ GLenum target, GLenum attachment, GLenum pname, GLint* params);
+ bool GetIntegervHelper(GLenum pname, GLint* params);
+ bool GetProgramivHelper(GLuint program, GLenum pname, GLint* params);
+ bool GetRenderbufferParameterivHelper(
+ GLenum target, GLenum pname, GLint* params);
+ bool GetShaderivHelper(GLuint shader, GLenum pname, GLint* params);
+ bool GetTexParameterfvHelper(GLenum target, GLenum pname, GLfloat* params);
+ bool GetTexParameterivHelper(GLenum target, GLenum pname, GLint* params);
+ const GLubyte* GetStringHelper(GLenum name);
+
+ bool IsExtensionAvailable(const char* ext);
+
+  // Caches certain capabilities state. Returns true if cached.
+ bool SetCapabilityState(GLenum cap, bool enabled);
+
+ IdHandlerInterface* GetIdHandler(int id_namespace) const;
+ // IdAllocators for objects that can't be shared among contexts.
+ // For now, used only for Queries. TODO(hj.r.chung) Should be added for
+ // Framebuffer and Vertex array objects.
+ IdAllocator* GetIdAllocator(int id_namespace) const;
+
+ void FinishHelper();
+
+ void RunIfContextNotLost(const base::Closure& callback);
+
+  // Validates that an offset is valid, i.e., non-negative and fits into
+  // 32 bits. If not, generates an appropriate error and returns false.
+ bool ValidateOffset(const char* func, GLintptr offset);
+
+  // Validates that a size is valid, i.e., non-negative and fits into
+  // 32 bits. If not, generates an appropriate error and returns false.
+  bool ValidateSize(const char* func, GLsizeiptr size);
+
+  // Removes the transfer buffer from the buffer tracker. For buffers used
+  // asynchronously the memory is freed if the upload has completed. For
+  // other buffers, the memory is either freed immediately or freed pending
+  // a token.
+ void RemoveTransferBuffer(BufferTracker::Buffer* buffer);
+
+ // Returns true if the async upload token has passed.
+ //
+  // NOTE: This will detect wrapped async tokens by checking if the most
+  // significant bit of the token to check is 1 but that of the last read
+  // token is 0, i.e. the uint32 has wrapped.
+ bool HasAsyncUploadTokenPassed(uint32 token) const {
+ return async_upload_sync_->HasAsyncUploadTokenPassed(token);
+ }
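+  // Illustrative sketch of the wrap check described above (assumes a
+  // hypothetical last_read_token holding the most recently read token; the
+  // actual check is delegated to AsyncUploadSync):
+  //
+  //   bool wrapped = (token & 0x80000000u) != 0 &&
+  //                  (last_read_token & 0x80000000u) == 0;
+  //   return wrapped || last_read_token >= token;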
+
+ // Get the next async upload token.
+ uint32 NextAsyncUploadToken();
+
+ // Ensure that the shared memory used for synchronizing async upload tokens
+ // has been mapped.
+ //
+ // Returns false on error, true on success.
+ bool EnsureAsyncUploadSync();
+
+  // Checks the last read asynchronous upload token and frees any unmanaged
+  // transfer buffer whose async token has passed.
+ void PollAsyncUploads();
+
+  // Frees every async upload buffer. If any async upload buffers are still
+  // in use, waits for them to finish before freeing.
+ void FreeAllAsyncUploadBuffers();
+
+ bool GetBoundPixelTransferBuffer(
+ GLenum target, const char* function_name, GLuint* buffer_id);
+ BufferTracker::Buffer* GetBoundPixelUnpackTransferBufferIfValid(
+ GLuint buffer_id,
+ const char* function_name, GLuint offset, GLsizei size);
+
+ const std::string& GetLogPrefix() const;
+
+#if defined(GL_CLIENT_FAIL_GL_ERRORS)
+ void CheckGLError();
+ void FailGLError(GLenum error);
+#else
+ void CheckGLError() { }
+ void FailGLError(GLenum /* error */) { }
+#endif
+
+ GLES2Util util_;
+ GLES2CmdHelper* helper_;
+ TransferBufferInterface* transfer_buffer_;
+ std::string last_error_;
+ DebugMarkerManager debug_marker_manager_;
+ std::string this_in_hex_;
+
+ std::queue<int32> swap_buffers_tokens_;
+ std::queue<int32> rate_limit_tokens_;
+
+ ExtensionStatus angle_pack_reverse_row_order_status_;
+ ExtensionStatus chromium_framebuffer_multisample_;
+
+ GLStaticState static_state_;
+ ClientContextState state_;
+
+ // pack alignment as last set by glPixelStorei
+ GLint pack_alignment_;
+
+ // unpack alignment as last set by glPixelStorei
+ GLint unpack_alignment_;
+
+  // unpack y-flip as last set by glPixelStorei
+ bool unpack_flip_y_;
+
+ // unpack row length as last set by glPixelStorei
+ GLint unpack_row_length_;
+
+ // unpack skip rows as last set by glPixelStorei
+ GLint unpack_skip_rows_;
+
+ // unpack skip pixels as last set by glPixelStorei
+ GLint unpack_skip_pixels_;
+
+  // pack reverse row order as last set by glPixelStorei
+ bool pack_reverse_row_order_;
+
+ scoped_ptr<TextureUnit[]> texture_units_;
+
+ // 0 to gl_state_.max_combined_texture_image_units.
+ GLuint active_texture_unit_;
+
+ GLuint bound_framebuffer_;
+ GLuint bound_read_framebuffer_;
+ GLuint bound_renderbuffer_;
+
+ // The program in use by glUseProgram
+ GLuint current_program_;
+
+ // The currently bound array buffer.
+ GLuint bound_array_buffer_id_;
+
+ // The currently bound pixel transfer buffers.
+ GLuint bound_pixel_pack_transfer_buffer_id_;
+ GLuint bound_pixel_unpack_transfer_buffer_id_;
+
+ // The current asynchronous pixel buffer upload token.
+ uint32 async_upload_token_;
+
+ // The shared memory used for synchronizing asynchronous upload tokens.
+ AsyncUploadSync* async_upload_sync_;
+ int32 async_upload_sync_shm_id_;
+ unsigned int async_upload_sync_shm_offset_;
+
+ // Unmanaged pixel transfer buffer memory pending asynchronous upload token.
+ typedef std::list<std::pair<void*, uint32> > DetachedAsyncUploadMemoryList;
+ DetachedAsyncUploadMemoryList detached_async_upload_memory_;
+
+ // Client side management for vertex array objects. Needed to correctly
+ // track client side arrays.
+ scoped_ptr<VertexArrayObjectManager> vertex_array_object_manager_;
+
+ GLuint reserved_ids_[2];
+
+ // Current GL error bits.
+ uint32 error_bits_;
+
+ // Whether or not to print debugging info.
+ bool debug_;
+
+ // When true, the context is lost when a GL_OUT_OF_MEMORY error occurs.
+ bool lose_context_when_out_of_memory_;
+
+ // Used to check for single threaded access.
+ int use_count_;
+
+ // Map of GLenum to Strings for glGetString. We need to cache these because
+ // the pointer passed back to the client has to remain valid for eternity.
+ typedef std::map<uint32, std::set<std::string> > GLStringMap;
+ GLStringMap gl_strings_;
+
+ // Similar cache for glGetRequestableExtensionsCHROMIUM. We don't
+ // have an enum for this so handle it separately.
+ std::set<std::string> requestable_extensions_set_;
+
+ typedef std::map<const void*, MappedBuffer> MappedBufferMap;
+ MappedBufferMap mapped_buffers_;
+
+ typedef std::map<const void*, MappedTexture> MappedTextureMap;
+ MappedTextureMap mapped_textures_;
+
+ scoped_ptr<MappedMemoryManager> mapped_memory_;
+
+ scoped_refptr<ShareGroup> share_group_;
+ ShareGroupContextData share_group_context_data_;
+
+ scoped_ptr<QueryTracker> query_tracker_;
+ typedef std::map<GLuint, QueryTracker::Query*> QueryMap;
+ QueryMap current_queries_;
+ scoped_ptr<IdAllocator> query_id_allocator_;
+
+ scoped_ptr<BufferTracker> buffer_tracker_;
+
+ scoped_ptr<GpuMemoryBufferTracker> gpu_memory_buffer_tracker_;
+
+ GLES2ImplementationErrorMessageCallback* error_message_callback_;
+
+ scoped_ptr<std::string> current_trace_name_;
+
+ GpuControl* gpu_control_;
+
+ Capabilities capabilities_;
+
+ base::WeakPtrFactory<GLES2Implementation> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLES2Implementation);
+};
+
+inline bool GLES2Implementation::GetBufferParameterivHelper(
+ GLenum /* target */, GLenum /* pname */, GLint* /* params */) {
+ return false;
+}
+
+inline bool GLES2Implementation::GetFramebufferAttachmentParameterivHelper(
+ GLenum /* target */,
+ GLenum /* attachment */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+ return false;
+}
+
+inline bool GLES2Implementation::GetRenderbufferParameterivHelper(
+ GLenum /* target */, GLenum /* pname */, GLint* /* params */) {
+ return false;
+}
+
+inline bool GLES2Implementation::GetShaderivHelper(
+ GLuint /* shader */, GLenum /* pname */, GLint* /* params */) {
+ return false;
+}
+
+inline bool GLES2Implementation::GetTexParameterfvHelper(
+ GLenum /* target */, GLenum /* pname */, GLfloat* /* params */) {
+ return false;
+}
+
+inline bool GLES2Implementation::GetTexParameterivHelper(
+ GLenum /* target */, GLenum /* pname */, GLint* /* params */) {
+ return false;
+}
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_H_
diff --git a/gpu/command_buffer/client/gles2_implementation_autogen.h b/gpu/command_buffer/client/gles2_implementation_autogen.h
new file mode 100644
index 0000000..0a53a86
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -0,0 +1,740 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_implementation.h to declare the
+// GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
+
+virtual void ActiveTexture(GLenum texture) OVERRIDE;
+
+virtual void AttachShader(GLuint program, GLuint shader) OVERRIDE;
+
+virtual void BindAttribLocation(GLuint program,
+ GLuint index,
+ const char* name) OVERRIDE;
+
+virtual void BindBuffer(GLenum target, GLuint buffer) OVERRIDE;
+
+virtual void BindFramebuffer(GLenum target, GLuint framebuffer) OVERRIDE;
+
+virtual void BindRenderbuffer(GLenum target, GLuint renderbuffer) OVERRIDE;
+
+virtual void BindTexture(GLenum target, GLuint texture) OVERRIDE;
+
+virtual void BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+
+virtual void BlendEquation(GLenum mode) OVERRIDE;
+
+virtual void BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) OVERRIDE;
+
+virtual void BlendFunc(GLenum sfactor, GLenum dfactor) OVERRIDE;
+
+virtual void BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) OVERRIDE;
+
+virtual void BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) OVERRIDE;
+
+virtual void BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) OVERRIDE;
+
+virtual GLenum CheckFramebufferStatus(GLenum target) OVERRIDE;
+
+virtual void Clear(GLbitfield mask) OVERRIDE;
+
+virtual void ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+
+virtual void ClearDepthf(GLclampf depth) OVERRIDE;
+
+virtual void ClearStencil(GLint s) OVERRIDE;
+
+virtual void ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) OVERRIDE;
+
+virtual void CompileShader(GLuint shader) OVERRIDE;
+
+virtual void CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+
+virtual void CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+
+virtual void CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) OVERRIDE;
+
+virtual void CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+
+virtual GLuint CreateProgram() OVERRIDE;
+
+virtual GLuint CreateShader(GLenum type) OVERRIDE;
+
+virtual void CullFace(GLenum mode) OVERRIDE;
+
+virtual void DeleteBuffers(GLsizei n, const GLuint* buffers) OVERRIDE;
+
+virtual void DeleteFramebuffers(GLsizei n, const GLuint* framebuffers) OVERRIDE;
+
+virtual void DeleteProgram(GLuint program) OVERRIDE;
+
+virtual void DeleteRenderbuffers(GLsizei n,
+ const GLuint* renderbuffers) OVERRIDE;
+
+virtual void DeleteShader(GLuint shader) OVERRIDE;
+
+virtual void DeleteTextures(GLsizei n, const GLuint* textures) OVERRIDE;
+
+virtual void DepthFunc(GLenum func) OVERRIDE;
+
+virtual void DepthMask(GLboolean flag) OVERRIDE;
+
+virtual void DepthRangef(GLclampf zNear, GLclampf zFar) OVERRIDE;
+
+virtual void DetachShader(GLuint program, GLuint shader) OVERRIDE;
+
+virtual void Disable(GLenum cap) OVERRIDE;
+
+virtual void DrawArrays(GLenum mode, GLint first, GLsizei count) OVERRIDE;
+
+virtual void DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) OVERRIDE;
+
+virtual void Enable(GLenum cap) OVERRIDE;
+
+virtual void Finish() OVERRIDE;
+
+virtual void Flush() OVERRIDE;
+
+virtual void FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) OVERRIDE;
+
+virtual void FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) OVERRIDE;
+
+virtual void FrontFace(GLenum mode) OVERRIDE;
+
+virtual void GenBuffers(GLsizei n, GLuint* buffers) OVERRIDE;
+
+virtual void GenerateMipmap(GLenum target) OVERRIDE;
+
+virtual void GenFramebuffers(GLsizei n, GLuint* framebuffers) OVERRIDE;
+
+virtual void GenRenderbuffers(GLsizei n, GLuint* renderbuffers) OVERRIDE;
+
+virtual void GenTextures(GLsizei n, GLuint* textures) OVERRIDE;
+
+virtual void GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+virtual void GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+virtual void GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) OVERRIDE;
+
+virtual GLint GetAttribLocation(GLuint program, const char* name) OVERRIDE;
+
+virtual void GetBooleanv(GLenum pname, GLboolean* params) OVERRIDE;
+
+virtual void GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+virtual GLenum GetError() OVERRIDE;
+
+virtual void GetFloatv(GLenum pname, GLfloat* params) OVERRIDE;
+
+virtual void GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+virtual void GetIntegerv(GLenum pname, GLint* params) OVERRIDE;
+
+virtual void GetProgramiv(GLuint program, GLenum pname, GLint* params) OVERRIDE;
+
+virtual void GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+
+virtual void GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+virtual void GetShaderiv(GLuint shader, GLenum pname, GLint* params) OVERRIDE;
+
+virtual void GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+
+virtual void GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) OVERRIDE;
+
+virtual void GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+
+virtual const GLubyte* GetString(GLenum name) OVERRIDE;
+
+virtual void GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) OVERRIDE;
+
+virtual void GetTexParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+virtual void GetUniformfv(GLuint program,
+ GLint location,
+ GLfloat* params) OVERRIDE;
+
+virtual void GetUniformiv(GLuint program,
+ GLint location,
+ GLint* params) OVERRIDE;
+
+virtual GLint GetUniformLocation(GLuint program, const char* name) OVERRIDE;
+
+virtual void GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ void** pointer) OVERRIDE;
+
+virtual void Hint(GLenum target, GLenum mode) OVERRIDE;
+
+virtual GLboolean IsBuffer(GLuint buffer) OVERRIDE;
+
+virtual GLboolean IsEnabled(GLenum cap) OVERRIDE;
+
+virtual GLboolean IsFramebuffer(GLuint framebuffer) OVERRIDE;
+
+virtual GLboolean IsProgram(GLuint program) OVERRIDE;
+
+virtual GLboolean IsRenderbuffer(GLuint renderbuffer) OVERRIDE;
+
+virtual GLboolean IsShader(GLuint shader) OVERRIDE;
+
+virtual GLboolean IsTexture(GLuint texture) OVERRIDE;
+
+virtual void LineWidth(GLfloat width) OVERRIDE;
+
+virtual void LinkProgram(GLuint program) OVERRIDE;
+
+virtual void PixelStorei(GLenum pname, GLint param) OVERRIDE;
+
+virtual void PolygonOffset(GLfloat factor, GLfloat units) OVERRIDE;
+
+virtual void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) OVERRIDE;
+
+virtual void ReleaseShaderCompiler() OVERRIDE;
+
+virtual void RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+
+virtual void SampleCoverage(GLclampf value, GLboolean invert) OVERRIDE;
+
+virtual void Scissor(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+
+virtual void ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) OVERRIDE;
+
+virtual void ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) OVERRIDE;
+
+virtual void ShallowFinishCHROMIUM() OVERRIDE;
+
+virtual void ShallowFlushCHROMIUM() OVERRIDE;
+
+virtual void StencilFunc(GLenum func, GLint ref, GLuint mask) OVERRIDE;
+
+virtual void StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) OVERRIDE;
+
+virtual void StencilMask(GLuint mask) OVERRIDE;
+
+virtual void StencilMaskSeparate(GLenum face, GLuint mask) OVERRIDE;
+
+virtual void StencilOp(GLenum fail, GLenum zfail, GLenum zpass) OVERRIDE;
+
+virtual void StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) OVERRIDE;
+
+virtual void TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+
+virtual void TexParameterf(GLenum target, GLenum pname, GLfloat param) OVERRIDE;
+
+virtual void TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) OVERRIDE;
+
+virtual void TexParameteri(GLenum target, GLenum pname, GLint param) OVERRIDE;
+
+virtual void TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) OVERRIDE;
+
+virtual void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+
+virtual void Uniform1f(GLint location, GLfloat x) OVERRIDE;
+
+virtual void Uniform1fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+
+virtual void Uniform1i(GLint location, GLint x) OVERRIDE;
+
+virtual void Uniform1iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+
+virtual void Uniform2f(GLint location, GLfloat x, GLfloat y) OVERRIDE;
+
+virtual void Uniform2fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+
+virtual void Uniform2i(GLint location, GLint x, GLint y) OVERRIDE;
+
+virtual void Uniform2iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+
+virtual void Uniform3f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+
+virtual void Uniform3fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+
+virtual void Uniform3i(GLint location, GLint x, GLint y, GLint z) OVERRIDE;
+
+virtual void Uniform3iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+
+virtual void Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+
+virtual void Uniform4fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+
+virtual void Uniform4i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z,
+ GLint w) OVERRIDE;
+
+virtual void Uniform4iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+
+virtual void UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+
+virtual void UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+
+virtual void UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+
+virtual void UseProgram(GLuint program) OVERRIDE;
+
+virtual void ValidateProgram(GLuint program) OVERRIDE;
+
+virtual void VertexAttrib1f(GLuint indx, GLfloat x) OVERRIDE;
+
+virtual void VertexAttrib1fv(GLuint indx, const GLfloat* values) OVERRIDE;
+
+virtual void VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) OVERRIDE;
+
+virtual void VertexAttrib2fv(GLuint indx, const GLfloat* values) OVERRIDE;
+
+virtual void VertexAttrib3f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+
+virtual void VertexAttrib3fv(GLuint indx, const GLfloat* values) OVERRIDE;
+
+virtual void VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+
+virtual void VertexAttrib4fv(GLuint indx, const GLfloat* values) OVERRIDE;
+
+virtual void VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) OVERRIDE;
+
+virtual void Viewport(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+
+virtual void BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) OVERRIDE;
+
+virtual void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+
+virtual void RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+
+virtual void FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) OVERRIDE;
+
+virtual void TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+
+virtual void GenQueriesEXT(GLsizei n, GLuint* queries) OVERRIDE;
+
+virtual void DeleteQueriesEXT(GLsizei n, const GLuint* queries) OVERRIDE;
+
+virtual GLboolean IsQueryEXT(GLuint id) OVERRIDE;
+
+virtual void BeginQueryEXT(GLenum target, GLuint id) OVERRIDE;
+
+virtual void EndQueryEXT(GLenum target) OVERRIDE;
+
+virtual void GetQueryivEXT(GLenum target, GLenum pname, GLint* params) OVERRIDE;
+
+virtual void GetQueryObjectuivEXT(GLuint id,
+ GLenum pname,
+ GLuint* params) OVERRIDE;
+
+virtual void InsertEventMarkerEXT(GLsizei length,
+ const GLchar* marker) OVERRIDE;
+
+virtual void PushGroupMarkerEXT(GLsizei length, const GLchar* marker) OVERRIDE;
+
+virtual void PopGroupMarkerEXT() OVERRIDE;
+
+virtual void GenVertexArraysOES(GLsizei n, GLuint* arrays) OVERRIDE;
+
+virtual void DeleteVertexArraysOES(GLsizei n, const GLuint* arrays) OVERRIDE;
+
+virtual GLboolean IsVertexArrayOES(GLuint array) OVERRIDE;
+
+virtual void BindVertexArrayOES(GLuint array) OVERRIDE;
+
+virtual void SwapBuffers() OVERRIDE;
+
+virtual GLuint GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) OVERRIDE;
+
+virtual GLboolean EnableFeatureCHROMIUM(const char* feature) OVERRIDE;
+
+virtual void* MapBufferCHROMIUM(GLuint target, GLenum access) OVERRIDE;
+
+virtual GLboolean UnmapBufferCHROMIUM(GLuint target) OVERRIDE;
+
+virtual void* MapImageCHROMIUM(GLuint image_id) OVERRIDE;
+
+virtual void UnmapImageCHROMIUM(GLuint image_id) OVERRIDE;
+
+virtual void* MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) OVERRIDE;
+
+virtual void UnmapBufferSubDataCHROMIUM(const void* mem) OVERRIDE;
+
+virtual void* MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) OVERRIDE;
+
+virtual void UnmapTexSubImage2DCHROMIUM(const void* mem) OVERRIDE;
+
+virtual void ResizeCHROMIUM(GLuint width,
+ GLuint height,
+ GLfloat scale_factor) OVERRIDE;
+
+virtual const GLchar* GetRequestableExtensionsCHROMIUM() OVERRIDE;
+
+virtual void RequestExtensionCHROMIUM(const char* extension) OVERRIDE;
+
+virtual void RateLimitOffscreenContextCHROMIUM() OVERRIDE;
+
+virtual void GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) OVERRIDE;
+
+virtual void GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) OVERRIDE;
+
+virtual GLuint CreateStreamTextureCHROMIUM(GLuint texture) OVERRIDE;
+
+virtual GLuint CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+
+virtual void DestroyImageCHROMIUM(GLuint image_id) OVERRIDE;
+
+virtual void GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+virtual GLuint CreateGpuMemoryBufferImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+
+virtual void GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+
+virtual void PostSubBufferCHROMIUM(GLint x,
+ GLint y,
+ GLint width,
+ GLint height) OVERRIDE;
+
+virtual void TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) OVERRIDE;
+
+virtual void CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) OVERRIDE;
+
+virtual void DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) OVERRIDE;
+
+virtual void DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) OVERRIDE;
+
+virtual void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) OVERRIDE;
+
+virtual void GenMailboxCHROMIUM(GLbyte* mailbox) OVERRIDE;
+
+virtual void ProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+
+virtual void ProduceTextureDirectCHROMIUM(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+
+virtual void ConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+
+virtual GLuint CreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+
+virtual void BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) OVERRIDE;
+
+virtual void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+
+virtual void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+
+virtual void TraceBeginCHROMIUM(const char* name) OVERRIDE;
+
+virtual void TraceEndCHROMIUM() OVERRIDE;
+
+virtual void AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) OVERRIDE;
+
+virtual void AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+
+virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) OVERRIDE;
+
+virtual void WaitAllAsyncTexImage2DCHROMIUM() OVERRIDE;
+
+virtual void DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) OVERRIDE;
+
+virtual void LoseContextCHROMIUM(GLenum current, GLenum other) OVERRIDE;
+
+virtual GLuint InsertSyncPointCHROMIUM() OVERRIDE;
+
+virtual void WaitSyncPointCHROMIUM(GLuint sync_point) OVERRIDE;
+
+virtual void DrawBuffersEXT(GLsizei count, const GLenum* bufs) OVERRIDE;
+
+virtual void DiscardBackbufferCHROMIUM() OVERRIDE;
+
+virtual void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) OVERRIDE;
+
+virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) OVERRIDE;
+
+virtual void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) OVERRIDE;
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
new file mode 100644
index 0000000..e63ba63
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -0,0 +1,2165 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_implementation.cc to define the
+// GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
+
+void GLES2Implementation::AttachShader(GLuint program, GLuint shader) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glAttachShader(" << program << ", "
+ << shader << ")");
+ helper_->AttachShader(program, shader);
+ CheckGLError();
+}
+
+void GLES2Implementation::BindBuffer(GLenum target, GLuint buffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindBuffer("
+ << GLES2Util::GetStringBufferTarget(target) << ", "
+ << buffer << ")");
+ if (IsBufferReservedId(buffer)) {
+ SetGLError(GL_INVALID_OPERATION, "BindBuffer", "buffer reserved id");
+ return;
+ }
+ if (BindBufferHelper(target, buffer)) {
+ helper_->BindBuffer(target, buffer);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::BindFramebuffer(GLenum target, GLuint framebuffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindFramebuffer("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ", "
+ << framebuffer << ")");
+ if (IsFramebufferReservedId(framebuffer)) {
+ SetGLError(
+ GL_INVALID_OPERATION, "BindFramebuffer", "framebuffer reserved id");
+ return;
+ }
+ if (BindFramebufferHelper(target, framebuffer)) {
+ helper_->BindFramebuffer(target, framebuffer);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::BindRenderbuffer(GLenum target, GLuint renderbuffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindRenderbuffer("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", "
+ << renderbuffer << ")");
+ if (IsRenderbufferReservedId(renderbuffer)) {
+ SetGLError(
+ GL_INVALID_OPERATION, "BindRenderbuffer", "renderbuffer reserved id");
+ return;
+ }
+ if (BindRenderbufferHelper(target, renderbuffer)) {
+ helper_->BindRenderbuffer(target, renderbuffer);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::BindTexture(GLenum target, GLuint texture) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindTexture("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << texture << ")");
+ if (IsTextureReservedId(texture)) {
+ SetGLError(GL_INVALID_OPERATION, "BindTexture", "texture reserved id");
+ return;
+ }
+ if (BindTextureHelper(target, texture)) {
+ helper_->BindTexture(target, texture);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendColor(" << red << ", "
+ << green << ", " << blue << ", " << alpha << ")");
+ helper_->BlendColor(red, green, blue, alpha);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendEquation(GLenum mode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendEquation("
+ << GLES2Util::GetStringEquation(mode) << ")");
+ helper_->BlendEquation(mode);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendEquationSeparate(GLenum modeRGB,
+ GLenum modeAlpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendEquationSeparate("
+ << GLES2Util::GetStringEquation(modeRGB) << ", "
+ << GLES2Util::GetStringEquation(modeAlpha) << ")");
+ helper_->BlendEquationSeparate(modeRGB, modeAlpha);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendFunc(GLenum sfactor, GLenum dfactor) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendFunc("
+ << GLES2Util::GetStringSrcBlendFactor(sfactor) << ", "
+ << GLES2Util::GetStringDstBlendFactor(dfactor) << ")");
+ helper_->BlendFunc(sfactor, dfactor);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendFuncSeparate("
+ << GLES2Util::GetStringSrcBlendFactor(srcRGB) << ", "
+ << GLES2Util::GetStringDstBlendFactor(dstRGB) << ", "
+ << GLES2Util::GetStringSrcBlendFactor(srcAlpha) << ", "
+ << GLES2Util::GetStringDstBlendFactor(dstAlpha) << ")");
+ helper_->BlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha);
+ CheckGLError();
+}
+
+GLenum GLES2Implementation::CheckFramebufferStatus(GLenum target) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::CheckFramebufferStatus");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCheckFramebufferStatus("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ")");
+ typedef cmds::CheckFramebufferStatus::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FRAMEBUFFER_UNSUPPORTED;
+ }
+ *result = 0;
+ helper_->CheckFramebufferStatus(
+ target, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLenum result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+void GLES2Implementation::Clear(GLbitfield mask) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glClear(" << mask << ")");
+ helper_->Clear(mask);
+ CheckGLError();
+}
+
+void GLES2Implementation::ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glClearColor(" << red << ", "
+ << green << ", " << blue << ", " << alpha << ")");
+ helper_->ClearColor(red, green, blue, alpha);
+ CheckGLError();
+}
+
+void GLES2Implementation::ClearDepthf(GLclampf depth) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glClearDepthf(" << depth << ")");
+ helper_->ClearDepthf(depth);
+ CheckGLError();
+}
+
+void GLES2Implementation::ClearStencil(GLint s) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glClearStencil(" << s << ")");
+ helper_->ClearStencil(s);
+ CheckGLError();
+}
+
+void GLES2Implementation::ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glColorMask("
+ << GLES2Util::GetStringBool(red) << ", "
+ << GLES2Util::GetStringBool(green) << ", "
+ << GLES2Util::GetStringBool(blue) << ", "
+ << GLES2Util::GetStringBool(alpha) << ")");
+ helper_->ColorMask(red, green, blue, alpha);
+ CheckGLError();
+}
+
+void GLES2Implementation::CompileShader(GLuint shader) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCompileShader(" << shader
+ << ")");
+ helper_->CompileShader(shader);
+ CheckGLError();
+}
+
+void GLES2Implementation::CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glCopyTexImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", " << level << ", "
+ << GLES2Util::GetStringTextureInternalFormat(internalformat) << ", "
+ << x << ", " << y << ", " << width << ", " << height << ", " << border
+ << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopyTexImage2D", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopyTexImage2D", "height < 0");
+ return;
+ }
+ if (border != 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopyTexImage2D", "border GL_INVALID_VALUE");
+ return;
+ }
+ helper_->CopyTexImage2D(target, level, internalformat, x, y, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCopyTexSubImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", " << xoffset << ", " << yoffset << ", " << x
+ << ", " << y << ", " << width << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopyTexSubImage2D", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopyTexSubImage2D", "height < 0");
+ return;
+ }
+ helper_->CopyTexSubImage2D(
+ target, level, xoffset, yoffset, x, y, width, height);
+ CheckGLError();
+}
+
+GLuint GLES2Implementation::CreateProgram() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCreateProgram("
+ << ")");
+ GLuint client_id;
+ GetIdHandler(id_namespaces::kProgramsAndShaders)
+ ->MakeIds(this, 0, 1, &client_id);
+ helper_->CreateProgram(client_id);
+ GPU_CLIENT_LOG("returned " << client_id);
+ CheckGLError();
+ return client_id;
+}
+
+GLuint GLES2Implementation::CreateShader(GLenum type) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCreateShader("
+ << GLES2Util::GetStringShaderType(type) << ")");
+ GLuint client_id;
+ GetIdHandler(id_namespaces::kProgramsAndShaders)
+ ->MakeIds(this, 0, 1, &client_id);
+ helper_->CreateShader(type, client_id);
+ GPU_CLIENT_LOG("returned " << client_id);
+ CheckGLError();
+ return client_id;
+}
+
+void GLES2Implementation::CullFace(GLenum mode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCullFace("
+ << GLES2Util::GetStringFaceType(mode) << ")");
+ helper_->CullFace(mode);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteBuffers(GLsizei n, const GLuint* buffers) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteBuffers(" << n << ", "
+ << static_cast<const void*>(buffers) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << buffers[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(buffers[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteBuffers", "n < 0");
+ return;
+ }
+ DeleteBuffersHelper(n, buffers);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteFramebuffers(GLsizei n,
+ const GLuint* framebuffers) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteFramebuffers(" << n << ", "
+ << static_cast<const void*>(framebuffers) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << framebuffers[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(framebuffers[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteFramebuffers", "n < 0");
+ return;
+ }
+ DeleteFramebuffersHelper(n, framebuffers);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteProgram(GLuint program) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteProgram(" << program
+ << ")");
+ GPU_CLIENT_DCHECK(program != 0);
+ DeleteProgramHelper(program);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteRenderbuffers(GLsizei n,
+ const GLuint* renderbuffers) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteRenderbuffers(" << n
+ << ", " << static_cast<const void*>(renderbuffers) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << renderbuffers[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(renderbuffers[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteRenderbuffers", "n < 0");
+ return;
+ }
+ DeleteRenderbuffersHelper(n, renderbuffers);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteShader(GLuint shader) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteShader(" << shader << ")");
+ GPU_CLIENT_DCHECK(shader != 0);
+ DeleteShaderHelper(shader);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteTextures(GLsizei n, const GLuint* textures) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteTextures(" << n << ", "
+ << static_cast<const void*>(textures) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << textures[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(textures[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteTextures", "n < 0");
+ return;
+ }
+ DeleteTexturesHelper(n, textures);
+ CheckGLError();
+}
+
+void GLES2Implementation::DepthFunc(GLenum func) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDepthFunc("
+ << GLES2Util::GetStringCmpFunction(func) << ")");
+ helper_->DepthFunc(func);
+ CheckGLError();
+}
+
+void GLES2Implementation::DepthMask(GLboolean flag) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDepthMask("
+ << GLES2Util::GetStringBool(flag) << ")");
+ helper_->DepthMask(flag);
+ CheckGLError();
+}
+
+void GLES2Implementation::DepthRangef(GLclampf zNear, GLclampf zFar) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDepthRangef(" << zNear << ", "
+ << zFar << ")");
+ helper_->DepthRangef(zNear, zFar);
+ CheckGLError();
+}
+
+void GLES2Implementation::DetachShader(GLuint program, GLuint shader) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDetachShader(" << program << ", "
+ << shader << ")");
+ helper_->DetachShader(program, shader);
+ CheckGLError();
+}
+
+void GLES2Implementation::FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFramebufferRenderbuffer("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ", "
+ << GLES2Util::GetStringAttachment(attachment) << ", "
+ << GLES2Util::GetStringRenderBufferTarget(
+ renderbuffertarget) << ", " << renderbuffer << ")");
+ helper_->FramebufferRenderbuffer(
+ target, attachment, renderbuffertarget, renderbuffer);
+ CheckGLError();
+}
+
+void GLES2Implementation::FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFramebufferTexture2D("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ", "
+ << GLES2Util::GetStringAttachment(attachment) << ", "
+ << GLES2Util::GetStringTextureTarget(textarget) << ", "
+ << texture << ", " << level << ")");
+ if (level != 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glFramebufferTexture2D", "level GL_INVALID_VALUE");
+ return;
+ }
+ helper_->FramebufferTexture2D(target, attachment, textarget, texture);
+ CheckGLError();
+}
+
+void GLES2Implementation::FrontFace(GLenum mode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFrontFace("
+ << GLES2Util::GetStringFaceMode(mode) << ")");
+ helper_->FrontFace(mode);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenBuffers(GLsizei n, GLuint* buffers) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenBuffers(" << n << ", "
+ << static_cast<const void*>(buffers) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenBuffers", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GetIdHandler(id_namespaces::kBuffers)->MakeIds(this, 0, n, buffers);
+ GenBuffersHelper(n, buffers);
+ helper_->GenBuffersImmediate(n, buffers);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << buffers[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GenerateMipmap(GLenum target) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenerateMipmap("
+ << GLES2Util::GetStringTextureBindTarget(target) << ")");
+ helper_->GenerateMipmap(target);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenFramebuffers(GLsizei n, GLuint* framebuffers) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenFramebuffers(" << n << ", "
+ << static_cast<const void*>(framebuffers) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenFramebuffers", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GetIdHandler(id_namespaces::kFramebuffers)->MakeIds(this, 0, n, framebuffers);
+ GenFramebuffersHelper(n, framebuffers);
+ helper_->GenFramebuffersImmediate(n, framebuffers);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << framebuffers[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GenRenderbuffers(GLsizei n, GLuint* renderbuffers) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenRenderbuffers(" << n << ", "
+ << static_cast<const void*>(renderbuffers) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenRenderbuffers", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GetIdHandler(id_namespaces::kRenderbuffers)
+ ->MakeIds(this, 0, n, renderbuffers);
+ GenRenderbuffersHelper(n, renderbuffers);
+ helper_->GenRenderbuffersImmediate(n, renderbuffers);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << renderbuffers[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GenTextures(GLsizei n, GLuint* textures) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenTextures(" << n << ", "
+ << static_cast<const void*>(textures) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenTextures", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GetIdHandler(id_namespaces::kTextures)->MakeIds(this, 0, n, textures);
+ GenTexturesHelper(n, textures);
+ helper_->GenTexturesImmediate(n, textures);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << textures[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GetBooleanv(GLenum pname, GLboolean* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLboolean, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetBooleanv("
+ << GLES2Util::GetStringGLState(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetBooleanv");
+ if (GetBooleanvHelper(pname, params)) {
+ return;
+ }
+ typedef cmds::GetBooleanv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetBooleanv(pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetBufferParameteriv("
+ << GLES2Util::GetStringBufferTarget(target) << ", "
+ << GLES2Util::GetStringBufferParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetBufferParameteriv");
+ if (GetBufferParameterivHelper(target, pname, params)) {
+ return;
+ }
+ typedef cmds::GetBufferParameteriv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetBufferParameteriv(
+ target, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetFloatv(GLenum pname, GLfloat* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetFloatv("
+ << GLES2Util::GetStringGLState(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetFloatv");
+ if (GetFloatvHelper(pname, params)) {
+ return;
+ }
+ typedef cmds::GetFloatv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetFloatv(pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glGetFramebufferAttachmentParameteriv("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ", "
+ << GLES2Util::GetStringAttachment(attachment) << ", "
+ << GLES2Util::GetStringFrameBufferParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu",
+ "GLES2Implementation::GetFramebufferAttachmentParameteriv");
+ if (GetFramebufferAttachmentParameterivHelper(
+ target, attachment, pname, params)) {
+ return;
+ }
+ typedef cmds::GetFramebufferAttachmentParameteriv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetFramebufferAttachmentParameteriv(
+ target, attachment, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetIntegerv(GLenum pname, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetIntegerv("
+ << GLES2Util::GetStringGLState(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetIntegerv");
+ if (GetIntegervHelper(pname, params)) {
+ return;
+ }
+ typedef cmds::GetIntegerv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetIntegerv(pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetProgramiv(GLuint program,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetProgramiv(" << program << ", "
+ << GLES2Util::GetStringProgramParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetProgramiv");
+ if (GetProgramivHelper(program, pname, params)) {
+ return;
+ }
+ typedef cmds::GetProgramiv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetProgramiv(program, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(GLsizei, length);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetProgramInfoLog"
+ << "(" << program << ", " << bufsize << ", "
+ << static_cast<void*>(length) << ", "
+ << static_cast<void*>(infolog) << ")");
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetProgramInfoLog(program, kResultBucketId);
+ std::string str;
+ GLsizei max_size = 0;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ if (bufsize > 0) {
+ max_size = std::min(static_cast<size_t>(bufsize) - 1, str.size());
+ memcpy(infolog, str.c_str(), max_size);
+ infolog[max_size] = '\0';
+ GPU_CLIENT_LOG("------\n" << infolog << "\n------");
+ }
+ }
+ if (length != NULL) {
+ *length = max_size;
+ }
+ CheckGLError();
+}
+void GLES2Implementation::GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetRenderbufferParameteriv("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", "
+ << GLES2Util::GetStringRenderBufferParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetRenderbufferParameteriv");
+ if (GetRenderbufferParameterivHelper(target, pname, params)) {
+ return;
+ }
+ typedef cmds::GetRenderbufferParameteriv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetRenderbufferParameteriv(
+ target, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetShaderiv(GLuint shader,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetShaderiv(" << shader << ", "
+ << GLES2Util::GetStringShaderParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetShaderiv");
+ if (GetShaderivHelper(shader, pname, params)) {
+ return;
+ }
+ typedef cmds::GetShaderiv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetShaderiv(shader, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(GLsizei, length);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetShaderInfoLog"
+ << "(" << shader << ", " << bufsize << ", "
+ << static_cast<void*>(length) << ", "
+ << static_cast<void*>(infolog) << ")");
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetShaderInfoLog(shader, kResultBucketId);
+ std::string str;
+ GLsizei max_size = 0;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ if (bufsize > 0) {
+ max_size = std::min(static_cast<size_t>(bufsize) - 1, str.size());
+ memcpy(infolog, str.c_str(), max_size);
+ infolog[max_size] = '\0';
+ GPU_CLIENT_LOG("------\n" << infolog << "\n------");
+ }
+ }
+ if (length != NULL) {
+ *length = max_size;
+ }
+ CheckGLError();
+}
+void GLES2Implementation::GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(GLsizei, length);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetShaderSource"
+ << "(" << shader << ", " << bufsize << ", "
+ << static_cast<void*>(length) << ", "
+ << static_cast<void*>(source) << ")");
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetShaderSource(shader, kResultBucketId);
+ std::string str;
+ GLsizei max_size = 0;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ if (bufsize > 0) {
+ max_size = std::min(static_cast<size_t>(bufsize) - 1, str.size());
+ memcpy(source, str.c_str(), max_size);
+ source[max_size] = '\0';
+ GPU_CLIENT_LOG("------\n" << source << "\n------");
+ }
+ }
+ if (length != NULL) {
+ *length = max_size;
+ }
+ CheckGLError();
+}
+void GLES2Implementation::GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetTexParameterfv("
+ << GLES2Util::GetStringGetTexParamTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetTexParameterfv");
+ if (GetTexParameterfvHelper(target, pname, params)) {
+ return;
+ }
+ typedef cmds::GetTexParameterfv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetTexParameterfv(
+ target, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetTexParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetTexParameteriv("
+ << GLES2Util::GetStringGetTexParamTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetTexParameteriv");
+ if (GetTexParameterivHelper(target, pname, params)) {
+ return;
+ }
+ typedef cmds::GetTexParameteriv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetTexParameteriv(
+ target, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::Hint(GLenum target, GLenum mode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glHint("
+ << GLES2Util::GetStringHintTarget(target) << ", "
+ << GLES2Util::GetStringHintMode(mode) << ")");
+ helper_->Hint(target, mode);
+ CheckGLError();
+}
+
+GLboolean GLES2Implementation::IsBuffer(GLuint buffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsBuffer");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsBuffer(" << buffer << ")");
+ typedef cmds::IsBuffer::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsBuffer(buffer, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+GLboolean GLES2Implementation::IsFramebuffer(GLuint framebuffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsFramebuffer");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsFramebuffer(" << framebuffer
+ << ")");
+ typedef cmds::IsFramebuffer::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsFramebuffer(framebuffer, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+GLboolean GLES2Implementation::IsProgram(GLuint program) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsProgram");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsProgram(" << program << ")");
+ typedef cmds::IsProgram::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsProgram(program, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+GLboolean GLES2Implementation::IsRenderbuffer(GLuint renderbuffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsRenderbuffer");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsRenderbuffer(" << renderbuffer
+ << ")");
+ typedef cmds::IsRenderbuffer::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsRenderbuffer(renderbuffer, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+GLboolean GLES2Implementation::IsShader(GLuint shader) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsShader");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsShader(" << shader << ")");
+ typedef cmds::IsShader::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsShader(shader, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+GLboolean GLES2Implementation::IsTexture(GLuint texture) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsTexture");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsTexture(" << texture << ")");
+ typedef cmds::IsTexture::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsTexture(texture, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+void GLES2Implementation::LineWidth(GLfloat width) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glLineWidth(" << width << ")");
+ helper_->LineWidth(width);
+ CheckGLError();
+}
+
+void GLES2Implementation::PolygonOffset(GLfloat factor, GLfloat units) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPolygonOffset(" << factor << ", "
+ << units << ")");
+ helper_->PolygonOffset(factor, units);
+ CheckGLError();
+}
+
+void GLES2Implementation::ReleaseShaderCompiler() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glReleaseShaderCompiler("
+ << ")");
+ helper_->ReleaseShaderCompiler();
+ CheckGLError();
+}
+
+void GLES2Implementation::RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glRenderbufferStorage("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", "
+ << GLES2Util::GetStringRenderBufferFormat(internalformat)
+ << ", " << width << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glRenderbufferStorage", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glRenderbufferStorage", "height < 0");
+ return;
+ }
+ helper_->RenderbufferStorage(target, internalformat, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::SampleCoverage(GLclampf value, GLboolean invert) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glSampleCoverage(" << value << ", "
+ << GLES2Util::GetStringBool(invert) << ")");
+ helper_->SampleCoverage(value, invert);
+ CheckGLError();
+}
+
+void GLES2Implementation::Scissor(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glScissor(" << x << ", " << y
+ << ", " << width << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glScissor", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glScissor", "height < 0");
+ return;
+ }
+ helper_->Scissor(x, y, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilFunc(GLenum func, GLint ref, GLuint mask) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilFunc("
+ << GLES2Util::GetStringCmpFunction(func) << ", " << ref
+ << ", " << mask << ")");
+ helper_->StencilFunc(func, ref, mask);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilFuncSeparate("
+ << GLES2Util::GetStringFaceType(face) << ", "
+ << GLES2Util::GetStringCmpFunction(func) << ", " << ref
+ << ", " << mask << ")");
+ helper_->StencilFuncSeparate(face, func, ref, mask);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilMask(GLuint mask) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilMask(" << mask << ")");
+ helper_->StencilMask(mask);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilMaskSeparate(GLenum face, GLuint mask) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilMaskSeparate("
+ << GLES2Util::GetStringFaceType(face) << ", " << mask
+ << ")");
+ helper_->StencilMaskSeparate(face, mask);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilOp(GLenum fail, GLenum zfail, GLenum zpass) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilOp("
+ << GLES2Util::GetStringStencilOp(fail) << ", "
+ << GLES2Util::GetStringStencilOp(zfail) << ", "
+ << GLES2Util::GetStringStencilOp(zpass) << ")");
+ helper_->StencilOp(fail, zfail, zpass);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilOpSeparate("
+ << GLES2Util::GetStringFaceType(face) << ", "
+ << GLES2Util::GetStringStencilOp(fail) << ", "
+ << GLES2Util::GetStringStencilOp(zfail) << ", "
+ << GLES2Util::GetStringStencilOp(zpass) << ")");
+ helper_->StencilOpSeparate(face, fail, zfail, zpass);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexParameterf(GLenum target,
+ GLenum pname,
+ GLfloat param) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexParameterf("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << param << ")");
+ helper_->TexParameterf(target, pname, param);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexParameterfv("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ GPU_CLIENT_LOG("values: " << params[0]);
+ helper_->TexParameterfvImmediate(target, pname, params);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexParameteri(GLenum target,
+ GLenum pname,
+ GLint param) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexParameteri("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << param << ")");
+ helper_->TexParameteri(target, pname, param);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexParameteriv("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ GPU_CLIENT_LOG("values: " << params[0]);
+ helper_->TexParameterivImmediate(target, pname, params);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform1f(GLint location, GLfloat x) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform1f(" << location << ", "
+ << x << ")");
+ helper_->Uniform1f(location, x);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform1fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform1fv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 1]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform1fv", "count < 0");
+ return;
+ }
+ helper_->Uniform1fvImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform1i(GLint location, GLint x) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform1i(" << location << ", "
+ << x << ")");
+ helper_->Uniform1i(location, x);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform1iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform1iv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 1]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform1iv", "count < 0");
+ return;
+ }
+ helper_->Uniform1ivImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform2f(GLint location, GLfloat x, GLfloat y) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform2f(" << location << ", "
+ << x << ", " << y << ")");
+ helper_->Uniform2f(location, x, y);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform2fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform2fv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 2] << ", " << v[1 + i * 2]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform2fv", "count < 0");
+ return;
+ }
+ helper_->Uniform2fvImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform2i(GLint location, GLint x, GLint y) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform2i(" << location << ", "
+ << x << ", " << y << ")");
+ helper_->Uniform2i(location, x, y);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform2iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform2iv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 2] << ", " << v[1 + i * 2]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform2iv", "count < 0");
+ return;
+ }
+ helper_->Uniform2ivImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform3f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform3f(" << location << ", "
+ << x << ", " << y << ", " << z << ")");
+ helper_->Uniform3f(location, x, y, z);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform3fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform3fv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 3] << ", " << v[1 + i * 3]
+ << ", " << v[2 + i * 3]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform3fv", "count < 0");
+ return;
+ }
+ helper_->Uniform3fvImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform3i(GLint location, GLint x, GLint y, GLint z) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform3i(" << location << ", "
+ << x << ", " << y << ", " << z << ")");
+ helper_->Uniform3i(location, x, y, z);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform3iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform3iv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 3] << ", " << v[1 + i * 3]
+ << ", " << v[2 + i * 3]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform3iv", "count < 0");
+ return;
+ }
+ helper_->Uniform3ivImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform4f(" << location << ", "
+ << x << ", " << y << ", " << z << ", " << w << ")");
+ helper_->Uniform4f(location, x, y, z, w);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform4fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform4fv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 4] << ", " << v[1 + i * 4]
+ << ", " << v[2 + i * 4] << ", " << v[3 + i * 4]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform4fv", "count < 0");
+ return;
+ }
+ helper_->Uniform4fvImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform4i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z,
+ GLint w) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform4i(" << location << ", "
+ << x << ", " << y << ", " << z << ", " << w << ")");
+ helper_->Uniform4i(location, x, y, z, w);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform4iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform4iv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 4] << ", " << v[1 + i * 4]
+ << ", " << v[2 + i * 4] << ", " << v[3 + i * 4]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform4iv", "count < 0");
+ return;
+ }
+ helper_->Uniform4ivImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniformMatrix2fv(" << location
+ << ", " << count << ", "
+ << GLES2Util::GetStringBool(transpose) << ", "
+ << static_cast<const void*>(value) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << value[0 + i * 4] << ", "
+ << value[1 + i * 4] << ", " << value[2 + i * 4]
+ << ", " << value[3 + i * 4]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniformMatrix2fv", "count < 0");
+ return;
+ }
+ if (transpose != false) {
+ SetGLError(
+ GL_INVALID_VALUE, "glUniformMatrix2fv", "transpose GL_INVALID_VALUE");
+ return;
+ }
+ helper_->UniformMatrix2fvImmediate(location, count, value);
+ CheckGLError();
+}
+
+void GLES2Implementation::UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniformMatrix3fv(" << location
+ << ", " << count << ", "
+ << GLES2Util::GetStringBool(transpose) << ", "
+ << static_cast<const void*>(value) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << value[0 + i * 9] << ", "
+ << value[1 + i * 9] << ", " << value[2 + i * 9]
+ << ", " << value[3 + i * 9] << ", "
+ << value[4 + i * 9] << ", " << value[5 + i * 9]
+ << ", " << value[6 + i * 9] << ", "
+ << value[7 + i * 9] << ", " << value[8 + i * 9]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniformMatrix3fv", "count < 0");
+ return;
+ }
+ if (transpose != false) {
+ SetGLError(
+ GL_INVALID_VALUE, "glUniformMatrix3fv", "transpose GL_INVALID_VALUE");
+ return;
+ }
+ helper_->UniformMatrix3fvImmediate(location, count, value);
+ CheckGLError();
+}
+
+void GLES2Implementation::UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniformMatrix4fv(" << location
+ << ", " << count << ", "
+ << GLES2Util::GetStringBool(transpose) << ", "
+ << static_cast<const void*>(value) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(
+ " " << i << ": " << value[0 + i * 16] << ", " << value[1 + i * 16]
+ << ", " << value[2 + i * 16] << ", " << value[3 + i * 16] << ", "
+ << value[4 + i * 16] << ", " << value[5 + i * 16] << ", "
+ << value[6 + i * 16] << ", " << value[7 + i * 16] << ", "
+ << value[8 + i * 16] << ", " << value[9 + i * 16] << ", "
+ << value[10 + i * 16] << ", " << value[11 + i * 16] << ", "
+ << value[12 + i * 16] << ", " << value[13 + i * 16] << ", "
+ << value[14 + i * 16] << ", " << value[15 + i * 16]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniformMatrix4fv", "count < 0");
+ return;
+ }
+ if (transpose != false) {
+ SetGLError(
+ GL_INVALID_VALUE, "glUniformMatrix4fv", "transpose GL_INVALID_VALUE");
+ return;
+ }
+ helper_->UniformMatrix4fvImmediate(location, count, value);
+ CheckGLError();
+}
+
+void GLES2Implementation::UseProgram(GLuint program) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUseProgram(" << program << ")");
+ if (IsProgramReservedId(program)) {
+ SetGLError(GL_INVALID_OPERATION, "UseProgram", "program reserved id");
+ return;
+ }
+ if (UseProgramHelper(program)) {
+ helper_->UseProgram(program);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::ValidateProgram(GLuint program) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glValidateProgram(" << program
+ << ")");
+ helper_->ValidateProgram(program);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib1f(GLuint indx, GLfloat x) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib1f(" << indx << ", "
+ << x << ")");
+ helper_->VertexAttrib1f(indx, x);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib1fv(GLuint indx, const GLfloat* values) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib1fv(" << indx << ", "
+ << static_cast<const void*>(values) << ")");
+ GPU_CLIENT_LOG("values: " << values[0]);
+ helper_->VertexAttrib1fvImmediate(indx, values);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib2f(" << indx << ", "
+ << x << ", " << y << ")");
+ helper_->VertexAttrib2f(indx, x, y);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib2fv(GLuint indx, const GLfloat* values) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib2fv(" << indx << ", "
+ << static_cast<const void*>(values) << ")");
+ GPU_CLIENT_LOG("values: " << values[0] << ", " << values[1]);
+ helper_->VertexAttrib2fvImmediate(indx, values);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib3f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib3f(" << indx << ", "
+ << x << ", " << y << ", " << z << ")");
+ helper_->VertexAttrib3f(indx, x, y, z);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib3fv(GLuint indx, const GLfloat* values) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib3fv(" << indx << ", "
+ << static_cast<const void*>(values) << ")");
+ GPU_CLIENT_LOG("values: " << values[0] << ", " << values[1] << ", "
+ << values[2]);
+ helper_->VertexAttrib3fvImmediate(indx, values);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib4f(" << indx << ", "
+ << x << ", " << y << ", " << z << ", " << w << ")");
+ helper_->VertexAttrib4f(indx, x, y, z, w);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib4fv(GLuint indx, const GLfloat* values) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib4fv(" << indx << ", "
+ << static_cast<const void*>(values) << ")");
+ GPU_CLIENT_LOG("values: " << values[0] << ", " << values[1] << ", "
+ << values[2] << ", " << values[3]);
+ helper_->VertexAttrib4fvImmediate(indx, values);
+ CheckGLError();
+}
+
+void GLES2Implementation::Viewport(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glViewport(" << x << ", " << y
+ << ", " << width << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glViewport", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glViewport", "height < 0");
+ return;
+ }
+ helper_->Viewport(x, y, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlitFramebufferCHROMIUM("
+ << srcX0 << ", " << srcY0 << ", " << srcX1 << ", " << srcY1
+ << ", " << dstX0 << ", " << dstY0 << ", " << dstX1 << ", "
+ << dstY1 << ", " << mask << ", "
+ << GLES2Util::GetStringBlitFilter(filter) << ")");
+ helper_->BlitFramebufferCHROMIUM(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ CheckGLError();
+}
+
+void GLES2Implementation::RenderbufferStorageMultisampleCHROMIUM(
+ GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glRenderbufferStorageMultisampleCHROMIUM("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", " << samples
+ << ", " << GLES2Util::GetStringRenderBufferFormat(internalformat)
+ << ", " << width << ", " << height << ")");
+ if (samples < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "samples < 0");
+ return;
+ }
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "height < 0");
+ return;
+ }
+ helper_->RenderbufferStorageMultisampleCHROMIUM(
+ target, samples, internalformat, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::RenderbufferStorageMultisampleEXT(
+ GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glRenderbufferStorageMultisampleEXT("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", " << samples
+ << ", " << GLES2Util::GetStringRenderBufferFormat(internalformat)
+ << ", " << width << ", " << height << ")");
+ if (samples < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "samples < 0");
+ return;
+ }
+ if (width < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "height < 0");
+ return;
+ }
+ helper_->RenderbufferStorageMultisampleEXT(
+ target, samples, internalformat, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glFramebufferTexture2DMultisampleEXT("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ", "
+ << GLES2Util::GetStringAttachment(attachment) << ", "
+ << GLES2Util::GetStringTextureTarget(textarget) << ", "
+ << texture << ", " << level << ", " << samples << ")");
+ if (level != 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glFramebufferTexture2DMultisampleEXT",
+ "level GL_INVALID_VALUE");
+ return;
+ }
+ if (samples < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glFramebufferTexture2DMultisampleEXT",
+ "samples < 0");
+ return;
+ }
+ helper_->FramebufferTexture2DMultisampleEXT(
+ target, attachment, textarget, texture, samples);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glTexStorage2DEXT("
+ << GLES2Util::GetStringTextureTarget(target) << ", " << levels << ", "
+ << GLES2Util::GetStringTextureInternalFormatStorage(internalFormat)
+ << ", " << width << ", " << height << ")");
+ if (levels < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexStorage2DEXT", "levels < 0");
+ return;
+ }
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexStorage2DEXT", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexStorage2DEXT", "height < 0");
+ return;
+ }
+ helper_->TexStorage2DEXT(target, levels, internalFormat, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenQueriesEXT(GLsizei n, GLuint* queries) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenQueriesEXT(" << n << ", "
+ << static_cast<const void*>(queries) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenQueriesEXT", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ IdAllocator* id_allocator = GetIdAllocator(id_namespaces::kQueries);
+ for (GLsizei ii = 0; ii < n; ++ii)
+ queries[ii] = id_allocator->AllocateID();
+ GenQueriesEXTHelper(n, queries);
+ helper_->GenQueriesEXTImmediate(n, queries);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << queries[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteQueriesEXT(GLsizei n, const GLuint* queries) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteQueriesEXT(" << n << ", "
+ << static_cast<const void*>(queries) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << queries[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(queries[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteQueriesEXT", "n < 0");
+ return;
+ }
+ DeleteQueriesEXTHelper(n, queries);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenVertexArraysOES(GLsizei n, GLuint* arrays) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenVertexArraysOES(" << n << ", "
+ << static_cast<const void*>(arrays) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenVertexArraysOES", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GetIdHandler(id_namespaces::kVertexArrays)->MakeIds(this, 0, n, arrays);
+ GenVertexArraysOESHelper(n, arrays);
+ helper_->GenVertexArraysOESImmediate(n, arrays);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << arrays[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteVertexArraysOES(GLsizei n,
+ const GLuint* arrays) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteVertexArraysOES(" << n
+ << ", " << static_cast<const void*>(arrays) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << arrays[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(arrays[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteVertexArraysOES", "n < 0");
+ return;
+ }
+ DeleteVertexArraysOESHelper(n, arrays);
+ CheckGLError();
+}
+
+GLboolean GLES2Implementation::IsVertexArrayOES(GLuint array) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsVertexArrayOES");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsVertexArrayOES(" << array
+ << ")");
+ typedef cmds::IsVertexArrayOES::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsVertexArrayOES(array, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+void GLES2Implementation::BindVertexArrayOES(GLuint array) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindVertexArrayOES(" << array
+ << ")");
+ if (IsVertexArrayReservedId(array)) {
+ SetGLError(GL_INVALID_OPERATION, "BindVertexArrayOES", "array reserved id");
+ return;
+ }
+ if (BindVertexArrayOESHelper(array)) {
+ helper_->BindVertexArrayOES(array);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(GLsizei, length);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetTranslatedShaderSourceANGLE"
+ << "(" << shader << ", " << bufsize << ", "
+ << static_cast<void*>(length) << ", "
+ << static_cast<void*>(source) << ")");
+ helper_->SetBucketSize(kResultBucketId, 0);
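+  // The service writes the translated source into the (just cleared) result
+  // bucket; it is copied out below, truncated to at most bufsize - 1 bytes
+  // and NUL-terminated.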
+ helper_->GetTranslatedShaderSourceANGLE(shader, kResultBucketId);
+ std::string str;
+ GLsizei max_size = 0;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ if (bufsize > 0) {
+ max_size = std::min(static_cast<size_t>(bufsize) - 1, str.size());
+ memcpy(source, str.c_str(), max_size);
+ source[max_size] = '\0';
+ GPU_CLIENT_LOG("------\n" << source << "\n------");
+ }
+ }
+ if (length != NULL) {
+ *length = max_size;
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexImageIOSurface2DCHROMIUM("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << width << ", " << height << ", " << ioSurfaceId << ", "
+ << plane << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImageIOSurface2DCHROMIUM", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImageIOSurface2DCHROMIUM", "height < 0");
+ return;
+ }
+ helper_->TexImageIOSurface2DCHROMIUM(
+ target, width, height, ioSurfaceId, plane);
+ CheckGLError();
+}
+
+void GLES2Implementation::CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCopyTextureCHROMIUM("
+ << GLES2Util::GetStringEnum(target) << ", "
+ << GLES2Util::GetStringEnum(source_id) << ", "
+ << GLES2Util::GetStringEnum(dest_id) << ", " << level
+ << ", " << internalformat << ", "
+ << GLES2Util::GetStringPixelType(dest_type) << ")");
+ helper_->CopyTextureCHROMIUM(
+ target, source_id, dest_id, level, internalformat, dest_type);
+ CheckGLError();
+}
+
+void GLES2Implementation::BindTexImage2DCHROMIUM(GLenum target, GLint imageId) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindTexImage2DCHROMIUM("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << imageId << ")");
+ helper_->BindTexImage2DCHROMIUM(target, imageId);
+ CheckGLError();
+}
+
+void GLES2Implementation::ReleaseTexImage2DCHROMIUM(GLenum target,
+ GLint imageId) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glReleaseTexImage2DCHROMIUM("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << imageId << ")");
+ helper_->ReleaseTexImage2DCHROMIUM(target, imageId);
+ CheckGLError();
+}
+
+void GLES2Implementation::DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDiscardFramebufferEXT("
+ << GLES2Util::GetStringEnum(target) << ", " << count
+ << ", " << static_cast<const void*>(attachments) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << attachments[0 + i * 1]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDiscardFramebufferEXT", "count < 0");
+ return;
+ }
+ helper_->DiscardFramebufferEXTImmediate(target, count, attachments);
+ CheckGLError();
+}
+
+void GLES2Implementation::LoseContextCHROMIUM(GLenum current, GLenum other) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glLoseContextCHROMIUM("
+ << GLES2Util::GetStringResetStatus(current) << ", "
+ << GLES2Util::GetStringResetStatus(other) << ")");
+ helper_->LoseContextCHROMIUM(current, other);
+ CheckGLError();
+}
+
+void GLES2Implementation::WaitSyncPointCHROMIUM(GLuint sync_point) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glWaitSyncPointCHROMIUM("
+ << sync_point << ")");
+ helper_->WaitSyncPointCHROMIUM(sync_point);
+ CheckGLError();
+}
+
+void GLES2Implementation::DrawBuffersEXT(GLsizei count, const GLenum* bufs) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawBuffersEXT(" << count << ", "
+ << static_cast<const void*>(bufs) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << bufs[0 + i * 1]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDrawBuffersEXT", "count < 0");
+ return;
+ }
+ helper_->DrawBuffersEXTImmediate(count, bufs);
+ CheckGLError();
+}
+
+void GLES2Implementation::DiscardBackbufferCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDiscardBackbufferCHROMIUM("
+ << ")");
+ helper_->DiscardBackbufferCHROMIUM();
+ CheckGLError();
+}
+
+void GLES2Implementation::ScheduleOverlayPlaneCHROMIUM(
+ GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glScheduleOverlayPlaneCHROMIUM("
+ << plane_z_order << ", " << GLES2Util::GetStringEnum(plane_transform)
+ << ", " << overlay_texture_id << ", " << bounds_x << ", " << bounds_y
+ << ", " << bounds_width << ", " << bounds_height << ", " << uv_x
+ << ", " << uv_y << ", " << uv_width << ", " << uv_height << ")");
+ helper_->ScheduleOverlayPlaneCHROMIUM(plane_z_order,
+ plane_transform,
+ overlay_texture_id,
+ bounds_x,
+ bounds_y,
+ bounds_width,
+ bounds_height,
+ uv_x,
+ uv_y,
+ uv_width,
+ uv_height);
+ CheckGLError();
+}
+
+void GLES2Implementation::MatrixLoadfCHROMIUM(GLenum matrixMode,
+ const GLfloat* m) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMatrixLoadfCHROMIUM("
+ << GLES2Util::GetStringMatrixMode(matrixMode) << ", "
+ << static_cast<const void*>(m) << ")");
+ GPU_CLIENT_LOG("values: " << m[0] << ", " << m[1] << ", " << m[2] << ", "
+ << m[3] << ", " << m[4] << ", " << m[5] << ", "
+ << m[6] << ", " << m[7] << ", " << m[8] << ", "
+ << m[9] << ", " << m[10] << ", " << m[11] << ", "
+ << m[12] << ", " << m[13] << ", " << m[14] << ", "
+ << m[15]);
+ helper_->MatrixLoadfCHROMIUMImmediate(matrixMode, m);
+ CheckGLError();
+}
+
+void GLES2Implementation::MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMatrixLoadIdentityCHROMIUM("
+ << GLES2Util::GetStringMatrixMode(matrixMode) << ")");
+ helper_->MatrixLoadIdentityCHROMIUM(matrixMode);
+ CheckGLError();
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_implementation_unittest.cc b/gpu/command_buffer/client/gles2_implementation_unittest.cc
new file mode 100644
index 0000000..80a2e41
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -0,0 +1,3414 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for GLES2Implementation.
+
+#include "gpu/command_buffer/client/gles2_implementation.h"
+
+#include <limits>
+
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+#include "base/compiler_specific.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/client/program_info_manager.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+#if !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+#define GLES2_SUPPORT_CLIENT_SIDE_ARRAYS
+#endif
+
+using testing::_;
+using testing::AtLeast;
+using testing::AnyNumber;
+using testing::DoAll;
+using testing::InSequence;
+using testing::Invoke;
+using testing::Mock;
+using testing::Sequence;
+using testing::StrictMock;
+using testing::Truly;
+using testing::Return;
+
+namespace gpu {
+namespace gles2 {
+
+ACTION_P2(SetMemory, dst, obj) {
+ memcpy(dst, &obj, sizeof(obj));
+}
+
+ACTION_P3(SetMemoryFromArray, dst, array, size) {
+ memcpy(dst, array, size);
+}
+
+// Used to help set the transfer buffer result to a SizedResult holding a
+// single value.
+template <typename T>
+class SizedResultHelper {
+ public:
+ explicit SizedResultHelper(T result)
+ : size_(sizeof(result)),
+ result_(result) {
+ }
+
+ private:
+ uint32 size_;
+ T result_;
+};
+
+// Struct to make it easy to pass a vec4 worth of floats.
+struct FourFloats {
+ FourFloats(float _x, float _y, float _z, float _w)
+ : x(_x),
+ y(_y),
+ z(_z),
+ w(_w) {
+ }
+
+ float x;
+ float y;
+ float z;
+ float w;
+};
+
+#pragma pack(push, 1)
+// Struct that holds 7 characters.
+struct Str7 {
+ char str[7];
+};
+#pragma pack(pop)
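+// The packing above keeps sizeof(Str7) at exactly 7 bytes (no padding), so
+// tests that transfer a Str7 can compare buffer contents byte-for-byte.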
+
+class MockTransferBuffer : public TransferBufferInterface {
+ public:
+ struct ExpectedMemoryInfo {
+ uint32 offset;
+ int32 id;
+ uint8* ptr;
+ };
+
+ MockTransferBuffer(
+ CommandBuffer* command_buffer,
+ unsigned int size,
+ unsigned int result_size,
+ unsigned int alignment)
+ : command_buffer_(command_buffer),
+ size_(size),
+ result_size_(result_size),
+ alignment_(alignment),
+ actual_buffer_index_(0),
+ expected_buffer_index_(0),
+ last_alloc_(NULL),
+ expected_offset_(result_size),
+ actual_offset_(result_size) {
+ // We have to allocate the buffers here because
+ // we need to know their address before GLES2Implementation::Initialize
+ // is called.
+ for (int ii = 0; ii < kNumBuffers; ++ii) {
+ buffers_[ii] = command_buffer_->CreateTransferBuffer(
+ size_ + ii * alignment_,
+ &buffer_ids_[ii]);
+ EXPECT_NE(-1, buffer_ids_[ii]);
+ }
+ }
+
+ virtual ~MockTransferBuffer() { }
+
+ virtual bool Initialize(
+ unsigned int starting_buffer_size,
+ unsigned int result_size,
+ unsigned int /* min_buffer_size */,
+ unsigned int /* max_buffer_size */,
+ unsigned int alignment,
+ unsigned int size_to_flush) OVERRIDE;
+ virtual int GetShmId() OVERRIDE;
+ virtual void* GetResultBuffer() OVERRIDE;
+ virtual int GetResultOffset() OVERRIDE;
+ virtual void Free() OVERRIDE;
+ virtual bool HaveBuffer() const OVERRIDE;
+ virtual void* AllocUpTo(
+ unsigned int size, unsigned int* size_allocated) OVERRIDE;
+ virtual void* Alloc(unsigned int size) OVERRIDE;
+ virtual RingBuffer::Offset GetOffset(void* pointer) const OVERRIDE;
+ virtual void FreePendingToken(void* p, unsigned int /* token */) OVERRIDE;
+
+ size_t MaxTransferBufferSize() {
+ return size_ - result_size_;
+ }
+
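+  // Rounds |size| up to the next multiple of |alignment_|. The mask trick
+  // below assumes |alignment_| is a power of two; e.g. with an alignment of
+  // 16, a size of 13 rounds to (13 + 15) & ~15 == 16.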
+ unsigned int RoundToAlignment(unsigned int size) {
+ return (size + alignment_ - 1) & ~(alignment_ - 1);
+ }
+
+ bool InSync() {
+ return expected_buffer_index_ == actual_buffer_index_ &&
+ expected_offset_ == actual_offset_;
+ }
+
+ ExpectedMemoryInfo GetExpectedMemory(size_t size) {
+ ExpectedMemoryInfo mem;
+ mem.offset = AllocateExpectedTransferBuffer(size);
+ mem.id = GetExpectedTransferBufferId();
+ mem.ptr = static_cast<uint8*>(
+ GetExpectedTransferAddressFromOffset(mem.offset, size));
+ return mem;
+ }
+
+ ExpectedMemoryInfo GetExpectedResultMemory(size_t size) {
+ ExpectedMemoryInfo mem;
+ mem.offset = GetExpectedResultBufferOffset();
+ mem.id = GetExpectedResultBufferId();
+ mem.ptr = static_cast<uint8*>(
+ GetExpectedTransferAddressFromOffset(mem.offset, size));
+ return mem;
+ }
+
+ private:
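+  // Two buffers so allocations can alternate between them, simulating the
+  // real transfer buffer being reallocated (see AllocUpTo and
+  // AllocateExpectedTransferBuffer).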
+ static const int kNumBuffers = 2;
+
+ uint8* actual_buffer() const {
+ return static_cast<uint8*>(buffers_[actual_buffer_index_]->memory());
+ }
+
+ uint8* expected_buffer() const {
+ return static_cast<uint8*>(buffers_[expected_buffer_index_]->memory());
+ }
+
+ uint32 AllocateExpectedTransferBuffer(size_t size) {
+ EXPECT_LE(size, MaxTransferBufferSize());
+
+ // Toggle which buffer we get each time to simulate the buffer being
+ // reallocated.
+ expected_buffer_index_ = (expected_buffer_index_ + 1) % kNumBuffers;
+
+ if (expected_offset_ + size > size_) {
+ expected_offset_ = result_size_;
+ }
+ uint32 offset = expected_offset_;
+ expected_offset_ += RoundToAlignment(size);
+
+ // Make sure each buffer has a different offset.
+ return offset + expected_buffer_index_ * alignment_;
+ }
+
+ void* GetExpectedTransferAddressFromOffset(uint32 offset, size_t size) {
+ EXPECT_GE(offset, expected_buffer_index_ * alignment_);
+ EXPECT_LE(offset + size, size_ + expected_buffer_index_ * alignment_);
+ return expected_buffer() + offset;
+ }
+
+ int GetExpectedResultBufferId() {
+ return buffer_ids_[expected_buffer_index_];
+ }
+
+ uint32 GetExpectedResultBufferOffset() {
+ return expected_buffer_index_ * alignment_;
+ }
+
+ int GetExpectedTransferBufferId() {
+ return buffer_ids_[expected_buffer_index_];
+ }
+
+ CommandBuffer* command_buffer_;
+ size_t size_;
+ size_t result_size_;
+ uint32 alignment_;
+ int buffer_ids_[kNumBuffers];
+ scoped_refptr<Buffer> buffers_[kNumBuffers];
+ int actual_buffer_index_;
+ int expected_buffer_index_;
+ void* last_alloc_;
+ uint32 expected_offset_;
+ uint32 actual_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockTransferBuffer);
+};
+
+bool MockTransferBuffer::Initialize(
+ unsigned int starting_buffer_size,
+ unsigned int result_size,
+ unsigned int /* min_buffer_size */,
+ unsigned int /* max_buffer_size */,
+ unsigned int alignment,
+ unsigned int /* size_to_flush */) {
+ // Just check they match.
+ return size_ == starting_buffer_size &&
+ result_size_ == result_size &&
+ alignment_ == alignment;
+}
+
+int MockTransferBuffer::GetShmId() {
+ return buffer_ids_[actual_buffer_index_];
+}
+
+void* MockTransferBuffer::GetResultBuffer() {
+ return actual_buffer() + actual_buffer_index_ * alignment_;
+}
+
+int MockTransferBuffer::GetResultOffset() {
+ return actual_buffer_index_ * alignment_;
+}
+
+void MockTransferBuffer::Free() {
+ NOTREACHED();
+}
+
+bool MockTransferBuffer::HaveBuffer() const {
+ return true;
+}
+
+void* MockTransferBuffer::AllocUpTo(
+ unsigned int size, unsigned int* size_allocated) {
+ EXPECT_TRUE(size_allocated != NULL);
+ EXPECT_TRUE(last_alloc_ == NULL);
+
+ // Toggle which buffer we get each time to simulate the buffer being
+ // reallocated.
+ actual_buffer_index_ = (actual_buffer_index_ + 1) % kNumBuffers;
+
+ size = std::min(static_cast<size_t>(size), MaxTransferBufferSize());
+ if (actual_offset_ + size > size_) {
+ actual_offset_ = result_size_;
+ }
+ uint32 offset = actual_offset_;
+ actual_offset_ += RoundToAlignment(size);
+ *size_allocated = size;
+
+ // Make sure each buffer has a different offset.
+ last_alloc_ = actual_buffer() + offset + actual_buffer_index_ * alignment_;
+ return last_alloc_;
+}
+
+void* MockTransferBuffer::Alloc(unsigned int size) {
+ EXPECT_LE(size, MaxTransferBufferSize());
+ unsigned int temp = 0;
+ void* p = AllocUpTo(size, &temp);
+ EXPECT_EQ(temp, size);
+ return p;
+}
+
+RingBuffer::Offset MockTransferBuffer::GetOffset(void* pointer) const {
+ // Make sure each buffer has a different offset.
+ return static_cast<uint8*>(pointer) - actual_buffer();
+}
+
+void MockTransferBuffer::FreePendingToken(void* p, unsigned int /* token */) {
+ EXPECT_EQ(last_alloc_, p);
+ last_alloc_ = NULL;
+}
+
+// API wrapper for Buffers.
+class GenBuffersAPI {
+ public:
+ static void Gen(GLES2Implementation* gl_impl, GLsizei n, GLuint* ids) {
+ gl_impl->GenBuffers(n, ids);
+ }
+
+ static void Delete(GLES2Implementation* gl_impl,
+ GLsizei n,
+ const GLuint* ids) {
+ gl_impl->DeleteBuffers(n, ids);
+ }
+};
+
+// API wrapper for Framebuffers.
+class GenFramebuffersAPI {
+ public:
+ static void Gen(GLES2Implementation* gl_impl, GLsizei n, GLuint* ids) {
+ gl_impl->GenFramebuffers(n, ids);
+ }
+
+ static void Delete(GLES2Implementation* gl_impl,
+ GLsizei n,
+ const GLuint* ids) {
+ gl_impl->DeleteFramebuffers(n, ids);
+ }
+};
+
+// API wrapper for Renderbuffers.
+class GenRenderbuffersAPI {
+ public:
+ static void Gen(GLES2Implementation* gl_impl, GLsizei n, GLuint* ids) {
+ gl_impl->GenRenderbuffers(n, ids);
+ }
+
+ static void Delete(GLES2Implementation* gl_impl,
+ GLsizei n,
+ const GLuint* ids) {
+ gl_impl->DeleteRenderbuffers(n, ids);
+ }
+};
+
+// API wrapper for Textures.
+class GenTexturesAPI {
+ public:
+ static void Gen(GLES2Implementation* gl_impl, GLsizei n, GLuint* ids) {
+ gl_impl->GenTextures(n, ids);
+ }
+
+ static void Delete(GLES2Implementation* gl_impl,
+ GLsizei n,
+ const GLuint* ids) {
+ gl_impl->DeleteTextures(n, ids);
+ }
+};
+
+class GLES2ImplementationTest : public testing::Test {
+ protected:
+ static const int kNumTestContexts = 2;
+ static const uint8 kInitialValue = 0xBD;
+ static const int32 kNumCommandEntries = 500;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+ static const size_t kTransferBufferSize = 512;
+
+ static const GLint kMaxCombinedTextureImageUnits = 8;
+ static const GLint kMaxCubeMapTextureSize = 64;
+ static const GLint kMaxFragmentUniformVectors = 16;
+ static const GLint kMaxRenderbufferSize = 64;
+ static const GLint kMaxTextureImageUnits = 8;
+ static const GLint kMaxTextureSize = 128;
+ static const GLint kMaxVaryingVectors = 8;
+ static const GLint kMaxVertexAttribs = 8;
+ static const GLint kMaxVertexTextureImageUnits = 0;
+ static const GLint kMaxVertexUniformVectors = 128;
+ static const GLint kNumCompressedTextureFormats = 0;
+ static const GLint kNumShaderBinaryFormats = 0;
+ static const GLuint kStartId = 1024;
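+  // Buffer ids start after the per-context client-side emulation ids
+  // (presumably one array id and one element-array id per context, hence
+  // 2 * kNumTestContexts).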
+ static const GLuint kBuffersStartId =
+ GLES2Implementation::kClientSideArrayId + 2 * kNumTestContexts;
+ static const GLuint kFramebuffersStartId = 1;
+ static const GLuint kProgramsAndShadersStartId = 1;
+ static const GLuint kRenderbuffersStartId = 1;
+ static const GLuint kTexturesStartId = 1;
+ static const GLuint kQueriesStartId = 1;
+ static const GLuint kVertexArraysStartId = 1;
+
+ typedef MockTransferBuffer::ExpectedMemoryInfo ExpectedMemoryInfo;
+
+ class TestContext {
+ public:
+ TestContext() : commands_(NULL), token_(0) {}
+
+ bool Initialize(ShareGroup* share_group,
+ bool bind_generates_resource_client,
+ bool bind_generates_resource_service,
+ bool lose_context_when_out_of_memory) {
+ command_buffer_.reset(new StrictMock<MockClientCommandBuffer>());
+ if (!command_buffer_->Initialize())
+ return false;
+
+ transfer_buffer_.reset(
+ new MockTransferBuffer(command_buffer_.get(),
+ kTransferBufferSize,
+ GLES2Implementation::kStartingOffset,
+ GLES2Implementation::kAlignment));
+
+ helper_.reset(new GLES2CmdHelper(command_buffer()));
+ helper_->Initialize(kCommandBufferSizeBytes);
+
+ gpu_control_.reset(new StrictMock<MockClientGpuControl>());
+ EXPECT_CALL(*gpu_control_, GetCapabilities())
+ .WillOnce(testing::Return(Capabilities()));
+
+ GLES2Implementation::GLStaticState state;
+ GLES2Implementation::GLStaticState::IntState& int_state = state.int_state;
+ int_state.max_combined_texture_image_units =
+ kMaxCombinedTextureImageUnits;
+ int_state.max_cube_map_texture_size = kMaxCubeMapTextureSize;
+ int_state.max_fragment_uniform_vectors = kMaxFragmentUniformVectors;
+ int_state.max_renderbuffer_size = kMaxRenderbufferSize;
+ int_state.max_texture_image_units = kMaxTextureImageUnits;
+ int_state.max_texture_size = kMaxTextureSize;
+ int_state.max_varying_vectors = kMaxVaryingVectors;
+ int_state.max_vertex_attribs = kMaxVertexAttribs;
+ int_state.max_vertex_texture_image_units = kMaxVertexTextureImageUnits;
+ int_state.max_vertex_uniform_vectors = kMaxVertexUniformVectors;
+ int_state.num_compressed_texture_formats = kNumCompressedTextureFormats;
+ int_state.num_shader_binary_formats = kNumShaderBinaryFormats;
+ int_state.bind_generates_resource_chromium =
+ bind_generates_resource_service ? 1 : 0;
+
+      // This just happens to work for now because IntState has one GLint per
+      // state. If IntState gets more complicated, this code will need to get
+      // more complicated too.
+ ExpectedMemoryInfo mem1 = transfer_buffer_->GetExpectedMemory(
+ sizeof(GLES2Implementation::GLStaticState::IntState) * 2 +
+ sizeof(cmds::GetShaderPrecisionFormat::Result) * 12);
+
+ {
+ InSequence sequence;
+
+ EXPECT_CALL(*command_buffer_, OnFlush())
+ .WillOnce(SetMemory(mem1.ptr + sizeof(int_state), int_state))
+ .RetiresOnSaturation();
+ GetNextToken(); // eat the token that starting up will use.
+
+ gl_.reset(new GLES2Implementation(helper_.get(),
+ share_group,
+ transfer_buffer_.get(),
+ bind_generates_resource_client,
+ lose_context_when_out_of_memory,
+ gpu_control_.get()));
+
+ if (!gl_->Initialize(kTransferBufferSize,
+ kTransferBufferSize,
+ kTransferBufferSize,
+ GLES2Implementation::kNoLimit))
+ return false;
+ }
+
+ EXPECT_CALL(*command_buffer_, OnFlush()).Times(1).RetiresOnSaturation();
+ helper_->CommandBufferHelper::Finish();
+ ::testing::Mock::VerifyAndClearExpectations(gl_.get());
+
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ commands_ = static_cast<CommandBufferEntry*>(ring_buffer->memory()) +
+ command_buffer()->GetLastState().put_offset;
+ ClearCommands();
+ EXPECT_TRUE(transfer_buffer_->InSync());
+
+ ::testing::Mock::VerifyAndClearExpectations(command_buffer());
+ return true;
+ }
+
+ void TearDown() {
+ Mock::VerifyAndClear(gl_.get());
+ EXPECT_CALL(*command_buffer(), OnFlush()).Times(AnyNumber());
+ // For command buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(AtLeast(1));
+ gl_.reset();
+ }
+
+ MockClientCommandBuffer* command_buffer() const {
+ return command_buffer_.get();
+ }
+
+ int GetNextToken() { return ++token_; }
+
+ void ClearCommands() {
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ memset(ring_buffer->memory(), kInitialValue, ring_buffer->size());
+ }
+
+ scoped_ptr<MockClientCommandBuffer> command_buffer_;
+ scoped_ptr<MockClientGpuControl> gpu_control_;
+ scoped_ptr<GLES2CmdHelper> helper_;
+ scoped_ptr<MockTransferBuffer> transfer_buffer_;
+ scoped_ptr<GLES2Implementation> gl_;
+ CommandBufferEntry* commands_;
+ int token_;
+ };
+
+ GLES2ImplementationTest() : commands_(NULL) {}
+
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ bool NoCommandsWritten() {
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ const uint8* cmds = reinterpret_cast<const uint8*>(ring_buffer->memory());
+ const uint8* end = cmds + ring_buffer->size();
+ for (; cmds < end; ++cmds) {
+ if (*cmds != kInitialValue) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ QueryTracker::Query* GetQuery(GLuint id) {
+ return gl_->query_tracker_->GetQuery(id);
+ }
+
+ struct ContextInitOptions {
+ ContextInitOptions()
+ : bind_generates_resource_client(true),
+ bind_generates_resource_service(true),
+ lose_context_when_out_of_memory(false) {}
+
+ bool bind_generates_resource_client;
+ bool bind_generates_resource_service;
+ bool lose_context_when_out_of_memory;
+ };
+
+ bool Initialize(const ContextInitOptions& init_options) {
+ bool success = true;
+ share_group_ = new ShareGroup(init_options.bind_generates_resource_client);
+
+ for (int i = 0; i < kNumTestContexts; i++) {
+ if (!test_contexts_[i].Initialize(
+ share_group_.get(),
+ init_options.bind_generates_resource_client,
+ init_options.bind_generates_resource_service,
+ init_options.lose_context_when_out_of_memory))
+ success = false;
+ }
+
+ // Default to test context 0.
+ gpu_control_ = test_contexts_[0].gpu_control_.get();
+ helper_ = test_contexts_[0].helper_.get();
+ transfer_buffer_ = test_contexts_[0].transfer_buffer_.get();
+ gl_ = test_contexts_[0].gl_.get();
+ commands_ = test_contexts_[0].commands_;
+ return success;
+ }
+
+ MockClientCommandBuffer* command_buffer() const {
+ return test_contexts_[0].command_buffer_.get();
+ }
+
+ int GetNextToken() { return test_contexts_[0].GetNextToken(); }
+
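+  // Returns the address where the next command would be written; GetSpace(0)
+  // requests zero entries, so the put pointer should not advance.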
+ const void* GetPut() {
+ return helper_->GetSpace(0);
+ }
+
+ void ClearCommands() {
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ memset(ring_buffer->memory(), kInitialValue, ring_buffer->size());
+ }
+
+ size_t MaxTransferBufferSize() {
+ return transfer_buffer_->MaxTransferBufferSize();
+ }
+
+ ExpectedMemoryInfo GetExpectedMemory(size_t size) {
+ return transfer_buffer_->GetExpectedMemory(size);
+ }
+
+ ExpectedMemoryInfo GetExpectedResultMemory(size_t size) {
+ return transfer_buffer_->GetExpectedResultMemory(size);
+ }
+
+ // Sets the ProgramInfoManager. The manager will be owned
+ // by the ShareGroup.
+ void SetProgramInfoManager(ProgramInfoManager* manager) {
+ gl_->share_group()->set_program_info_manager(manager);
+ }
+
+ int CheckError() {
+ ExpectedMemoryInfo result =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+ return gl_->GetError();
+ }
+
+ const std::string& GetLastError() {
+ return gl_->GetLastError();
+ }
+
+ bool GetBucketContents(uint32 bucket_id, std::vector<int8>* data) {
+ return gl_->GetBucketContents(bucket_id, data);
+ }
+
+ TestContext test_contexts_[kNumTestContexts];
+
+ scoped_refptr<ShareGroup> share_group_;
+ MockClientGpuControl* gpu_control_;
+ GLES2CmdHelper* helper_;
+ MockTransferBuffer* transfer_buffer_;
+ GLES2Implementation* gl_;
+ CommandBufferEntry* commands_;
+};
+
+void GLES2ImplementationTest::SetUp() {
+ ContextInitOptions init_options;
+ ASSERT_TRUE(Initialize(init_options));
+}
+
+void GLES2ImplementationTest::TearDown() {
+ for (int i = 0; i < kNumTestContexts; i++)
+ test_contexts_[i].TearDown();
+}
+
+class GLES2ImplementationManualInitTest : public GLES2ImplementationTest {
+ protected:
+ virtual void SetUp() OVERRIDE {}
+};
+
+class GLES2ImplementationStrictSharedTest : public GLES2ImplementationTest {
+ protected:
+ virtual void SetUp() OVERRIDE;
+
+ template <class ResApi>
+ void FlushGenerationTest() {
+ GLuint id1, id2, id3;
+
+ // Generate valid id.
+ ResApi::Gen(gl_, 1, &id1);
+ EXPECT_NE(id1, 0u);
+
+ // Delete id1 and generate id2. id1 should not be reused.
+ ResApi::Delete(gl_, 1, &id1);
+ ResApi::Gen(gl_, 1, &id2);
+ EXPECT_NE(id2, 0u);
+ EXPECT_NE(id2, id1);
+
+ // Expect id1 reuse after Flush.
+ gl_->Flush();
+ ResApi::Gen(gl_, 1, &id3);
+ EXPECT_EQ(id3, id1);
+ }
+
+ // Ids should not be reused unless the |Deleting| context does a Flush()
+ // AND triggers a lazy release after that.
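+  // (As the sequence below shows, the lazy release is triggered by a later
+  // Delete on the context that flushed.)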
+ template <class ResApi>
+ void CrossContextGenerationTest() {
+ GLES2Implementation* gl1 = test_contexts_[0].gl_.get();
+ GLES2Implementation* gl2 = test_contexts_[1].gl_.get();
+ GLuint id1, id2, id3;
+
+ // Delete, no flush on context 1. No reuse.
+ ResApi::Gen(gl1, 1, &id1);
+ ResApi::Delete(gl1, 1, &id1);
+ ResApi::Gen(gl1, 1, &id2);
+ EXPECT_NE(id1, id2);
+
+ // Flush context 2. Still no reuse.
+ gl2->Flush();
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_NE(id1, id3);
+ EXPECT_NE(id2, id3);
+
+ // Flush on context 1, but no lazy release. Still no reuse.
+ gl1->Flush();
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_NE(id1, id3);
+
+ // Lazy release triggered by another Delete. Should reuse id1.
+ ResApi::Delete(gl1, 1, &id2);
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_EQ(id1, id3);
+ }
+
+ // Same as CrossContextGenerationTest(), but triggers an Auto Flush on
+ // the Delete(). Tests an edge case regression.
+ template <class ResApi>
+ void CrossContextGenerationAutoFlushTest() {
+ GLES2Implementation* gl1 = test_contexts_[0].gl_.get();
+ GLES2Implementation* gl2 = test_contexts_[1].gl_.get();
+ GLuint id1, id2, id3;
+
+ // Delete, no flush on context 1. No reuse.
+ // By half filling the buffer, an internal flush is forced on the Delete().
+ ResApi::Gen(gl1, 1, &id1);
+ gl1->helper()->Noop(kNumCommandEntries / 2);
+ ResApi::Delete(gl1, 1, &id1);
+ ResApi::Gen(gl1, 1, &id2);
+ EXPECT_NE(id1, id2);
+
+ // Flush context 2. Still no reuse.
+ gl2->Flush();
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_NE(id1, id3);
+ EXPECT_NE(id2, id3);
+
+ // Flush on context 1, but no lazy release. Still no reuse.
+ gl1->Flush();
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_NE(id1, id3);
+
+ // Lazy release triggered by another Delete. Should reuse id1.
+ ResApi::Delete(gl1, 1, &id2);
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_EQ(id1, id3);
+ }
+};
+
+void GLES2ImplementationStrictSharedTest::SetUp() {
+ ContextInitOptions init_options;
+ init_options.bind_generates_resource_client = false;
+ init_options.bind_generates_resource_service = false;
+ ASSERT_TRUE(Initialize(init_options));
+}
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const uint8 GLES2ImplementationTest::kInitialValue;
+const int32 GLES2ImplementationTest::kNumCommandEntries;
+const int32 GLES2ImplementationTest::kCommandBufferSizeBytes;
+const size_t GLES2ImplementationTest::kTransferBufferSize;
+const GLint GLES2ImplementationTest::kMaxCombinedTextureImageUnits;
+const GLint GLES2ImplementationTest::kMaxCubeMapTextureSize;
+const GLint GLES2ImplementationTest::kMaxFragmentUniformVectors;
+const GLint GLES2ImplementationTest::kMaxRenderbufferSize;
+const GLint GLES2ImplementationTest::kMaxTextureImageUnits;
+const GLint GLES2ImplementationTest::kMaxTextureSize;
+const GLint GLES2ImplementationTest::kMaxVaryingVectors;
+const GLint GLES2ImplementationTest::kMaxVertexAttribs;
+const GLint GLES2ImplementationTest::kMaxVertexTextureImageUnits;
+const GLint GLES2ImplementationTest::kMaxVertexUniformVectors;
+const GLint GLES2ImplementationTest::kNumCompressedTextureFormats;
+const GLint GLES2ImplementationTest::kNumShaderBinaryFormats;
+const GLuint GLES2ImplementationTest::kStartId;
+const GLuint GLES2ImplementationTest::kBuffersStartId;
+const GLuint GLES2ImplementationTest::kFramebuffersStartId;
+const GLuint GLES2ImplementationTest::kProgramsAndShadersStartId;
+const GLuint GLES2ImplementationTest::kRenderbuffersStartId;
+const GLuint GLES2ImplementationTest::kTexturesStartId;
+const GLuint GLES2ImplementationTest::kQueriesStartId;
+const GLuint GLES2ImplementationTest::kVertexArraysStartId;
+#endif
+
+TEST_F(GLES2ImplementationTest, Basic) {
+ EXPECT_TRUE(gl_->share_group() != NULL);
+}
+
+TEST_F(GLES2ImplementationTest, GetBucketContents) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const uint32 kTestSize = MaxTransferBufferSize() + 32;
+
+  scoped_ptr<uint8[]> buf(new uint8[kTestSize]);
+ uint8* expected_data = buf.get();
+ for (uint32 ii = 0; ii < kTestSize; ++ii) {
+ expected_data[ii] = ii * 3;
+ }
+
+ struct Cmds {
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::GetBucketData get_bucket_data;
+ cmd::SetToken set_token2;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(sizeof(uint32));
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(
+ kTestSize - MaxTransferBufferSize());
+
+ Cmds expected;
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.get_bucket_data.Init(
+ kBucketId, MaxTransferBufferSize(),
+ kTestSize - MaxTransferBufferSize(), mem2.id, mem2.offset);
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ expected.set_token2.Init(GetNextToken());
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(
+ SetMemory(result1.ptr, kTestSize),
+ SetMemoryFromArray(
+ mem1.ptr, expected_data, MaxTransferBufferSize())))
+ .WillOnce(SetMemoryFromArray(
+ mem2.ptr, expected_data + MaxTransferBufferSize(),
+ kTestSize - MaxTransferBufferSize()))
+ .RetiresOnSaturation();
+
+ std::vector<int8> data;
+ GetBucketContents(kBucketId, &data);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ASSERT_EQ(kTestSize, data.size());
+ EXPECT_EQ(0, memcmp(expected_data, &data[0], data.size()));
+}
+
+TEST_F(GLES2ImplementationTest, GetShaderPrecisionFormat) {
+ struct Cmds {
+ cmds::GetShaderPrecisionFormat cmd;
+ };
+ typedef cmds::GetShaderPrecisionFormat::Result Result;
+
+ // The first call for mediump should trigger a command buffer request.
+ GLint range1[2] = {0, 0};
+ GLint precision1 = 0;
+ Cmds expected1;
+ ExpectedMemoryInfo client_result1 = GetExpectedResultMemory(4);
+ expected1.cmd.Init(GL_FRAGMENT_SHADER, GL_MEDIUM_FLOAT,
+ client_result1.id, client_result1.offset);
+ Result server_result1 = {true, 14, 14, 10};
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(client_result1.ptr, server_result1))
+ .RetiresOnSaturation();
+ gl_->GetShaderPrecisionFormat(GL_FRAGMENT_SHADER, GL_MEDIUM_FLOAT,
+ range1, &precision1);
+ const void* commands2 = GetPut();
+ EXPECT_NE(commands_, commands2);
+ EXPECT_EQ(0, memcmp(&expected1, commands_, sizeof(expected1)));
+ EXPECT_EQ(range1[0], 14);
+ EXPECT_EQ(range1[1], 14);
+ EXPECT_EQ(precision1, 10);
+
+ // The second call for mediump should use the cached value and avoid
+ // triggering a command buffer request, so we do not expect a call to
+ // OnFlush() here. We do expect the results to be correct though.
+ GLint range2[2] = {0, 0};
+ GLint precision2 = 0;
+ gl_->GetShaderPrecisionFormat(GL_FRAGMENT_SHADER, GL_MEDIUM_FLOAT,
+ range2, &precision2);
+ const void* commands3 = GetPut();
+ EXPECT_EQ(commands2, commands3);
+ EXPECT_EQ(range2[0], 14);
+ EXPECT_EQ(range2[1], 14);
+ EXPECT_EQ(precision2, 10);
+
+ // If we then make a request for highp, we should get another command
+ // buffer request since it hasn't been cached yet.
+ GLint range3[2] = {0, 0};
+ GLint precision3 = 0;
+ Cmds expected3;
+ ExpectedMemoryInfo result3 = GetExpectedResultMemory(4);
+ expected3.cmd.Init(GL_FRAGMENT_SHADER, GL_HIGH_FLOAT,
+ result3.id, result3.offset);
+ Result result3_source = {true, 62, 62, 16};
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result3.ptr, result3_source))
+ .RetiresOnSaturation();
+ gl_->GetShaderPrecisionFormat(GL_FRAGMENT_SHADER, GL_HIGH_FLOAT,
+ range3, &precision3);
+ const void* commands4 = GetPut();
+ EXPECT_NE(commands3, commands4);
+ EXPECT_EQ(0, memcmp(&expected3, commands3, sizeof(expected3)));
+ EXPECT_EQ(range3[0], 62);
+ EXPECT_EQ(range3[1], 62);
+ EXPECT_EQ(precision3, 16);
+}
+
+TEST_F(GLES2ImplementationTest, ShaderSource) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const GLuint kShaderId = 456;
+ const char* kString1 = "foobar";
+ const char* kString2 = "barfoo";
+ const size_t kString1Size = strlen(kString1);
+ const size_t kString2Size = strlen(kString2);
+  const size_t kString3Size = 1;  // Want the terminating NULL.
+ const size_t kSourceSize = kString1Size + kString2Size + kString3Size;
+ const size_t kPaddedString1Size =
+ transfer_buffer_->RoundToAlignment(kString1Size);
+ const size_t kPaddedString2Size =
+ transfer_buffer_->RoundToAlignment(kString2Size);
+ const size_t kPaddedString3Size =
+ transfer_buffer_->RoundToAlignment(kString3Size);
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size;
+ cmd::SetBucketData set_bucket_data1;
+ cmd::SetToken set_token1;
+ cmd::SetBucketData set_bucket_data2;
+ cmd::SetToken set_token2;
+ cmd::SetBucketData set_bucket_data3;
+ cmd::SetToken set_token3;
+ cmds::ShaderSourceBucket shader_source_bucket;
+ cmd::SetBucketSize clear_bucket_size;
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kPaddedString1Size);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kPaddedString2Size);
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(kPaddedString3Size);
+
+ Cmds expected;
+ expected.set_bucket_size.Init(kBucketId, kSourceSize);
+ expected.set_bucket_data1.Init(
+ kBucketId, 0, kString1Size, mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_data2.Init(
+ kBucketId, kString1Size, kString2Size, mem2.id, mem2.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_bucket_data3.Init(
+ kBucketId, kString1Size + kString2Size,
+ kString3Size, mem3.id, mem3.offset);
+ expected.set_token3.Init(GetNextToken());
+ expected.shader_source_bucket.Init(kShaderId, kBucketId);
+ expected.clear_bucket_size.Init(kBucketId, 0);
+ const char* strings[] = {
+ kString1,
+ kString2,
+ };
+ gl_->ShaderSource(kShaderId, 2, strings, NULL);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GetShaderSource) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const GLuint kShaderId = 456;
+ const Str7 kString = {"foobar"};
+ const char kBad = 0x12;
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size1;
+ cmds::GetShaderSource get_shader_source;
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(sizeof(uint32));
+
+ Cmds expected;
+ expected.set_bucket_size1.Init(kBucketId, 0);
+ expected.get_shader_source.Init(kShaderId, kBucketId);
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ char buf[sizeof(kString) + 1];
+ memset(buf, kBad, sizeof(buf));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(SetMemory(result1.ptr, uint32(sizeof(kString))),
+ SetMemory(mem1.ptr, kString)))
+ .RetiresOnSaturation();
+
+ GLsizei length = 0;
+ gl_->GetShaderSource(kShaderId, sizeof(buf), &length, buf);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(sizeof(kString) - 1, static_cast<size_t>(length));
+ EXPECT_STREQ(kString.str, buf);
+ EXPECT_EQ(buf[sizeof(kString)], kBad);
+}
+
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+
+TEST_F(GLES2ImplementationTest, DrawArraysClientSideBuffers) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawArrays draw;
+ cmds::BindBuffer restore;
+ };
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLint kFirst = 1;
+ const GLsizei kCount = 2;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem2.id, mem2.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(
+ kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kFirst, kCount);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(
+ kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->DrawArrays(GL_POINTS, kFirst, kCount);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawArraysInstancedANGLEClientSideBuffers) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::VertexAttribDivisorANGLE divisor;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawArraysInstancedANGLE draw;
+ cmds::BindBuffer restore;
+ };
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLint kFirst = 1;
+ const GLsizei kCount = 2;
+ const GLuint kDivisor = 1;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ 1 * kNumComponents2 * sizeof(verts[0][0]);
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.divisor.Init(kAttribIndex2, kDivisor);
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem2.id, mem2.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(
+ kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kFirst, kCount, 1);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(
+ kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribDivisorANGLE(kAttribIndex2, kDivisor);
+ gl_->DrawArraysInstancedANGLE(GL_POINTS, kFirst, kCount, 1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawElementsClientSideBuffers) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ static const uint16 indices[] = {
+ 1, 2,
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::BindBuffer bind_to_index_emu;
+ cmds::BufferData set_index_size;
+ cmds::BufferSubData copy_data0;
+ cmd::SetToken set_token0;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawElements draw;
+ cmds::BindBuffer restore;
+ cmds::BindBuffer restore_element;
+ };
+ const GLsizei kIndexSize = sizeof(indices);
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kEmuIndexBufferId =
+ GLES2Implementation::kClientSideElementArrayId;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLsizei kCount = 2;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kIndexSize);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.bind_to_index_emu.Init(GL_ELEMENT_ARRAY_BUFFER, kEmuIndexBufferId);
+ expected.set_index_size.Init(
+ GL_ELEMENT_ARRAY_BUFFER, kIndexSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data0.Init(
+ GL_ELEMENT_ARRAY_BUFFER, 0, kIndexSize, mem1.id, mem1.offset);
+ expected.set_token0.Init(GetNextToken());
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem2.id, mem2.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem3.id, mem3.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kCount, GL_UNSIGNED_SHORT, 0);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+ expected.restore_element.Init(GL_ELEMENT_ARRAY_BUFFER, 0);
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->DrawElements(GL_POINTS, kCount, GL_UNSIGNED_SHORT, indices);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawElementsClientSideBuffersIndexUint) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ static const uint32 indices[] = {
+ 1, 2,
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::BindBuffer bind_to_index_emu;
+ cmds::BufferData set_index_size;
+ cmds::BufferSubData copy_data0;
+ cmd::SetToken set_token0;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawElements draw;
+ cmds::BindBuffer restore;
+ cmds::BindBuffer restore_element;
+ };
+ const GLsizei kIndexSize = sizeof(indices);
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kEmuIndexBufferId =
+ GLES2Implementation::kClientSideElementArrayId;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLsizei kCount = 2;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kIndexSize);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.bind_to_index_emu.Init(GL_ELEMENT_ARRAY_BUFFER, kEmuIndexBufferId);
+ expected.set_index_size.Init(
+ GL_ELEMENT_ARRAY_BUFFER, kIndexSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data0.Init(
+ GL_ELEMENT_ARRAY_BUFFER, 0, kIndexSize, mem1.id, mem1.offset);
+ expected.set_token0.Init(GetNextToken());
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem2.id, mem2.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem3.id, mem3.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kCount, GL_UNSIGNED_INT, 0);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+ expected.restore_element.Init(GL_ELEMENT_ARRAY_BUFFER, 0);
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->DrawElements(GL_POINTS, kCount, GL_UNSIGNED_INT, indices);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawElementsClientSideBuffersInvalidIndexUint) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ static const uint32 indices[] = {
+ 1, 0x90000000
+ };
+
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLsizei kCount = 2;
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .Times(1)
+ .RetiresOnSaturation();
+
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->DrawElements(GL_POINTS, kCount, GL_UNSIGNED_INT, indices);
+
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), gl_->GetError());
+}
+
+TEST_F(GLES2ImplementationTest,
+ DrawElementsClientSideBuffersServiceSideIndices) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::BindBuffer bind_to_index;
+ cmds::GetMaxValueInBufferCHROMIUM get_max;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawElements draw;
+ cmds::BindBuffer restore;
+ };
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kClientIndexBufferId = 0x789;
+ const GLuint kIndexOffset = 0x40;
+ const GLuint kMaxIndex = 2;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLsizei kCount = 2;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedResultMemory(sizeof(uint32));
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.bind_to_index.Init(GL_ELEMENT_ARRAY_BUFFER, kClientIndexBufferId);
+ expected.get_max.Init(kClientIndexBufferId, kCount, GL_UNSIGNED_SHORT,
+ kIndexOffset, mem1.id, mem1.offset);
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem2.id, mem2.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem3.id, mem3.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kCount, GL_UNSIGNED_SHORT, kIndexOffset);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+      .WillOnce(SetMemory(mem1.ptr, kMaxIndex))
+ .RetiresOnSaturation();
+
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, kClientIndexBufferId);
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->DrawElements(GL_POINTS, kCount, GL_UNSIGNED_SHORT,
+ reinterpret_cast<const void*>(kIndexOffset));
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawElementsInstancedANGLEClientSideBuffers) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ static const uint16 indices[] = {
+ 1, 2,
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::VertexAttribDivisorANGLE divisor;
+ cmds::BindBuffer bind_to_index_emu;
+ cmds::BufferData set_index_size;
+ cmds::BufferSubData copy_data0;
+ cmd::SetToken set_token0;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawElementsInstancedANGLE draw;
+ cmds::BindBuffer restore;
+ cmds::BindBuffer restore_element;
+ };
+ const GLsizei kIndexSize = sizeof(indices);
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kEmuIndexBufferId =
+ GLES2Implementation::kClientSideElementArrayId;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLsizei kCount = 2;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ 1 * kNumComponents2 * sizeof(verts[0][0]);
+ const GLuint kDivisor = 1;
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kIndexSize);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.divisor.Init(kAttribIndex2, kDivisor);
+ expected.bind_to_index_emu.Init(GL_ELEMENT_ARRAY_BUFFER, kEmuIndexBufferId);
+ expected.set_index_size.Init(
+ GL_ELEMENT_ARRAY_BUFFER, kIndexSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data0.Init(
+ GL_ELEMENT_ARRAY_BUFFER, 0, kIndexSize, mem1.id, mem1.offset);
+ expected.set_token0.Init(GetNextToken());
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem2.id, mem2.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem3.id, mem3.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kCount, GL_UNSIGNED_SHORT, 0, 1);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+ expected.restore_element.Init(GL_ELEMENT_ARRAY_BUFFER, 0);
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribDivisorANGLE(kAttribIndex2, kDivisor);
+ gl_->DrawElementsInstancedANGLE(
+ GL_POINTS, kCount, GL_UNSIGNED_SHORT, indices, 1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GetVertexBufferPointerv) {
+ static const float verts[1] = { 0.0f, };
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kStride1 = 12;
+ const GLsizei kStride2 = 0;
+ const GLuint kBufferId = 0x123;
+ const GLint kOffset2 = 0x456;
+
+ // It's all cached on the client side so no get commands are issued.
+ struct Cmds {
+ cmds::BindBuffer bind;
+ cmds::VertexAttribPointer set_pointer;
+ };
+
+ Cmds expected;
+ expected.bind.Init(GL_ARRAY_BUFFER, kBufferId);
+ expected.set_pointer.Init(kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE,
+ kStride2, kOffset2);
+
+ // Set one client side buffer.
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kStride1, verts);
+ // Set one VBO
+ gl_->BindBuffer(GL_ARRAY_BUFFER, kBufferId);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kStride2,
+ reinterpret_cast<const void*>(kOffset2));
+ // now get them both.
+ void* ptr1 = NULL;
+ void* ptr2 = NULL;
+
+ gl_->GetVertexAttribPointerv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_POINTER, &ptr1);
+ gl_->GetVertexAttribPointerv(
+ kAttribIndex2, GL_VERTEX_ATTRIB_ARRAY_POINTER, &ptr2);
+
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(static_cast<const void*>(&verts) == ptr1);
+ EXPECT_TRUE(ptr2 == reinterpret_cast<void*>(kOffset2));
+}
+
+TEST_F(GLES2ImplementationTest, GetVertexAttrib) {
+ static const float verts[1] = { 0.0f, };
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kStride1 = 12;
+ const GLsizei kStride2 = 0;
+ const GLuint kBufferId = 0x123;
+ const GLint kOffset2 = 0x456;
+
+ // Only one set and one get because the client side buffer's info is stored
+ // on the client side.
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable;
+ cmds::BindBuffer bind;
+ cmds::VertexAttribPointer set_pointer;
+ cmds::GetVertexAttribfv get2; // for getting the value from attrib1
+ };
+
+ ExpectedMemoryInfo mem2 = GetExpectedResultMemory(16);
+
+ Cmds expected;
+ expected.enable.Init(kAttribIndex1);
+ expected.bind.Init(GL_ARRAY_BUFFER, kBufferId);
+ expected.set_pointer.Init(kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE,
+ kStride2, kOffset2);
+ expected.get2.Init(kAttribIndex1,
+ GL_CURRENT_VERTEX_ATTRIB,
+ mem2.id, mem2.offset);
+
+ FourFloats current_attrib(1.2f, 3.4f, 5.6f, 7.8f);
+
+ // One call to flush to wait for the last call, GetVertexAttribfv,
+ // as the others are all answered from the client side cache.
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(
+ mem2.ptr, SizedResultHelper<FourFloats>(current_attrib)))
+ .RetiresOnSaturation();
+
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ // Set one client side buffer.
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kStride1, verts);
+ // Set one VBO
+ gl_->BindBuffer(GL_ARRAY_BUFFER, kBufferId);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kStride2,
+ reinterpret_cast<const void*>(kOffset2));
+ // first get the service side once to see that we make a command
+ GLint buffer_id = 0;
+ GLint enabled = 0;
+ GLint size = 0;
+ GLint stride = 0;
+ GLint type = 0;
+ GLint normalized = 1;
+ float current[4] = { 0.0f, };
+
+ gl_->GetVertexAttribiv(
+ kAttribIndex2, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &buffer_id);
+ EXPECT_EQ(kBufferId, static_cast<GLuint>(buffer_id));
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &buffer_id);
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_ENABLED, &enabled);
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_SIZE, &size);
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_STRIDE, &stride);
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_TYPE, &type);
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, &normalized);
+ gl_->GetVertexAttribfv(
+ kAttribIndex1, GL_CURRENT_VERTEX_ATTRIB, &current[0]);
+
+ EXPECT_EQ(0, buffer_id);
+ EXPECT_EQ(GL_TRUE, enabled);
+ EXPECT_EQ(kNumComponents1, size);
+ EXPECT_EQ(kStride1, stride);
+ EXPECT_EQ(GL_FLOAT, type);
+ EXPECT_EQ(GL_FALSE, normalized);
+ EXPECT_EQ(0, memcmp(&current_attrib, &current, sizeof(current_attrib)));
+
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ReservedIds) {
+ // Only the get error command should be issued.
+ struct Cmds {
+ cmds::GetError get;
+ };
+ Cmds expected;
+
+ ExpectedMemoryInfo mem1 = GetExpectedResultMemory(
+ sizeof(cmds::GetError::Result));
+
+ expected.get.Init(mem1.id, mem1.offset);
+
+ // One call to flush to wait for GetError
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(mem1.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ gl_->BindBuffer(
+ GL_ARRAY_BUFFER,
+ GLES2Implementation::kClientSideArrayId);
+ gl_->BindBuffer(
+ GL_ARRAY_BUFFER,
+ GLES2Implementation::kClientSideElementArrayId);
+ GLenum err = gl_->GetError();
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), err);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+#endif // defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+
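+ // Test that a ReadPixels too large for the transfer buffer is split into
+ // two reads.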
+TEST_F(GLES2ImplementationTest, ReadPixels2Reads) {
+ struct Cmds {
+ cmds::ReadPixels read1;
+ cmd::SetToken set_token1;
+ cmds::ReadPixels read2;
+ cmd::SetToken set_token2;
+ };
+ const GLint kBytesPerPixel = 4;
+ const GLint kWidth =
+ (kTransferBufferSize - GLES2Implementation::kStartingOffset) /
+ kBytesPerPixel;
+ const GLint kHeight = 2;
+ const GLenum kFormat = GL_RGBA;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+
+ ExpectedMemoryInfo mem1 =
+ GetExpectedMemory(kWidth * kHeight / 2 * kBytesPerPixel);
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::ReadPixels::Result));
+ ExpectedMemoryInfo mem2 =
+ GetExpectedMemory(kWidth * kHeight / 2 * kBytesPerPixel);
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::ReadPixels::Result));
+
+ Cmds expected;
+ expected.read1.Init(
+ 0, 0, kWidth, kHeight / 2, kFormat, kType,
+ mem1.id, mem1.offset, result1.id, result1.offset,
+ false);
+ expected.set_token1.Init(GetNextToken());
+ expected.read2.Init(
+ 0, kHeight / 2, kWidth, kHeight / 2, kFormat, kType,
+ mem2.id, mem2.offset, result2.id, result2.offset, false);
+ expected.set_token2.Init(GetNextToken());
+ scoped_ptr<int8[]> buffer(new int8[kWidth * kHeight * kBytesPerPixel]);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, static_cast<uint32>(1)))
+ .WillOnce(SetMemory(result2.ptr, static_cast<uint32>(1)))
+ .RetiresOnSaturation();
+
+ gl_->ReadPixels(0, 0, kWidth, kHeight, kFormat, kType, buffer.get());
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ReadPixelsBadFormatType) {
+ struct Cmds {
+ cmds::ReadPixels read;
+ cmd::SetToken set_token;
+ };
+ const GLint kBytesPerPixel = 4;
+ const GLint kWidth = 2;
+ const GLint kHeight = 2;
+ const GLenum kFormat = 0;
+ const GLenum kType = 0;
+
+ ExpectedMemoryInfo mem1 =
+ GetExpectedMemory(kWidth * kHeight * kBytesPerPixel);
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::ReadPixels::Result));
+
+ Cmds expected;
+ expected.read.Init(
+ 0, 0, kWidth, kHeight, kFormat, kType,
+ mem1.id, mem1.offset, result1.id, result1.offset, false);
+ expected.set_token.Init(GetNextToken());
+ scoped_ptr<int8[]> buffer(new int8[kWidth * kHeight * kBytesPerPixel]);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .Times(1)
+ .RetiresOnSaturation();
+
+ gl_->ReadPixels(0, 0, kWidth, kHeight, kFormat, kType, buffer.get());
+}
+
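+ // Test that FreeUnusedSharedMemory releases transfer buffer memory that is
+ // no longer in use.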
+TEST_F(GLES2ImplementationTest, FreeUnusedSharedMemory) {
+ struct Cmds {
+ cmds::BufferSubData buf;
+ cmd::SetToken set_token;
+ };
+ const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+ const GLintptr kOffset = 15;
+ const GLsizeiptr kSize = 16;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kSize);
+
+ Cmds expected;
+ expected.buf.Init(
+ kTarget, kOffset, kSize, mem1.id, mem1.offset);
+ expected.set_token.Init(GetNextToken());
+
+ void* mem = gl_->MapBufferSubDataCHROMIUM(
+ kTarget, kOffset, kSize, GL_WRITE_ONLY);
+ ASSERT_TRUE(mem != NULL);
+ gl_->UnmapBufferSubDataCHROMIUM(mem);
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ gl_->FreeUnusedSharedMemory();
+}
+
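+ // Test that Map/UnmapBufferSubDataCHROMIUM issue a single BufferSubData when
+ // the memory is unmapped.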
+TEST_F(GLES2ImplementationTest, MapUnmapBufferSubDataCHROMIUM) {
+ struct Cmds {
+ cmds::BufferSubData buf;
+ cmd::SetToken set_token;
+ };
+ const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+ const GLintptr kOffset = 15;
+ const GLsizeiptr kSize = 16;
+
+ uint32 offset = 0;
+ Cmds expected;
+ expected.buf.Init(
+ kTarget, kOffset, kSize,
+ command_buffer()->GetNextFreeTransferBufferId(), offset);
+ expected.set_token.Init(GetNextToken());
+
+ void* mem = gl_->MapBufferSubDataCHROMIUM(
+ kTarget, kOffset, kSize, GL_WRITE_ONLY);
+ ASSERT_TRUE(mem != NULL);
+ gl_->UnmapBufferSubDataCHROMIUM(mem);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MapUnmapBufferSubDataCHROMIUMBadArgs) {
+ const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+ const GLintptr kOffset = 15;
+ const GLsizeiptr kSize = 16;
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result3 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result4 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ // Calls to flush to wait for GetError
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result2.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result3.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result4.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ void* mem;
+ mem = gl_->MapBufferSubDataCHROMIUM(kTarget, -1, kSize, GL_WRITE_ONLY);
+ ASSERT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapBufferSubDataCHROMIUM(kTarget, kOffset, -1, GL_WRITE_ONLY);
+ ASSERT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapBufferSubDataCHROMIUM(kTarget, kOffset, kSize, GL_READ_ONLY);
+ ASSERT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), gl_->GetError());
+ const char* kPtr = "something";
+ gl_->UnmapBufferSubDataCHROMIUM(kPtr);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+}
+
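+ // Test that Map/UnmapTexSubImage2DCHROMIUM issue a single TexSubImage2D when
+ // the memory is unmapped.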
+TEST_F(GLES2ImplementationTest, MapUnmapTexSubImage2DCHROMIUM) {
+ struct Cmds {
+ cmds::TexSubImage2D tex;
+ cmd::SetToken set_token;
+ };
+ const GLint kLevel = 1;
+ const GLint kXOffset = 2;
+ const GLint kYOffset = 3;
+ const GLint kWidth = 4;
+ const GLint kHeight = 5;
+ const GLenum kFormat = GL_RGBA;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+
+ uint32 offset = 0;
+ Cmds expected;
+ expected.tex.Init(
+ GL_TEXTURE_2D, kLevel, kXOffset, kYOffset, kWidth, kHeight, kFormat,
+ kType,
+ command_buffer()->GetNextFreeTransferBufferId(), offset, GL_FALSE);
+ expected.set_token.Init(GetNextToken());
+
+ void* mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ kXOffset,
+ kYOffset,
+ kWidth,
+ kHeight,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ ASSERT_TRUE(mem != NULL);
+ gl_->UnmapTexSubImage2DCHROMIUM(mem);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MapUnmapTexSubImage2DCHROMIUMBadArgs) {
+ const GLint kLevel = 1;
+ const GLint kXOffset = 2;
+ const GLint kYOffset = 3;
+ const GLint kWidth = 4;
+ const GLint kHeight = 5;
+ const GLenum kFormat = GL_RGBA;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result3 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result4 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result5 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result6 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result7 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ // Calls to flush to wait for GetError
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result2.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result3.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result4.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result5.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result6.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result7.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ void* mem;
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ -1,
+ kXOffset,
+ kYOffset,
+ kWidth,
+ kHeight,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ -1,
+ kYOffset,
+ kWidth,
+ kHeight,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ kXOffset,
+ -1,
+ kWidth,
+ kHeight,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ kXOffset,
+ kYOffset,
+ -1,
+ kHeight,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ kXOffset,
+ kYOffset,
+ kWidth,
+ -1,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ kXOffset,
+ kYOffset,
+ kWidth,
+ kHeight,
+ kFormat,
+ kType,
+ GL_READ_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), gl_->GetError());
+ const char* kPtr = "something";
+ gl_->UnmapTexSubImage2DCHROMIUM(kPtr);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+}
+
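+ // Test that GetMultipleIntegervCHROMIUM packs the pnames and results into a
+ // single transfer buffer allocation and copies the returned values back out.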
+TEST_F(GLES2ImplementationTest, GetMultipleIntegervCHROMIUMValidArgs) {
+ const GLenum pnames[] = {
+ GL_DEPTH_WRITEMASK,
+ GL_COLOR_WRITEMASK,
+ GL_STENCIL_WRITEMASK,
+ };
+ const GLint num_results = 6;
+ GLint results[num_results + 1];
+ struct Cmds {
+ cmds::GetMultipleIntegervCHROMIUM get_multiple;
+ cmd::SetToken set_token;
+ };
+ const GLsizei kNumPnames = arraysize(pnames);
+ const GLsizeiptr kResultsSize = num_results * sizeof(results[0]);
+ const size_t kPNamesSize = kNumPnames * sizeof(pnames[0]);
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kPNamesSize + kResultsSize);
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(
+ sizeof(cmds::GetError::Result));
+
+ const uint32 kPnamesOffset = mem1.offset;
+ const uint32 kResultsOffset = mem1.offset + kPNamesSize;
+ Cmds expected;
+ expected.get_multiple.Init(
+ mem1.id, kPnamesOffset, kNumPnames,
+ mem1.id, kResultsOffset, kResultsSize);
+ expected.set_token.Init(GetNextToken());
+
+ const GLint kSentinel = 0x12345678;
+ memset(results, 0, sizeof(results));
+ results[num_results] = kSentinel;
+ const GLint returned_results[] = {
+ 1, 0, 1, 0, 1, -1,
+ };
+ // One call to flush to wait for results
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemoryFromArray(mem1.ptr + kPNamesSize,
+ returned_results, sizeof(returned_results)))
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ gl_->GetMultipleIntegervCHROMIUM(
+ &pnames[0], kNumPnames, &results[0], kResultsSize);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(0, memcmp(&returned_results, results, sizeof(returned_results)));
+ EXPECT_EQ(kSentinel, results[num_results]);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+}
+
+TEST_F(GLES2ImplementationTest, GetMultipleIntegervCHROMIUMBadArgs) {
+ GLenum pnames[] = {
+ GL_DEPTH_WRITEMASK,
+ GL_COLOR_WRITEMASK,
+ GL_STENCIL_WRITEMASK,
+ };
+ const GLint num_results = 6;
+ GLint results[num_results + 1];
+ const GLsizei kNumPnames = arraysize(pnames);
+ const GLsizeiptr kResultsSize = num_results * sizeof(results[0]);
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result3 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result4 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ // Calls to flush to wait for GetError
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result2.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result3.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result4.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ const GLint kSentinel = 0x12345678;
+ memset(results, 0, sizeof(results));
+ results[num_results] = kSentinel;
+ // try bad size.
+ gl_->GetMultipleIntegervCHROMIUM(
+ &pnames[0], kNumPnames, &results[0], kResultsSize + 1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ EXPECT_EQ(0, results[0]);
+ EXPECT_EQ(kSentinel, results[num_results]);
+ // try bad size.
+ ClearCommands();
+ gl_->GetMultipleIntegervCHROMIUM(
+ &pnames[0], kNumPnames, &results[0], kResultsSize - 1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ EXPECT_EQ(0, results[0]);
+ EXPECT_EQ(kSentinel, results[num_results]);
+ // try uncleared results.
+ ClearCommands();
+ results[2] = 1;
+ gl_->GetMultipleIntegervCHROMIUM(
+ &pnames[0], kNumPnames, &results[0], kResultsSize);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ EXPECT_EQ(0, results[0]);
+ EXPECT_EQ(kSentinel, results[num_results]);
+ // try bad enum results.
+ ClearCommands();
+ results[2] = 0;
+ pnames[1] = GL_TRUE;
+ gl_->GetMultipleIntegervCHROMIUM(
+ &pnames[0], kNumPnames, &results[0], kResultsSize);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), gl_->GetError());
+ EXPECT_EQ(0, results[0]);
+ EXPECT_EQ(kSentinel, results[num_results]);
+}
+
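+ // Test that GetProgramInfoCHROMIUM transfers the program info through a
+ // bucket.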
+TEST_F(GLES2ImplementationTest, GetProgramInfoCHROMIUMGoodArgs) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const GLuint kProgramId = 123;
+ const char kBad = 0x12;
+ GLsizei size = 0;
+ const Str7 kString = {"foobar"};
+ char buf[20];
+
+ ExpectedMemoryInfo mem1 =
+ GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmd::GetBucketStart::Result));
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ memset(buf, kBad, sizeof(buf));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(SetMemory(result1.ptr, uint32(sizeof(kString))),
+ SetMemory(mem1.ptr, kString)))
+ .WillOnce(SetMemory(result2.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size1;
+ cmds::GetProgramInfoCHROMIUM get_program_info;
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+ Cmds expected;
+ expected.set_bucket_size1.Init(kBucketId, 0);
+ expected.get_program_info.Init(kProgramId, kBucketId);
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ gl_->GetProgramInfoCHROMIUM(kProgramId, sizeof(buf), &size, &buf);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+ EXPECT_EQ(sizeof(kString), static_cast<size_t>(size));
+ EXPECT_STREQ(kString.str, buf);
+ EXPECT_EQ(buf[sizeof(kString)], kBad);
+}
+
+TEST_F(GLES2ImplementationTest, GetProgramInfoCHROMIUMBadArgs) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const GLuint kProgramId = 123;
+ GLsizei size = 0;
+ const Str7 kString = {"foobar"};
+ char buf[20];
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmd::GetBucketStart::Result));
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result3 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result4 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(SetMemory(result1.ptr, uint32(sizeof(kString))),
+ SetMemory(mem1.ptr, kString)))
+ .WillOnce(SetMemory(result2.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result3.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result4.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ // try bufsize not big enough.
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size1;
+ cmds::GetProgramInfoCHROMIUM get_program_info;
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+ Cmds expected;
+ expected.set_bucket_size1.Init(kBucketId, 0);
+ expected.get_program_info.Init(kProgramId, kBucketId);
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ gl_->GetProgramInfoCHROMIUM(kProgramId, 6, &size, &buf);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), gl_->GetError());
+ ClearCommands();
+
+ // try bad bufsize
+ gl_->GetProgramInfoCHROMIUM(kProgramId, -1, &size, &buf);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ ClearCommands();
+ // try no size ptr.
+ gl_->GetProgramInfoCHROMIUM(kProgramId, sizeof(buf), NULL, &buf);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+}
+
+ // Test that the default integer state is cached on the client side.
+TEST_F(GLES2ImplementationTest, GetIntegerCacheRead) {
+ struct PNameValue {
+ GLenum pname;
+ GLint expected;
+ };
+ const PNameValue pairs[] = {
+ {GL_ACTIVE_TEXTURE, GL_TEXTURE0, },
+ {GL_TEXTURE_BINDING_2D, 0, },
+ {GL_TEXTURE_BINDING_CUBE_MAP, 0, },
+ {GL_TEXTURE_BINDING_EXTERNAL_OES, 0, },
+ {GL_FRAMEBUFFER_BINDING, 0, },
+ {GL_RENDERBUFFER_BINDING, 0, },
+ {GL_ARRAY_BUFFER_BINDING, 0, },
+ {GL_ELEMENT_ARRAY_BUFFER_BINDING, 0, },
+ {GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, kMaxCombinedTextureImageUnits, },
+ {GL_MAX_CUBE_MAP_TEXTURE_SIZE, kMaxCubeMapTextureSize, },
+ {GL_MAX_FRAGMENT_UNIFORM_VECTORS, kMaxFragmentUniformVectors, },
+ {GL_MAX_RENDERBUFFER_SIZE, kMaxRenderbufferSize, },
+ {GL_MAX_TEXTURE_IMAGE_UNITS, kMaxTextureImageUnits, },
+ {GL_MAX_TEXTURE_SIZE, kMaxTextureSize, },
+ {GL_MAX_VARYING_VECTORS, kMaxVaryingVectors, },
+ {GL_MAX_VERTEX_ATTRIBS, kMaxVertexAttribs, },
+ {GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, kMaxVertexTextureImageUnits, },
+ {GL_MAX_VERTEX_UNIFORM_VECTORS, kMaxVertexUniformVectors, },
+ {GL_NUM_COMPRESSED_TEXTURE_FORMATS, kNumCompressedTextureFormats, },
+ {GL_NUM_SHADER_BINARY_FORMATS, kNumShaderBinaryFormats, }, };
+ size_t num_pairs = sizeof(pairs) / sizeof(pairs[0]);
+ for (size_t ii = 0; ii < num_pairs; ++ii) {
+ const PNameValue& pv = pairs[ii];
+ GLint v = -1;
+ gl_->GetIntegerv(pv.pname, &v);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(pv.expected, v);
+ }
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+}
+
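+ // Test that binding objects updates the client side integer cache.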
+TEST_F(GLES2ImplementationTest, GetIntegerCacheWrite) {
+ struct PNameValue {
+ GLenum pname;
+ GLint expected;
+ };
+ gl_->ActiveTexture(GL_TEXTURE4);
+ gl_->BindBuffer(GL_ARRAY_BUFFER, 2);
+ gl_->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 3);
+ gl_->BindFramebuffer(GL_FRAMEBUFFER, 4);
+ gl_->BindRenderbuffer(GL_RENDERBUFFER, 5);
+ gl_->BindTexture(GL_TEXTURE_2D, 6);
+ gl_->BindTexture(GL_TEXTURE_CUBE_MAP, 7);
+ gl_->BindTexture(GL_TEXTURE_EXTERNAL_OES, 8);
+
+ const PNameValue pairs[] = {{GL_ACTIVE_TEXTURE, GL_TEXTURE4, },
+ {GL_ARRAY_BUFFER_BINDING, 2, },
+ {GL_ELEMENT_ARRAY_BUFFER_BINDING, 3, },
+ {GL_FRAMEBUFFER_BINDING, 4, },
+ {GL_RENDERBUFFER_BINDING, 5, },
+ {GL_TEXTURE_BINDING_2D, 6, },
+ {GL_TEXTURE_BINDING_CUBE_MAP, 7, },
+ {GL_TEXTURE_BINDING_EXTERNAL_OES, 8, }, };
+ size_t num_pairs = sizeof(pairs) / sizeof(pairs[0]);
+ for (size_t ii = 0; ii < num_pairs; ++ii) {
+ const PNameValue& pv = pairs[ii];
+ GLint v = -1;
+ gl_->GetIntegerv(pv.pname, &v);
+ EXPECT_EQ(pv.expected, v);
+ }
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+}
+
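+ // Checks a source rectangle against a destination rectangle row by row,
+ // ignoring row padding and optionally walking the destination bottom-up.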
+static bool CheckRect(
+ int width, int height, GLenum format, GLenum type, int alignment,
+ bool flip_y, const uint8* r1, const uint8* r2) {
+ uint32 size = 0;
+ uint32 unpadded_row_size = 0;
+ uint32 padded_row_size = 0;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, alignment, &size, &unpadded_row_size,
+ &padded_row_size)) {
+ return false;
+ }
+
+ int r2_stride = flip_y ?
+ -static_cast<int>(padded_row_size) :
+ static_cast<int>(padded_row_size);
+ r2 = flip_y ? (r2 + (height - 1) * padded_row_size) : r2;
+
+ for (int y = 0; y < height; ++y) {
+ if (memcmp(r1, r2, unpadded_row_size) != 0) {
+ return false;
+ }
+ r1 += padded_row_size;
+ r2 += r2_stride;
+ }
+ return true;
+}
+
+ACTION_P8(CheckRectAction, width, height, format, type, alignment, flip_y,
+ r1, r2) {
+ EXPECT_TRUE(CheckRect(
+ width, height, format, type, alignment, flip_y, r1, r2));
+}
+
+// Test TexImage2D with and without flip_y
+TEST_F(GLES2ImplementationTest, TexImage2D) {
+ struct Cmds {
+ cmds::TexImage2D tex_image_2d;
+ cmd::SetToken set_token;
+ };
+ struct Cmds2 {
+ cmds::TexImage2D tex_image_2d;
+ cmd::SetToken set_token;
+ };
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLint kLevel = 0;
+ const GLenum kFormat = GL_RGB;
+ const GLsizei kWidth = 3;
+ const GLsizei kHeight = 4;
+ const GLint kBorder = 0;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+ const GLint kPixelStoreUnpackAlignment = 4;
+ static uint8 pixels[] = {
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 101, 102, 103,
+ 21, 22, 23, 23, 24, 25, 25, 26, 27, 201, 202, 203,
+ 31, 32, 33, 33, 34, 35, 35, 36, 37, 123, 124, 125,
+ 41, 42, 43, 43, 44, 45, 45, 46, 47,
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(sizeof(pixels));
+
+ Cmds expected;
+ expected.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kFormat, kType,
+ mem1.id, mem1.offset);
+ expected.set_token.Init(GetNextToken());
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kBorder, kFormat, kType,
+ pixels);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(CheckRect(
+ kWidth, kHeight, kFormat, kType, kPixelStoreUnpackAlignment, false,
+ pixels, mem1.ptr));
+
+ ClearCommands();
+ gl_->PixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(sizeof(pixels));
+ Cmds2 expected2;
+ expected2.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kFormat, kType,
+ mem2.id, mem2.offset);
+ expected2.set_token.Init(GetNextToken());
+ const void* commands2 = GetPut();
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kBorder, kFormat, kType,
+ pixels);
+ EXPECT_EQ(0, memcmp(&expected2, commands2, sizeof(expected2)));
+ EXPECT_TRUE(CheckRect(
+ kWidth, kHeight, kFormat, kType, kPixelStoreUnpackAlignment, true,
+ pixels, mem2.ptr));
+}
+
+// Test TexImage2D with 2 writes
+TEST_F(GLES2ImplementationTest, TexImage2D2Writes) {
+ struct Cmds {
+ cmds::TexImage2D tex_image_2d;
+ cmds::TexSubImage2D tex_sub_image_2d1;
+ cmd::SetToken set_token1;
+ cmds::TexSubImage2D tex_sub_image_2d2;
+ cmd::SetToken set_token2;
+ };
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLint kLevel = 0;
+ const GLenum kFormat = GL_RGB;
+ const GLint kBorder = 0;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+ const GLint kPixelStoreUnpackAlignment = 4;
+ const GLsizei kWidth = 3;
+
+ uint32 size = 0;
+ uint32 unpadded_row_size = 0;
+ uint32 padded_row_size = 0;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, 2, kFormat, kType, kPixelStoreUnpackAlignment,
+ &size, &unpadded_row_size, &padded_row_size));
+ const GLsizei kHeight = (MaxTransferBufferSize() / padded_row_size) * 2;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, kFormat, kType, kPixelStoreUnpackAlignment,
+ &size, NULL, NULL));
+ uint32 half_size = 0;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight / 2, kFormat, kType, kPixelStoreUnpackAlignment,
+ &half_size, NULL, NULL));
+
+ scoped_ptr<uint8[]> pixels(new uint8[size]);
+ for (uint32 ii = 0; ii < size; ++ii) {
+ pixels[ii] = static_cast<uint8>(ii);
+ }
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(half_size);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(half_size);
+
+ Cmds expected;
+ expected.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kFormat, kType,
+ 0, 0);
+ expected.tex_sub_image_2d1.Init(
+ kTarget, kLevel, 0, 0, kWidth, kHeight / 2, kFormat, kType,
+ mem1.id, mem1.offset, true);
+ expected.set_token1.Init(GetNextToken());
+ expected.tex_sub_image_2d2.Init(
+ kTarget, kLevel, 0, kHeight / 2, kWidth, kHeight / 2, kFormat, kType,
+ mem2.id, mem2.offset, true);
+ expected.set_token2.Init(GetNextToken());
+
+ // TODO(gman): Make it possible to run this test
+ // EXPECT_CALL(*command_buffer(), OnFlush())
+ // .WillOnce(CheckRectAction(
+ // kWidth, kHeight / 2, kFormat, kType, kPixelStoreUnpackAlignment,
+ // false, pixels.get(),
+ // GetExpectedTransferAddressFromOffsetAs<uint8>(offset1, half_size)))
+ // .RetiresOnSaturation();
+
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kBorder, kFormat, kType,
+ pixels.get());
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(CheckRect(
+ kWidth, kHeight / 2, kFormat, kType, kPixelStoreUnpackAlignment, false,
+ pixels.get() + kHeight / 2 * padded_row_size, mem2.ptr));
+
+ ClearCommands();
+ gl_->PixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+ const void* commands2 = GetPut();
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(half_size);
+ ExpectedMemoryInfo mem4 = GetExpectedMemory(half_size);
+ expected.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kFormat, kType,
+ 0, 0);
+ expected.tex_sub_image_2d1.Init(
+ kTarget, kLevel, 0, kHeight / 2, kWidth, kHeight / 2, kFormat, kType,
+ mem3.id, mem3.offset, true);
+ expected.set_token1.Init(GetNextToken());
+ expected.tex_sub_image_2d2.Init(
+ kTarget, kLevel, 0, 0, kWidth, kHeight / 2, kFormat, kType,
+ mem4.id, mem4.offset, true);
+ expected.set_token2.Init(GetNextToken());
+
+ // TODO(gman): Make it possible to run this test
+ // EXPECT_CALL(*command_buffer(), OnFlush())
+ // .WillOnce(CheckRectAction(
+ // kWidth, kHeight / 2, kFormat, kType, kPixelStoreUnpackAlignment,
+ // true, pixels.get(),
+ // GetExpectedTransferAddressFromOffsetAs<uint8>(offset3, half_size)))
+ // .RetiresOnSaturation();
+
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kBorder, kFormat, kType,
+ pixels.get());
+ EXPECT_EQ(0, memcmp(&expected, commands2, sizeof(expected)));
+ EXPECT_TRUE(CheckRect(
+ kWidth, kHeight / 2, kFormat, kType, kPixelStoreUnpackAlignment, true,
+ pixels.get() + kHeight / 2 * padded_row_size, mem4.ptr));
+}
+
+// Test TexSubImage2D with GL_UNPACK_FLIP_Y_CHROMIUM set and partial
+// multirow transfers
+TEST_F(GLES2ImplementationTest, TexSubImage2DFlipY) {
+ const GLsizei kTextureWidth = MaxTransferBufferSize() / 4;
+ const GLsizei kTextureHeight = 7;
+ const GLsizei kSubImageWidth = MaxTransferBufferSize() / 8;
+ const GLsizei kSubImageHeight = 4;
+ const GLint kSubImageXOffset = 1;
+ const GLint kSubImageYOffset = 2;
+ const GLenum kFormat = GL_RGBA;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLint kLevel = 0;
+ const GLint kBorder = 0;
+ const GLint kPixelStoreUnpackAlignment = 4;
+
+ struct Cmds {
+ cmds::PixelStorei pixel_store_i1;
+ cmds::TexImage2D tex_image_2d;
+ cmds::PixelStorei pixel_store_i2;
+ cmds::TexSubImage2D tex_sub_image_2d1;
+ cmd::SetToken set_token1;
+ cmds::TexSubImage2D tex_sub_image_2d2;
+ cmd::SetToken set_token2;
+ };
+
+ uint32 sub_2_high_size = 0;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kSubImageWidth, 2, kFormat, kType, kPixelStoreUnpackAlignment,
+ &sub_2_high_size, NULL, NULL));
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(sub_2_high_size);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(sub_2_high_size);
+
+ Cmds expected;
+ expected.pixel_store_i1.Init(GL_UNPACK_ALIGNMENT, kPixelStoreUnpackAlignment);
+ expected.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kTextureWidth, kTextureHeight, kFormat,
+ kType, 0, 0);
+ expected.pixel_store_i2.Init(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+ expected.tex_sub_image_2d1.Init(kTarget, kLevel, kSubImageXOffset,
+ kSubImageYOffset + 2, kSubImageWidth, 2, kFormat, kType,
+ mem1.id, mem1.offset, false);
+ expected.set_token1.Init(GetNextToken());
+ expected.tex_sub_image_2d2.Init(kTarget, kLevel, kSubImageXOffset,
+ kSubImageYOffset, kSubImageWidth, 2, kFormat, kType,
+ mem2.id, mem2.offset, false);
+ expected.set_token2.Init(GetNextToken());
+
+ gl_->PixelStorei(GL_UNPACK_ALIGNMENT, kPixelStoreUnpackAlignment);
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kTextureWidth, kTextureHeight, kBorder, kFormat,
+ kType, NULL);
+ gl_->PixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+ scoped_ptr<uint32[]> pixels(new uint32[kSubImageWidth * kSubImageHeight]);
+ for (int y = 0; y < kSubImageHeight; ++y) {
+ for (int x = 0; x < kSubImageWidth; ++x) {
+ pixels.get()[kSubImageWidth * y + x] = x | (y << 16);
+ }
+ }
+ gl_->TexSubImage2D(
+ GL_TEXTURE_2D, 0, kSubImageXOffset, kSubImageYOffset, kSubImageWidth,
+ kSubImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels.get());
+
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(CheckRect(
+ kSubImageWidth, 2, kFormat, kType, kPixelStoreUnpackAlignment, true,
+ reinterpret_cast<uint8*>(pixels.get() + 2 * kSubImageWidth),
+ mem2.ptr));
+}
+
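+ // Test that the unpack parameters (alignment, row length, skip pixels, skip
+ // rows and flip y) are honored when uploading a subimage from a larger
+ // source.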
+TEST_F(GLES2ImplementationTest, SubImageUnpack) {
+ static const GLint unpack_alignments[] = { 1, 2, 4, 8 };
+
+ static const GLenum kFormat = GL_RGB;
+ static const GLenum kType = GL_UNSIGNED_BYTE;
+ static const GLint kLevel = 0;
+ static const GLint kBorder = 0;
+ // We're testing that the unpack params can be used to pull a subimage out of
+ // a larger source of pixels. Here we specify the subimage by its border
+ // rows / columns.
+ static const GLint kSrcWidth = 33;
+ static const GLint kSrcSubImageX0 = 11;
+ static const GLint kSrcSubImageX1 = 20;
+ static const GLint kSrcSubImageY0 = 18;
+ static const GLint kSrcSubImageY1 = 23;
+ static const GLint kSrcSubImageWidth = kSrcSubImageX1 - kSrcSubImageX0;
+ static const GLint kSrcSubImageHeight = kSrcSubImageY1 - kSrcSubImageY0;
+
+ // these are only used in the texsubimage tests
+ static const GLint kTexWidth = 1023;
+ static const GLint kTexHeight = 511;
+ static const GLint kTexSubXOffset = 419;
+ static const GLint kTexSubYOffset = 103;
+
+ struct {
+ cmds::PixelStorei pixel_store_i;
+ cmds::PixelStorei pixel_store_i2;
+ cmds::TexImage2D tex_image_2d;
+ } texImageExpected;
+
+ struct {
+ cmds::PixelStorei pixel_store_i;
+ cmds::PixelStorei pixel_store_i2;
+ cmds::TexImage2D tex_image_2d;
+ cmds::TexSubImage2D tex_sub_image_2d;
+ } texSubImageExpected;
+
+ uint32 src_size;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kSrcWidth, kSrcSubImageY1, kFormat, kType, 8, &src_size, NULL, NULL));
+ scoped_ptr<uint8[]> src_pixels;
+ src_pixels.reset(new uint8[src_size]);
+ for (size_t i = 0; i < src_size; ++i) {
+ src_pixels[i] = static_cast<int8>(i);
+ }
+
+ for (int sub = 0; sub < 2; ++sub) {
+ for (int flip_y = 0; flip_y < 2; ++flip_y) {
+ for (size_t a = 0; a < arraysize(unpack_alignments); ++a) {
+ GLint alignment = unpack_alignments[a];
+ uint32 size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kSrcSubImageWidth, kSrcSubImageHeight, kFormat, kType, alignment,
+ &size, &unpadded_row_size, &padded_row_size));
+ ASSERT_TRUE(size <= MaxTransferBufferSize());
+ ExpectedMemoryInfo mem = GetExpectedMemory(size);
+
+ const void* commands = GetPut();
+ gl_->PixelStorei(GL_UNPACK_ALIGNMENT, alignment);
+ gl_->PixelStorei(GL_UNPACK_ROW_LENGTH_EXT, kSrcWidth);
+ gl_->PixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, kSrcSubImageX0);
+ gl_->PixelStorei(GL_UNPACK_SKIP_ROWS_EXT, kSrcSubImageY0);
+ gl_->PixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, flip_y);
+ if (sub) {
+ gl_->TexImage2D(
+ GL_TEXTURE_2D, kLevel, kFormat, kTexWidth, kTexHeight, kBorder,
+ kFormat, kType, NULL);
+ gl_->TexSubImage2D(
+ GL_TEXTURE_2D, kLevel, kTexSubXOffset, kTexSubYOffset,
+ kSrcSubImageWidth, kSrcSubImageHeight, kFormat, kType,
+ src_pixels.get());
+ texSubImageExpected.pixel_store_i.Init(
+ GL_UNPACK_ALIGNMENT, alignment);
+ texSubImageExpected.pixel_store_i2.Init(
+ GL_UNPACK_FLIP_Y_CHROMIUM, flip_y);
+ texSubImageExpected.tex_image_2d.Init(
+ GL_TEXTURE_2D, kLevel, kFormat, kTexWidth, kTexHeight,
+ kFormat, kType, 0, 0);
+ texSubImageExpected.tex_sub_image_2d.Init(
+ GL_TEXTURE_2D, kLevel, kTexSubXOffset, kTexSubYOffset,
+ kSrcSubImageWidth, kSrcSubImageHeight, kFormat, kType, mem.id,
+ mem.offset, GL_FALSE);
+ EXPECT_EQ(0, memcmp(
+ &texSubImageExpected, commands, sizeof(texSubImageExpected)));
+ } else {
+ gl_->TexImage2D(
+ GL_TEXTURE_2D, kLevel, kFormat,
+ kSrcSubImageWidth, kSrcSubImageHeight, kBorder, kFormat, kType,
+ src_pixels.get());
+ texImageExpected.pixel_store_i.Init(GL_UNPACK_ALIGNMENT, alignment);
+ texImageExpected.pixel_store_i2.Init(
+ GL_UNPACK_FLIP_Y_CHROMIUM, flip_y);
+ texImageExpected.tex_image_2d.Init(
+ GL_TEXTURE_2D, kLevel, kFormat, kSrcSubImageWidth,
+ kSrcSubImageHeight, kFormat, kType, mem.id, mem.offset);
+ EXPECT_EQ(0, memcmp(
+ &texImageExpected, commands, sizeof(texImageExpected)));
+ }
+ uint32 src_padded_row_size;
+ ASSERT_TRUE(GLES2Util::ComputeImagePaddedRowSize(
+ kSrcWidth, kFormat, kType, alignment, &src_padded_row_size));
+ uint32 bytes_per_group = GLES2Util::ComputeImageGroupSize(
+ kFormat, kType);
+ for (int y = 0; y < kSrcSubImageHeight; ++y) {
+ GLint src_sub_y = flip_y ? kSrcSubImageHeight - y - 1 : y;
+ const uint8* src_row = src_pixels.get() +
+ (kSrcSubImageY0 + src_sub_y) * src_padded_row_size +
+ bytes_per_group * kSrcSubImageX0;
+ const uint8* dst_row = mem.ptr + y * padded_row_size;
+ EXPECT_EQ(0, memcmp(src_row, dst_row, unpadded_row_size));
+ }
+ ClearCommands();
+ }
+ }
+ }
+}
+
+// Test texture related calls with invalid arguments.
+TEST_F(GLES2ImplementationTest, TextureInvalidArguments) {
+ struct Cmds {
+ cmds::TexImage2D tex_image_2d;
+ cmd::SetToken set_token;
+ };
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLint kLevel = 0;
+ const GLenum kFormat = GL_RGB;
+ const GLsizei kWidth = 3;
+ const GLsizei kHeight = 4;
+ const GLint kBorder = 0;
+ const GLint kInvalidBorder = 1;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+ const GLint kPixelStoreUnpackAlignment = 4;
+ static uint8 pixels[] = {
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 101, 102, 103,
+ 21, 22, 23, 23, 24, 25, 25, 26, 27, 201, 202, 203,
+ 31, 32, 33, 33, 34, 35, 35, 36, 37, 123, 124, 125,
+ 41, 42, 43, 43, 44, 45, 45, 46, 47,
+ };
+
+ // Verify that something works.
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(sizeof(pixels));
+
+ Cmds expected;
+ expected.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kFormat, kType,
+ mem1.id, mem1.offset);
+ expected.set_token.Init(GetNextToken());
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kBorder, kFormat, kType,
+ pixels);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(CheckRect(
+ kWidth, kHeight, kFormat, kType, kPixelStoreUnpackAlignment, false,
+ pixels, mem1.ptr));
+
+ ClearCommands();
+
+ // Use invalid border.
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kInvalidBorder, kFormat, kType,
+ pixels);
+
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+
+ ClearCommands();
+
+ gl_->AsyncTexImage2DCHROMIUM(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kInvalidBorder, kFormat, kType,
+ NULL);
+
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+
+ ClearCommands();
+
+ // Checking for CompressedTexImage2D argument validation is a bit tricky due
+ // to (runtime-detected) compression formats. Try to infer the error with an
+ // aux check.
+ const GLenum kCompressedFormat = GL_ETC1_RGB8_OES;
+ gl_->CompressedTexImage2D(
+ kTarget, kLevel, kCompressedFormat, kWidth, kHeight, kBorder,
+ arraysize(pixels), pixels);
+
+ // In the above, kCompressedFormat and arraysize(pixels) are possibly wrong
+ // values. First ensure that these do not cause failures at the client. If
+ // this check ever fails, it probably means that the client validates more
+ // than it did at the time this test was written, and more code needs to be
+ // added to this test.
+ EXPECT_FALSE(NoCommandsWritten());
+
+ ClearCommands();
+
+ // Changing border to invalid border should make the call fail at the client
+ // checks.
+ gl_->CompressedTexImage2D(
+ kTarget, kLevel, kCompressedFormat, kWidth, kHeight, kInvalidBorder,
+ arraysize(pixels), pixels);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+
+// Binds cannot be cached with bind_generates_resource = false because
+// our id might not be valid. More specifically, if you bind on contextA and
+// then delete on contextB, the resource is still bound on contextA but
+// GetIntegerv won't return an id.
+TEST_F(GLES2ImplementationStrictSharedTest, BindsNotCached) {
+ struct PNameValue {
+ GLenum pname;
+ GLint expected;
+ };
+ const PNameValue pairs[] = {{GL_TEXTURE_BINDING_2D, 1, },
+ {GL_TEXTURE_BINDING_CUBE_MAP, 2, },
+ {GL_TEXTURE_BINDING_EXTERNAL_OES, 3, },
+ {GL_FRAMEBUFFER_BINDING, 4, },
+ {GL_RENDERBUFFER_BINDING, 5, },
+ {GL_ARRAY_BUFFER_BINDING, 6, },
+ {GL_ELEMENT_ARRAY_BUFFER_BINDING, 7, }, };
+ size_t num_pairs = sizeof(pairs) / sizeof(pairs[0]);
+ for (size_t ii = 0; ii < num_pairs; ++ii) {
+ const PNameValue& pv = pairs[ii];
+ GLint v = -1;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetIntegerv::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr,
+ SizedResultHelper<GLuint>(pv.expected)))
+ .RetiresOnSaturation();
+ gl_->GetIntegerv(pv.pname, &v);
+ EXPECT_EQ(pv.expected, v);
+ }
+}
+
+// glGen* Ids must not be reused until glDelete* commands have been
+// flushed by glFlush.
+TEST_F(GLES2ImplementationStrictSharedTest, FlushGenerationTestBuffers) {
+ FlushGenerationTest<GenBuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest, FlushGenerationTestFramebuffers) {
+ FlushGenerationTest<GenFramebuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest, FlushGenerationTestRenderbuffers) {
+ FlushGenerationTest<GenRenderbuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest, FlushGenerationTestTextures) {
+ FlushGenerationTest<GenTexturesAPI>();
+}
+
+// glGen* Ids must not be reused cross-context until glDelete* commands are
+// flushed by glFlush, and the Ids are lazily freed after.
+TEST_F(GLES2ImplementationStrictSharedTest, CrossContextGenerationTestBuffers) {
+ CrossContextGenerationTest<GenBuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationTestFramebuffers) {
+ CrossContextGenerationTest<GenFramebuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationTestRenderbuffers) {
+ CrossContextGenerationTest<GenRenderbuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationTestTextures) {
+ CrossContextGenerationTest<GenTexturesAPI>();
+}
+
+// Test Delete which causes auto flush. Tests a regression case that occurred
+// in testing.
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationAutoFlushTestBuffers) {
+ CrossContextGenerationAutoFlushTest<GenBuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationAutoFlushTestFramebuffers) {
+ CrossContextGenerationAutoFlushTest<GenFramebuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationAutoFlushTestRenderbuffers) {
+ CrossContextGenerationAutoFlushTest<GenRenderbuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationAutoFlushTestTextures) {
+ CrossContextGenerationAutoFlushTest<GenTexturesAPI>();
+}
+
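+ // Test that GetString(GL_EXTENSIONS) appends the client side extension
+ // strings to the string returned by the service.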
+TEST_F(GLES2ImplementationTest, GetString) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const Str7 kString = {"foobar"};
+ // GL_CHROMIUM_map_sub and GL_CHROMIUM_flipy are hard coded into
+ // GLES2Implementation.
+ const char* expected_str =
+ "foobar "
+ "GL_CHROMIUM_flipy "
+ "GL_EXT_unpack_subimage "
+ "GL_CHROMIUM_map_sub";
+ const char kBad = 0x12;
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size1;
+ cmds::GetString get_string;
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmd::GetBucketStart::Result));
+ Cmds expected;
+ expected.set_bucket_size1.Init(kBucketId, 0);
+ expected.get_string.Init(GL_EXTENSIONS, kBucketId);
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ char buf[sizeof(kString) + 1];
+ memset(buf, kBad, sizeof(buf));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(SetMemory(result1.ptr, uint32(sizeof(kString))),
+ SetMemory(mem1.ptr, kString)))
+ .RetiresOnSaturation();
+
+ const GLubyte* result = gl_->GetString(GL_EXTENSIONS);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_STREQ(expected_str, reinterpret_cast<const char*>(result));
+}
+
+TEST_F(GLES2ImplementationTest, PixelStoreiGLPackReverseRowOrderANGLE) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const Str7 kString = {"foobar"};
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size1;
+ cmds::GetString get_string;
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::SetBucketSize set_bucket_size2;
+ cmds::PixelStorei pixel_store;
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmd::GetBucketStart::Result));
+
+ Cmds expected;
+ expected.set_bucket_size1.Init(kBucketId, 0);
+ expected.get_string.Init(GL_EXTENSIONS, kBucketId);
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ expected.pixel_store.Init(GL_PACK_REVERSE_ROW_ORDER_ANGLE, 1);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(SetMemory(result1.ptr, uint32(sizeof(kString))),
+ SetMemory(mem1.ptr, kString)))
+ .RetiresOnSaturation();
+
+ gl_->PixelStorei(GL_PACK_REVERSE_ROW_ORDER_ANGLE, 1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CreateProgram) {
+ struct Cmds {
+ cmds::CreateProgram cmd;
+ };
+
+ Cmds expected;
+ expected.cmd.Init(kProgramsAndShadersStartId);
+ GLuint id = gl_->CreateProgram();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kProgramsAndShadersStartId, id);
+}
+
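+ // Test that BufferData larger than the transfer buffer is uploaded in
+ // multiple BufferSubData chunks.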
+TEST_F(GLES2ImplementationTest, BufferDataLargerThanTransferBuffer) {
+ struct Cmds {
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ };
+ const unsigned kUsableSize =
+ kTransferBufferSize - GLES2Implementation::kStartingOffset;
+ uint8 buf[kUsableSize * 2] = { 0, };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kUsableSize);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kUsableSize);
+
+ Cmds expected;
+ expected.set_size.Init(
+ GL_ARRAY_BUFFER, arraysize(buf), 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, 0, kUsableSize, mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kUsableSize, kUsableSize, mem2.id, mem2.offset);
+ expected.set_token2.Init(GetNextToken());
+ gl_->BufferData(GL_ARRAY_BUFFER, arraysize(buf), buf, GL_DYNAMIC_DRAW);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
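+ // Test that IsEnabled is answered from the client side capability cache
+ // without issuing commands.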
+TEST_F(GLES2ImplementationTest, CapabilitiesAreCached) {
+ static const GLenum kStates[] = {
+ GL_DITHER,
+ GL_BLEND,
+ GL_CULL_FACE,
+ GL_DEPTH_TEST,
+ GL_POLYGON_OFFSET_FILL,
+ GL_SAMPLE_ALPHA_TO_COVERAGE,
+ GL_SAMPLE_COVERAGE,
+ GL_SCISSOR_TEST,
+ GL_STENCIL_TEST,
+ };
+ struct Cmds {
+ cmds::Enable enable_cmd;
+ };
+ Cmds expected;
+
+ for (size_t ii = 0; ii < arraysize(kStates); ++ii) {
+ GLenum state = kStates[ii];
+ expected.enable_cmd.Init(state);
+ GLboolean result = gl_->IsEnabled(state);
+ EXPECT_EQ(static_cast<GLboolean>(ii == 0), result);
+ EXPECT_TRUE(NoCommandsWritten());
+ const void* commands = GetPut();
+ if (!result) {
+ gl_->Enable(state);
+ EXPECT_EQ(0, memcmp(&expected, commands, sizeof(expected)));
+ }
+ ClearCommands();
+ result = gl_->IsEnabled(state);
+ EXPECT_TRUE(result);
+ EXPECT_TRUE(NoCommandsWritten());
+ }
+}
+
+TEST_F(GLES2ImplementationTest, BindVertexArrayOES) {
+ GLuint id = 0;
+ gl_->GenVertexArraysOES(1, &id);
+ ClearCommands();
+
+ struct Cmds {
+ cmds::BindVertexArrayOES cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(id);
+
+ const void* commands = GetPut();
+ gl_->BindVertexArrayOES(id);
+ EXPECT_EQ(0, memcmp(&expected, commands, sizeof(expected)));
+ ClearCommands();
+ gl_->BindVertexArrayOES(id);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
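+ // Test the client side tracking and error checking for BeginQueryEXT /
+ // EndQueryEXT.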
+TEST_F(GLES2ImplementationTest, BeginEndQueryEXT) {
+ // Test GetQueryivEXT returns 0 if no current query.
+ GLint param = -1;
+ gl_->GetQueryivEXT(GL_ANY_SAMPLES_PASSED_EXT, GL_CURRENT_QUERY_EXT, &param);
+ EXPECT_EQ(0, param);
+
+ GLuint expected_ids[2] = { 1, 2 }; // These must match what's actually genned.
+ struct GenCmds {
+ cmds::GenQueriesEXTImmediate gen;
+ GLuint data[2];
+ };
+ GenCmds expected_gen_cmds;
+ expected_gen_cmds.gen.Init(arraysize(expected_ids), &expected_ids[0]);
+ GLuint ids[arraysize(expected_ids)] = { 0, };
+ gl_->GenQueriesEXT(arraysize(expected_ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(
+ &expected_gen_cmds, commands_, sizeof(expected_gen_cmds)));
+ GLuint id1 = ids[0];
+ GLuint id2 = ids[1];
+ ClearCommands();
+
+ // Test BeginQueryEXT fails if id = 0.
+ gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, 0);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test BeginQueryEXT inserts command.
+ struct BeginCmds {
+ cmds::BeginQueryEXT begin_query;
+ };
+ BeginCmds expected_begin_cmds;
+ const void* commands = GetPut();
+ gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, id1);
+ QueryTracker::Query* query = GetQuery(id1);
+ ASSERT_TRUE(query != NULL);
+ expected_begin_cmds.begin_query.Init(
+ GL_ANY_SAMPLES_PASSED_EXT, id1, query->shm_id(), query->shm_offset());
+ EXPECT_EQ(0, memcmp(
+ &expected_begin_cmds, commands, sizeof(expected_begin_cmds)));
+ ClearCommands();
+
+ // Test GetQueryivEXT returns id.
+ param = -1;
+ gl_->GetQueryivEXT(GL_ANY_SAMPLES_PASSED_EXT, GL_CURRENT_QUERY_EXT, &param);
+ EXPECT_EQ(id1, static_cast<GLuint>(param));
+ gl_->GetQueryivEXT(
+ GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT, GL_CURRENT_QUERY_EXT, &param);
+ EXPECT_EQ(0, param);
+
+ // Test BeginQueryEXT fails if between Begin/End.
+ gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, id2);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test EndQueryEXT fails if target not same as current query.
+ ClearCommands();
+ gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test EndQueryEXT sends command
+ struct EndCmds {
+ cmds::EndQueryEXT end_query;
+ };
+ EndCmds expected_end_cmds;
+ expected_end_cmds.end_query.Init(
+ GL_ANY_SAMPLES_PASSED_EXT, query->submit_count());
+ commands = GetPut();
+ gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
+ EXPECT_EQ(0, memcmp(
+ &expected_end_cmds, commands, sizeof(expected_end_cmds)));
+
+ // Test EndQueryEXT fails if no current query.
+ ClearCommands();
+ gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test 2nd Begin/End increments count.
+ base::subtle::Atomic32 old_submit_count = query->submit_count();
+ gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, id1);
+ EXPECT_NE(old_submit_count, query->submit_count());
+ expected_end_cmds.end_query.Init(
+ GL_ANY_SAMPLES_PASSED_EXT, query->submit_count());
+ commands = GetPut();
+ gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
+ EXPECT_EQ(0, memcmp(
+ &expected_end_cmds, commands, sizeof(expected_end_cmds)));
+
+ // Test BeginQueryEXT fails if target changed.
+ ClearCommands();
+ gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT, id1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test GetQueryObjectuivEXT fails if unused id
+ GLuint available = 0xBDu;
+ ClearCommands();
+ gl_->GetQueryObjectuivEXT(id2, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(0xBDu, available);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test GetQueryObjectuivEXT fails if bad id
+ ClearCommands();
+ gl_->GetQueryObjectuivEXT(4567, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(0xBDu, available);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test GetQueryObjectuivEXT CheckResultsAvailable
+ ClearCommands();
+ gl_->GetQueryObjectuivEXT(id1, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_EQ(0u, available);
+}
+
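+ // Test GL_GET_ERROR_QUERY_CHROMIUM: the begin command is deferred until
+ // EndQueryEXT, and a client side error completes the query without issuing
+ // any commands.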
+TEST_F(GLES2ImplementationTest, ErrorQuery) {
+ GLuint id = 0;
+ gl_->GenQueriesEXT(1, &id);
+ ClearCommands();
+
+ // Test BeginQueryEXT does NOT insert commands.
+ gl_->BeginQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM, id);
+ EXPECT_TRUE(NoCommandsWritten());
+ QueryTracker::Query* query = GetQuery(id);
+ ASSERT_TRUE(query != NULL);
+
+ // Test EndQueryEXT sends both begin and end command
+ struct EndCmds {
+ cmds::BeginQueryEXT begin_query;
+ cmds::EndQueryEXT end_query;
+ };
+ EndCmds expected_end_cmds;
+ expected_end_cmds.begin_query.Init(
+ GL_GET_ERROR_QUERY_CHROMIUM, id, query->shm_id(), query->shm_offset());
+ expected_end_cmds.end_query.Init(
+ GL_GET_ERROR_QUERY_CHROMIUM, query->submit_count());
+ const void* commands = GetPut();
+ gl_->EndQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM);
+ EXPECT_EQ(0, memcmp(
+ &expected_end_cmds, commands, sizeof(expected_end_cmds)));
+ ClearCommands();
+
+ // Check result is not yet available.
+ GLuint available = 0xBDu;
+ gl_->GetQueryObjectuivEXT(id, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(0u, available);
+
+ // Test no commands are sent if there is a client side error.
+
+ // Generate a client side error
+ gl_->ActiveTexture(GL_TEXTURE0 - 1);
+
+ gl_->BeginQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM, id);
+ gl_->EndQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM);
+ EXPECT_TRUE(NoCommandsWritten());
+
+ // Check result is available.
+ gl_->GetQueryObjectuivEXT(id, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_NE(0u, available);
+
+ // Check result.
+ GLuint result = 0xBDu;
+ gl_->GetQueryObjectuivEXT(id, GL_QUERY_RESULT_EXT, &result);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLuint>(GL_INVALID_ENUM), result);
+}
+
+#if !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+TEST_F(GLES2ImplementationTest, VertexArrays) {
+ const GLuint kAttribIndex1 = 1;
+ const GLint kNumComponents1 = 3;
+ const GLsizei kClientStride = 12;
+
+ GLuint id = 0;
+ gl_->GenVertexArraysOES(1, &id);
+ ClearCommands();
+
+ gl_->BindVertexArrayOES(id);
+
+ // Test that VertexAttribPointer cannot be called with a bound buffer of 0
+ // unless the offset is NULL
+ gl_->BindBuffer(GL_ARRAY_BUFFER, 0);
+
+ gl_->VertexAttribPointer(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, kClientStride,
+ reinterpret_cast<const void*>(4));
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ gl_->VertexAttribPointer(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, kClientStride, NULL);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+}
+#endif
+
+TEST_F(GLES2ImplementationTest, Disable) {
+ struct Cmds {
+ cmds::Disable cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_DITHER); // Note: DITHER defaults to enabled.
+
+ gl_->Disable(GL_DITHER);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ // Check it's cached and not called again.
+ ClearCommands();
+ gl_->Disable(GL_DITHER);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, Enable) {
+ struct Cmds {
+ cmds::Enable cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_BLEND); // Note: BLEND defaults to disabled.
+
+ gl_->Enable(GL_BLEND);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ // Check it's cached and not called again.
+ ClearCommands();
+ gl_->Enable(GL_BLEND);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, ConsumeTextureCHROMIUM) {
+ struct Cmds {
+ cmds::ConsumeTextureCHROMIUMImmediate cmd;
+ GLbyte data[64];
+ };
+
+ Mailbox mailbox = Mailbox::Generate();
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, mailbox.name);
+ gl_->ConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CreateAndConsumeTextureCHROMIUM) {
+ struct Cmds {
+ cmds::CreateAndConsumeTextureCHROMIUMImmediate cmd;
+ GLbyte data[64];
+ };
+
+ Mailbox mailbox = Mailbox::Generate();
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, kTexturesStartId, mailbox.name);
+ GLuint id = gl_->CreateAndConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kTexturesStartId, id);
+}
+
+TEST_F(GLES2ImplementationTest, ProduceTextureCHROMIUM) {
+ struct Cmds {
+ cmds::ProduceTextureCHROMIUMImmediate cmd;
+ GLbyte data[64];
+ };
+
+ Mailbox mailbox = Mailbox::Generate();
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, mailbox.name);
+ gl_->ProduceTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ProduceTextureDirectCHROMIUM) {
+ struct Cmds {
+ cmds::ProduceTextureDirectCHROMIUMImmediate cmd;
+ GLbyte data[64];
+ };
+
+ Mailbox mailbox = Mailbox::Generate();
+ Cmds expected;
+ expected.cmd.Init(kTexturesStartId, GL_TEXTURE_2D, mailbox.name);
+ gl_->ProduceTextureDirectCHROMIUM(
+ kTexturesStartId, GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, LimitSizeAndOffsetTo32Bit) {
+ GLsizeiptr size;
+ GLintptr offset;
+ if (sizeof(size) <= 4 || sizeof(offset) <= 4)
+ return;
+ // The two casts below should be no-ops, as we return early on
+ // 32-bit systems.
+ int64 value64 = 0x100000000;
+ size = static_cast<GLsizeiptr>(value64);
+ offset = static_cast<GLintptr>(value64);
+
+ const char kSizeOverflowMessage[] = "size more than 32-bit";
+ const char kOffsetOverflowMessage[] = "offset more than 32-bit";
+
+ const GLfloat buf[] = { 1.0, 1.0, 1.0, 1.0 };
+ const GLubyte indices[] = { 0 };
+
+ const GLuint kClientArrayBufferId = 0x789;
+ const GLuint kClientElementArrayBufferId = 0x790;
+ gl_->BindBuffer(GL_ARRAY_BUFFER, kClientArrayBufferId);
+ gl_->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, kClientElementArrayBufferId);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // Calling BufferData() should succeed with legal parameters.
+ gl_->BufferData(GL_ARRAY_BUFFER, sizeof(buf), buf, GL_DYNAMIC_DRAW);
+ gl_->BufferData(
+ GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_DYNAMIC_DRAW);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // BufferData: size
+ gl_->BufferData(GL_ARRAY_BUFFER, size, buf, GL_DYNAMIC_DRAW);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kSizeOverflowMessage, GetLastError().c_str());
+
+ // Calling BufferSubData() should succeed with legal parameters.
+ gl_->BufferSubData(GL_ARRAY_BUFFER, 0, sizeof(buf[0]), buf);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // BufferSubData: offset
+ gl_->BufferSubData(GL_ARRAY_BUFFER, offset, 1, buf);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kOffsetOverflowMessage, GetLastError().c_str());
+
+ // BufferSubData: size
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+ gl_->BufferSubData(GL_ARRAY_BUFFER, 0, size, buf);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kSizeOverflowMessage, GetLastError().c_str());
+
+ // Calling MapBufferSubDataCHROMIUM() should succeed with legal parameters.
+ void* mem =
+ gl_->MapBufferSubDataCHROMIUM(GL_ARRAY_BUFFER, 0, 1, GL_WRITE_ONLY);
+ EXPECT_TRUE(NULL != mem);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+ gl_->UnmapBufferSubDataCHROMIUM(mem);
+
+ // MapBufferSubDataCHROMIUM: offset
+ EXPECT_TRUE(NULL == gl_->MapBufferSubDataCHROMIUM(
+ GL_ARRAY_BUFFER, offset, 1, GL_WRITE_ONLY));
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kOffsetOverflowMessage, GetLastError().c_str());
+
+ // MapBufferSubDataCHROMIUM: size
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+ EXPECT_TRUE(NULL == gl_->MapBufferSubDataCHROMIUM(
+ GL_ARRAY_BUFFER, 0, size, GL_WRITE_ONLY));
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kSizeOverflowMessage, GetLastError().c_str());
+
+ // Calling DrawElements() should succeed with legal parameters.
+ gl_->DrawElements(GL_POINTS, 1, GL_UNSIGNED_BYTE, NULL);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // DrawElements: offset
+ gl_->DrawElements(
+ GL_POINTS, 1, GL_UNSIGNED_BYTE, reinterpret_cast<void*>(offset));
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kOffsetOverflowMessage, GetLastError().c_str());
+
+ // Calling DrawElementsInstancedANGLE() should succeed with legal parameters.
+ gl_->DrawElementsInstancedANGLE(GL_POINTS, 1, GL_UNSIGNED_BYTE, NULL, 1);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // DrawElementsInstancedANGLE: offset
+ gl_->DrawElementsInstancedANGLE(
+ GL_POINTS, 1, GL_UNSIGNED_BYTE, reinterpret_cast<void*>(offset), 1);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kOffsetOverflowMessage, GetLastError().c_str());
+
+ // Calling VertexAttribPointer() should succeed with legal parameters.
+ const GLuint kAttribIndex = 1;
+ const GLsizei kStride = 4;
+ gl_->VertexAttribPointer(
+ kAttribIndex, 1, GL_FLOAT, GL_FALSE, kStride, NULL);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // VertexAttribPointer: offset
+ gl_->VertexAttribPointer(
+ kAttribIndex, 1, GL_FLOAT, GL_FALSE, kStride,
+ reinterpret_cast<void*>(offset));
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kOffsetOverflowMessage, GetLastError().c_str());
+}
+
+TEST_F(GLES2ImplementationManualInitTest, LoseContextOnOOM) {
+ ContextInitOptions init_options;
+ init_options.lose_context_when_out_of_memory = true;
+ ASSERT_TRUE(Initialize(init_options));
+
+ struct Cmds {
+ cmds::LoseContextCHROMIUM cmd;
+ };
+
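+ // Simulate an out-of-memory failure: request a maximally-sized image while
+ // the mocked CreateGpuMemoryBuffer call returns NULL.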
+ GLsizei max = std::numeric_limits<GLsizei>::max();
+ EXPECT_CALL(*gpu_control_, CreateGpuMemoryBuffer(max, max, _, _, _))
+ .WillOnce(Return(static_cast<gfx::GpuMemoryBuffer*>(NULL)));
+ gl_->CreateImageCHROMIUM(max, max, 0, GL_IMAGE_MAP_CHROMIUM);
+ // The context should be lost.
+ Cmds expected;
+ expected.cmd.Init(GL_GUILTY_CONTEXT_RESET_ARB, GL_UNKNOWN_CONTEXT_RESET_ARB);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationManualInitTest, NoLoseContextOnOOM) {
+ ContextInitOptions init_options;
+ ASSERT_TRUE(Initialize(init_options));
+
+ struct Cmds {
+ cmds::LoseContextCHROMIUM cmd;
+ };
+
+ GLsizei max = std::numeric_limits<GLsizei>::max();
+ EXPECT_CALL(*gpu_control_, CreateGpuMemoryBuffer(max, max, _, _, _))
+ .WillOnce(Return(static_cast<gfx::GpuMemoryBuffer*>(NULL)));
+ gl_->CreateImageCHROMIUM(max, max, 0, GL_IMAGE_MAP_CHROMIUM);
+ // The context should not be lost.
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
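+// Initialization must fail when the client and service disagree on
+// bind_generates_resource.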
+TEST_F(GLES2ImplementationManualInitTest, FailInitOnBGRMismatch1) {
+ ContextInitOptions init_options;
+ init_options.bind_generates_resource_client = false;
+ init_options.bind_generates_resource_service = true;
+ EXPECT_FALSE(Initialize(init_options));
+}
+
+TEST_F(GLES2ImplementationManualInitTest, FailInitOnBGRMismatch2) {
+ ContextInitOptions init_options;
+ init_options.bind_generates_resource_client = true;
+ init_options.bind_generates_resource_service = false;
+ EXPECT_FALSE(Initialize(init_options));
+}
+
+#include "gpu/command_buffer/client/gles2_implementation_unittest_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
new file mode 100644
index 0000000..a42d6d5
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -0,0 +1,1926 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_implementation.h to declare the
+// GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
+
+TEST_F(GLES2ImplementationTest, AttachShader) {
+ struct Cmds {
+ cmds::AttachShader cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->AttachShader(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for BindAttribLocation
+
+TEST_F(GLES2ImplementationTest, BindBuffer) {
+ struct Cmds {
+ cmds::BindBuffer cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_ARRAY_BUFFER, 2);
+
+ gl_->BindBuffer(GL_ARRAY_BUFFER, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ClearCommands();
+ gl_->BindBuffer(GL_ARRAY_BUFFER, 2);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, BindFramebuffer) {
+ struct Cmds {
+ cmds::BindFramebuffer cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRAMEBUFFER, 2);
+
+ gl_->BindFramebuffer(GL_FRAMEBUFFER, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ClearCommands();
+ gl_->BindFramebuffer(GL_FRAMEBUFFER, 2);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, BindRenderbuffer) {
+ struct Cmds {
+ cmds::BindRenderbuffer cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_RENDERBUFFER, 2);
+
+ gl_->BindRenderbuffer(GL_RENDERBUFFER, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ClearCommands();
+ gl_->BindRenderbuffer(GL_RENDERBUFFER, 2);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, BlendColor) {
+ struct Cmds {
+ cmds::BlendColor cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->BlendColor(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendEquation) {
+ struct Cmds {
+ cmds::BlendEquation cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FUNC_SUBTRACT);
+
+ gl_->BlendEquation(GL_FUNC_SUBTRACT);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendEquationSeparate) {
+ struct Cmds {
+ cmds::BlendEquationSeparate cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FUNC_SUBTRACT, GL_FUNC_ADD);
+
+ gl_->BlendEquationSeparate(GL_FUNC_SUBTRACT, GL_FUNC_ADD);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendFunc) {
+ struct Cmds {
+ cmds::BlendFunc cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_ZERO, GL_ZERO);
+
+ gl_->BlendFunc(GL_ZERO, GL_ZERO);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendFuncSeparate) {
+ struct Cmds {
+ cmds::BlendFuncSeparate cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_ZERO, GL_ZERO, GL_ZERO, GL_ZERO);
+
+ gl_->BlendFuncSeparate(GL_ZERO, GL_ZERO, GL_ZERO, GL_ZERO);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CheckFramebufferStatus) {
+ struct Cmds {
+ cmds::CheckFramebufferStatus cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::CheckFramebufferStatus::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->CheckFramebufferStatus(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, Clear) {
+ struct Cmds {
+ cmds::Clear cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->Clear(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ClearColor) {
+ struct Cmds {
+ cmds::ClearColor cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->ClearColor(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ClearDepthf) {
+ struct Cmds {
+ cmds::ClearDepthf cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(0.5f);
+
+ gl_->ClearDepthf(0.5f);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ClearStencil) {
+ struct Cmds {
+ cmds::ClearStencil cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->ClearStencil(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ColorMask) {
+ struct Cmds {
+ cmds::ColorMask cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(true, true, true, true);
+
+ gl_->ColorMask(true, true, true, true);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CompileShader) {
+ struct Cmds {
+ cmds::CompileShader cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->CompileShader(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for CompressedTexImage2D
+// TODO: Implement unit test for CompressedTexSubImage2D
+
+TEST_F(GLES2ImplementationTest, CopyTexImage2D) {
+ struct Cmds {
+ cmds::CopyTexImage2D cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2, GL_ALPHA, 4, 5, 6, 7);
+
+ gl_->CopyTexImage2D(GL_TEXTURE_2D, 2, GL_ALPHA, 4, 5, 6, 7, 0);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CopyTexImage2DInvalidConstantArg7) {
+ gl_->CopyTexImage2D(GL_TEXTURE_2D, 2, GL_ALPHA, 4, 5, 6, 7, 1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, CopyTexSubImage2D) {
+ struct Cmds {
+ cmds::CopyTexSubImage2D cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2, 3, 4, 5, 6, 7, 8);
+
+ gl_->CopyTexSubImage2D(GL_TEXTURE_2D, 2, 3, 4, 5, 6, 7, 8);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CullFace) {
+ struct Cmds {
+ cmds::CullFace cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRONT);
+
+ gl_->CullFace(GL_FRONT);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteBuffers) {
+ GLuint ids[2] = {kBuffersStartId, kBuffersStartId + 1};
+ struct Cmds {
+ cmds::DeleteBuffersImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kBuffersStartId;
+ expected.data[1] = kBuffersStartId + 1;
+ gl_->DeleteBuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteFramebuffers) {
+ GLuint ids[2] = {kFramebuffersStartId, kFramebuffersStartId + 1};
+ struct Cmds {
+ cmds::DeleteFramebuffersImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kFramebuffersStartId;
+ expected.data[1] = kFramebuffersStartId + 1;
+ gl_->DeleteFramebuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteProgram) {
+ struct Cmds {
+ cmds::DeleteProgram cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->DeleteProgram(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteRenderbuffers) {
+ GLuint ids[2] = {kRenderbuffersStartId, kRenderbuffersStartId + 1};
+ struct Cmds {
+ cmds::DeleteRenderbuffersImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kRenderbuffersStartId;
+ expected.data[1] = kRenderbuffersStartId + 1;
+ gl_->DeleteRenderbuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteShader) {
+ struct Cmds {
+ cmds::DeleteShader cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->DeleteShader(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteTextures) {
+ GLuint ids[2] = {kTexturesStartId, kTexturesStartId + 1};
+ struct Cmds {
+ cmds::DeleteTexturesImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kTexturesStartId;
+ expected.data[1] = kTexturesStartId + 1;
+ gl_->DeleteTextures(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DepthFunc) {
+ struct Cmds {
+ cmds::DepthFunc cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_NEVER);
+
+ gl_->DepthFunc(GL_NEVER);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DepthMask) {
+ struct Cmds {
+ cmds::DepthMask cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(true);
+
+ gl_->DepthMask(true);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DepthRangef) {
+ struct Cmds {
+ cmds::DepthRangef cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->DepthRangef(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DetachShader) {
+ struct Cmds {
+ cmds::DetachShader cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->DetachShader(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DisableVertexAttribArray) {
+ struct Cmds {
+ cmds::DisableVertexAttribArray cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->DisableVertexAttribArray(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawArrays) {
+ struct Cmds {
+ cmds::DrawArrays cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_POINTS, 2, 3);
+
+ gl_->DrawArrays(GL_POINTS, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, EnableVertexAttribArray) {
+ struct Cmds {
+ cmds::EnableVertexAttribArray cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->EnableVertexAttribArray(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Flush) {
+ struct Cmds {
+ cmds::Flush cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->Flush();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, FramebufferRenderbuffer) {
+ struct Cmds {
+ cmds::FramebufferRenderbuffer cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, 4);
+
+ gl_->FramebufferRenderbuffer(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, FramebufferTexture2D) {
+ struct Cmds {
+ cmds::FramebufferTexture2D cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4);
+
+ gl_->FramebufferTexture2D(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4, 0);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, FramebufferTexture2DInvalidConstantArg4) {
+ gl_->FramebufferTexture2D(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4, 1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, FrontFace) {
+ struct Cmds {
+ cmds::FrontFace cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_CW);
+
+ gl_->FrontFace(GL_CW);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GenBuffers) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenBuffersImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kBuffersStartId;
+ expected.data[1] = kBuffersStartId + 1;
+ gl_->GenBuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kBuffersStartId, ids[0]);
+ EXPECT_EQ(kBuffersStartId + 1, ids[1]);
+}
+
+TEST_F(GLES2ImplementationTest, GenerateMipmap) {
+ struct Cmds {
+ cmds::GenerateMipmap cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D);
+
+ gl_->GenerateMipmap(GL_TEXTURE_2D);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GenFramebuffers) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenFramebuffersImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kFramebuffersStartId;
+ expected.data[1] = kFramebuffersStartId + 1;
+ gl_->GenFramebuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kFramebuffersStartId, ids[0]);
+ EXPECT_EQ(kFramebuffersStartId + 1, ids[1]);
+}
+
+TEST_F(GLES2ImplementationTest, GenRenderbuffers) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenRenderbuffersImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kRenderbuffersStartId;
+ expected.data[1] = kRenderbuffersStartId + 1;
+ gl_->GenRenderbuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kRenderbuffersStartId, ids[0]);
+ EXPECT_EQ(kRenderbuffersStartId + 1, ids[1]);
+}
+
+TEST_F(GLES2ImplementationTest, GenTextures) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenTexturesImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kTexturesStartId;
+ expected.data[1] = kTexturesStartId + 1;
+ gl_->GenTextures(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kTexturesStartId, ids[0]);
+ EXPECT_EQ(kTexturesStartId + 1, ids[1]);
+}
+// TODO: Implement unit test for GetActiveAttrib
+// TODO: Implement unit test for GetActiveUniform
+// TODO: Implement unit test for GetAttachedShaders
+// TODO: Implement unit test for GetAttribLocation
+
+TEST_F(GLES2ImplementationTest, GetBooleanv) {
+ struct Cmds {
+ cmds::GetBooleanv cmd;
+ };
+ typedef cmds::GetBooleanv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetBooleanv(123, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetBufferParameteriv) {
+ struct Cmds {
+ cmds::GetBufferParameteriv cmd;
+ };
+ typedef cmds::GetBufferParameteriv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_BUFFER_SIZE, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetBufferParameteriv(123, GL_BUFFER_SIZE, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetFloatv) {
+ struct Cmds {
+ cmds::GetFloatv cmd;
+ };
+ typedef cmds::GetFloatv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetFloatv(123, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetFramebufferAttachmentParameteriv) {
+ struct Cmds {
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ };
+ typedef cmds::GetFramebufferAttachmentParameteriv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ result1.id,
+ result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetFramebufferAttachmentParameteriv(
+ 123,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetIntegerv) {
+ struct Cmds {
+ cmds::GetIntegerv cmd;
+ };
+ typedef cmds::GetIntegerv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetIntegerv(123, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetProgramiv) {
+ struct Cmds {
+ cmds::GetProgramiv cmd;
+ };
+ typedef cmds::GetProgramiv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_DELETE_STATUS, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetProgramiv(123, GL_DELETE_STATUS, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+// TODO: Implement unit test for GetProgramInfoLog
+
+TEST_F(GLES2ImplementationTest, GetRenderbufferParameteriv) {
+ struct Cmds {
+ cmds::GetRenderbufferParameteriv cmd;
+ };
+ typedef cmds::GetRenderbufferParameteriv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_RENDERBUFFER_RED_SIZE, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetRenderbufferParameteriv(123, GL_RENDERBUFFER_RED_SIZE, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetShaderiv) {
+ struct Cmds {
+ cmds::GetShaderiv cmd;
+ };
+ typedef cmds::GetShaderiv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_SHADER_TYPE, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetShaderiv(123, GL_SHADER_TYPE, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+// TODO: Implement unit test for GetShaderInfoLog
+// TODO: Implement unit test for GetShaderPrecisionFormat
+
+TEST_F(GLES2ImplementationTest, GetTexParameterfv) {
+ struct Cmds {
+ cmds::GetTexParameterfv cmd;
+ };
+ typedef cmds::GetTexParameterfv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_TEXTURE_MAG_FILTER, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetTexParameterfv(123, GL_TEXTURE_MAG_FILTER, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetTexParameteriv) {
+ struct Cmds {
+ cmds::GetTexParameteriv cmd;
+ };
+ typedef cmds::GetTexParameteriv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_TEXTURE_MAG_FILTER, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetTexParameteriv(123, GL_TEXTURE_MAG_FILTER, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+// TODO: Implement unit test for GetUniformfv
+// TODO: Implement unit test for GetUniformiv
+// TODO: Implement unit test for GetUniformLocation
+
+TEST_F(GLES2ImplementationTest, GetVertexAttribfv) {
+ struct Cmds {
+ cmds::GetVertexAttribfv cmd;
+ };
+ typedef cmds::GetVertexAttribfv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(
+ 123, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetVertexAttribfv(123, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetVertexAttribiv) {
+ struct Cmds {
+ cmds::GetVertexAttribiv cmd;
+ };
+ typedef cmds::GetVertexAttribiv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(
+ 123, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetVertexAttribiv(123, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, Hint) {
+ struct Cmds {
+ cmds::Hint cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_GENERATE_MIPMAP_HINT, GL_FASTEST);
+
+ gl_->Hint(GL_GENERATE_MIPMAP_HINT, GL_FASTEST);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, IsBuffer) {
+ struct Cmds {
+ cmds::IsBuffer cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsBuffer::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsBuffer(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsEnabled) {
+ struct Cmds {
+ cmds::IsEnabled cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsEnabled::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsEnabled(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsFramebuffer) {
+ struct Cmds {
+ cmds::IsFramebuffer cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsFramebuffer::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsFramebuffer(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsProgram) {
+ struct Cmds {
+ cmds::IsProgram cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsProgram::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsProgram(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsRenderbuffer) {
+ struct Cmds {
+ cmds::IsRenderbuffer cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsRenderbuffer::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsRenderbuffer(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsShader) {
+ struct Cmds {
+ cmds::IsShader cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsShader::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsShader(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsTexture) {
+ struct Cmds {
+ cmds::IsTexture cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsTexture::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsTexture(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, LineWidth) {
+ struct Cmds {
+ cmds::LineWidth cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(0.5f);
+
+ gl_->LineWidth(0.5f);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, LinkProgram) {
+ struct Cmds {
+ cmds::LinkProgram cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->LinkProgram(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, PixelStorei) {
+ struct Cmds {
+ cmds::PixelStorei cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_PACK_ALIGNMENT, 1);
+
+ gl_->PixelStorei(GL_PACK_ALIGNMENT, 1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, PolygonOffset) {
+ struct Cmds {
+ cmds::PolygonOffset cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->PolygonOffset(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ReleaseShaderCompiler) {
+ struct Cmds {
+ cmds::ReleaseShaderCompiler cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->ReleaseShaderCompiler();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, RenderbufferStorage) {
+ struct Cmds {
+ cmds::RenderbufferStorage cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 3, 4);
+
+ gl_->RenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, SampleCoverage) {
+ struct Cmds {
+ cmds::SampleCoverage cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, true);
+
+ gl_->SampleCoverage(1, true);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Scissor) {
+ struct Cmds {
+ cmds::Scissor cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->Scissor(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilFunc) {
+ struct Cmds {
+ cmds::StencilFunc cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_NEVER, 2, 3);
+
+ gl_->StencilFunc(GL_NEVER, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilFuncSeparate) {
+ struct Cmds {
+ cmds::StencilFuncSeparate cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRONT, GL_NEVER, 3, 4);
+
+ gl_->StencilFuncSeparate(GL_FRONT, GL_NEVER, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilMask) {
+ struct Cmds {
+ cmds::StencilMask cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->StencilMask(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilMaskSeparate) {
+ struct Cmds {
+ cmds::StencilMaskSeparate cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRONT, 2);
+
+ gl_->StencilMaskSeparate(GL_FRONT, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilOp) {
+ struct Cmds {
+ cmds::StencilOp cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_KEEP, GL_INCR, GL_KEEP);
+
+ gl_->StencilOp(GL_KEEP, GL_INCR, GL_KEEP);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilOpSeparate) {
+ struct Cmds {
+ cmds::StencilOpSeparate cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRONT, GL_INCR, GL_KEEP, GL_KEEP);
+
+ gl_->StencilOpSeparate(GL_FRONT, GL_INCR, GL_KEEP, GL_KEEP);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, TexParameterf) {
+ struct Cmds {
+ cmds::TexParameterf cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+ gl_->TexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, TexParameterfv) {
+ GLfloat data[1] = {0};
+ struct Cmds {
+ cmds::TexParameterfvImmediate cmd;
+ GLfloat data[1];
+ };
+
+ for (int jj = 0; jj < 1; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &data[0]);
+ gl_->TexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, TexParameteri) {
+ struct Cmds {
+ cmds::TexParameteri cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, TexParameteriv) {
+ GLint data[1] = {0};
+ struct Cmds {
+ cmds::TexParameterivImmediate cmd;
+ GLint data[1];
+ };
+
+ for (int jj = 0; jj < 1; ++jj) {
+ data[jj] = static_cast<GLint>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &data[0]);
+ gl_->TexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform1f) {
+ struct Cmds {
+ cmds::Uniform1f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->Uniform1f(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform1fv) {
+ GLfloat data[2][1] = {{0}};
+ struct Cmds {
+ cmds::Uniform1fvImmediate cmd;
+ GLfloat data[2][1];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 1; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 1 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform1fv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform1i) {
+ struct Cmds {
+ cmds::Uniform1i cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->Uniform1i(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform1iv) {
+ GLint data[2][1] = {{0}};
+ struct Cmds {
+ cmds::Uniform1ivImmediate cmd;
+ GLint data[2][1];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 1; ++jj) {
+ data[ii][jj] = static_cast<GLint>(ii * 1 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform1iv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform2f) {
+ struct Cmds {
+ cmds::Uniform2f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3);
+
+ gl_->Uniform2f(1, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform2fv) {
+ GLfloat data[2][2] = {{0}};
+ struct Cmds {
+ cmds::Uniform2fvImmediate cmd;
+ GLfloat data[2][2];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 2; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 2 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform2fv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform2i) {
+ struct Cmds {
+ cmds::Uniform2i cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3);
+
+ gl_->Uniform2i(1, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform2iv) {
+ GLint data[2][2] = {{0}};
+ struct Cmds {
+ cmds::Uniform2ivImmediate cmd;
+ GLint data[2][2];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 2; ++jj) {
+ data[ii][jj] = static_cast<GLint>(ii * 2 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform2iv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform3f) {
+ struct Cmds {
+ cmds::Uniform3f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->Uniform3f(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform3fv) {
+ GLfloat data[2][3] = {{0}};
+ struct Cmds {
+ cmds::Uniform3fvImmediate cmd;
+ GLfloat data[2][3];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 3; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 3 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform3fv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform3i) {
+ struct Cmds {
+ cmds::Uniform3i cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->Uniform3i(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform3iv) {
+ GLint data[2][3] = {{0}};
+ struct Cmds {
+ cmds::Uniform3ivImmediate cmd;
+ GLint data[2][3];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 3; ++jj) {
+ data[ii][jj] = static_cast<GLint>(ii * 3 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform3iv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform4f) {
+ struct Cmds {
+ cmds::Uniform4f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5);
+
+ gl_->Uniform4f(1, 2, 3, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform4fv) {
+ GLfloat data[2][4] = {{0}};
+ struct Cmds {
+ cmds::Uniform4fvImmediate cmd;
+ GLfloat data[2][4];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 4; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 4 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform4fv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform4i) {
+ struct Cmds {
+ cmds::Uniform4i cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5);
+
+ gl_->Uniform4i(1, 2, 3, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform4iv) {
+ GLint data[2][4] = {{0}};
+ struct Cmds {
+ cmds::Uniform4ivImmediate cmd;
+ GLint data[2][4];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 4; ++jj) {
+ data[ii][jj] = static_cast<GLint>(ii * 4 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform4iv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix2fv) {
+ GLfloat data[2][4] = {{0}};
+ struct Cmds {
+ cmds::UniformMatrix2fvImmediate cmd;
+ GLfloat data[2][4];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 4; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 4 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->UniformMatrix2fv(1, 2, false, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix2fvInvalidConstantArg2) {
+ GLfloat data[2][4] = {{0}};
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 4; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 4 + jj);
+ }
+ }
+ gl_->UniformMatrix2fv(1, 2, true, &data[0][0]);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix3fv) {
+ GLfloat data[2][9] = {{0}};
+ struct Cmds {
+ cmds::UniformMatrix3fvImmediate cmd;
+ GLfloat data[2][9];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 9; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 9 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->UniformMatrix3fv(1, 2, false, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix3fvInvalidConstantArg2) {
+ GLfloat data[2][9] = {{0}};
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 9; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 9 + jj);
+ }
+ }
+ gl_->UniformMatrix3fv(1, 2, true, &data[0][0]);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix4fv) {
+ GLfloat data[2][16] = {{0}};
+ struct Cmds {
+ cmds::UniformMatrix4fvImmediate cmd;
+ GLfloat data[2][16];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 16; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 16 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->UniformMatrix4fv(1, 2, false, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix4fvInvalidConstantArg2) {
+ GLfloat data[2][16] = {{0}};
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 16; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 16 + jj);
+ }
+ }
+ gl_->UniformMatrix4fv(1, 2, true, &data[0][0]);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, UseProgram) {
+ struct Cmds {
+ cmds::UseProgram cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->UseProgram(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ClearCommands();
+ gl_->UseProgram(1);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, ValidateProgram) {
+ struct Cmds {
+ cmds::ValidateProgram cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->ValidateProgram(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib1f) {
+ struct Cmds {
+ cmds::VertexAttrib1f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->VertexAttrib1f(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib1fv) {
+ GLfloat data[1] = {0};
+ struct Cmds {
+ cmds::VertexAttrib1fvImmediate cmd;
+ GLfloat data[1];
+ };
+
+ for (int jj = 0; jj < 1; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(1, &data[0]);
+ gl_->VertexAttrib1fv(1, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib2f) {
+ struct Cmds {
+ cmds::VertexAttrib2f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3);
+
+ gl_->VertexAttrib2f(1, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib2fv) {
+ GLfloat data[2] = {0};
+ struct Cmds {
+ cmds::VertexAttrib2fvImmediate cmd;
+ GLfloat data[2];
+ };
+
+ for (int jj = 0; jj < 2; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(1, &data[0]);
+ gl_->VertexAttrib2fv(1, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib3f) {
+ struct Cmds {
+ cmds::VertexAttrib3f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->VertexAttrib3f(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib3fv) {
+ GLfloat data[3] = {0};
+ struct Cmds {
+ cmds::VertexAttrib3fvImmediate cmd;
+ GLfloat data[3];
+ };
+
+ for (int jj = 0; jj < 3; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(1, &data[0]);
+ gl_->VertexAttrib3fv(1, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib4f) {
+ struct Cmds {
+ cmds::VertexAttrib4f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5);
+
+ gl_->VertexAttrib4f(1, 2, 3, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib4fv) {
+ GLfloat data[4] = {0};
+ struct Cmds {
+ cmds::VertexAttrib4fvImmediate cmd;
+ GLfloat data[4];
+ };
+
+ for (int jj = 0; jj < 4; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(1, &data[0]);
+ gl_->VertexAttrib4fv(1, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Viewport) {
+ struct Cmds {
+ cmds::Viewport cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->Viewport(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlitFramebufferCHROMIUM) {
+ struct Cmds {
+ cmds::BlitFramebufferCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5, 6, 7, 8, 9, GL_NEAREST);
+
+ gl_->BlitFramebufferCHROMIUM(1, 2, 3, 4, 5, 6, 7, 8, 9, GL_NEAREST);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, RenderbufferStorageMultisampleCHROMIUM) {
+ struct Cmds {
+ cmds::RenderbufferStorageMultisampleCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_RENDERBUFFER, 2, GL_RGBA4, 4, 5);
+
+ gl_->RenderbufferStorageMultisampleCHROMIUM(
+ GL_RENDERBUFFER, 2, GL_RGBA4, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, RenderbufferStorageMultisampleEXT) {
+ struct Cmds {
+ cmds::RenderbufferStorageMultisampleEXT cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_RENDERBUFFER, 2, GL_RGBA4, 4, 5);
+
+ gl_->RenderbufferStorageMultisampleEXT(GL_RENDERBUFFER, 2, GL_RGBA4, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, FramebufferTexture2DMultisampleEXT) {
+ struct Cmds {
+ cmds::FramebufferTexture2DMultisampleEXT cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4, 6);
+
+ gl_->FramebufferTexture2DMultisampleEXT(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4, 0, 6);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest,
+ FramebufferTexture2DMultisampleEXTInvalidConstantArg4) {
+ gl_->FramebufferTexture2DMultisampleEXT(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4, 1, 6);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, TexStorage2DEXT) {
+ struct Cmds {
+ cmds::TexStorage2DEXT cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2, GL_RGB565, 4, 5);
+
+ gl_->TexStorage2DEXT(GL_TEXTURE_2D, 2, GL_RGB565, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GenQueriesEXT) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenQueriesEXTImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kQueriesStartId;
+ expected.data[1] = kQueriesStartId + 1;
+ gl_->GenQueriesEXT(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kQueriesStartId, ids[0]);
+ EXPECT_EQ(kQueriesStartId + 1, ids[1]);
+}
+
+TEST_F(GLES2ImplementationTest, DeleteQueriesEXT) {
+ GLuint ids[2] = {kQueriesStartId, kQueriesStartId + 1};
+ struct Cmds {
+ cmds::DeleteQueriesEXTImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kQueriesStartId;
+ expected.data[1] = kQueriesStartId + 1;
+ gl_->DeleteQueriesEXT(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for BeginQueryEXT
+// TODO: Implement unit test for InsertEventMarkerEXT
+// TODO: Implement unit test for PushGroupMarkerEXT
+
+TEST_F(GLES2ImplementationTest, PopGroupMarkerEXT) {
+ struct Cmds {
+ cmds::PopGroupMarkerEXT cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->PopGroupMarkerEXT();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GenVertexArraysOES) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenVertexArraysOESImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kVertexArraysStartId;
+ expected.data[1] = kVertexArraysStartId + 1;
+ gl_->GenVertexArraysOES(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kVertexArraysStartId, ids[0]);
+ EXPECT_EQ(kVertexArraysStartId + 1, ids[1]);
+}
+
+TEST_F(GLES2ImplementationTest, DeleteVertexArraysOES) {
+ GLuint ids[2] = {kVertexArraysStartId, kVertexArraysStartId + 1};
+ struct Cmds {
+ cmds::DeleteVertexArraysOESImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kVertexArraysStartId;
+ expected.data[1] = kVertexArraysStartId + 1;
+ gl_->DeleteVertexArraysOES(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, IsVertexArrayOES) {
+ struct Cmds {
+ cmds::IsVertexArrayOES cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsVertexArrayOES::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsVertexArrayOES(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+// TODO: Implement unit test for EnableFeatureCHROMIUM
+
+TEST_F(GLES2ImplementationTest, ResizeCHROMIUM) {
+ struct Cmds {
+ cmds::ResizeCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3);
+
+ gl_->ResizeCHROMIUM(1, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for GetRequestableExtensionsCHROMIUM
+// TODO: Implement unit test for CreateStreamTextureCHROMIUM
+// TODO: Implement unit test for GetTranslatedShaderSourceANGLE
+
+TEST_F(GLES2ImplementationTest, TexImageIOSurface2DCHROMIUM) {
+ struct Cmds {
+ cmds::TexImageIOSurface2DCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2, 3, 4, 5);
+
+ gl_->TexImageIOSurface2DCHROMIUM(GL_TEXTURE_2D, 2, 3, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CopyTextureCHROMIUM) {
+ struct Cmds {
+ cmds::CopyTextureCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, GL_ALPHA, GL_UNSIGNED_BYTE);
+
+ gl_->CopyTextureCHROMIUM(1, 2, 3, 4, GL_ALPHA, GL_UNSIGNED_BYTE);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawArraysInstancedANGLE) {
+ struct Cmds {
+ cmds::DrawArraysInstancedANGLE cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_POINTS, 2, 3, 4);
+
+ gl_->DrawArraysInstancedANGLE(GL_POINTS, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttribDivisorANGLE) {
+ struct Cmds {
+ cmds::VertexAttribDivisorANGLE cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->VertexAttribDivisorANGLE(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for GenMailboxCHROMIUM
+// TODO: Implement unit test for BindUniformLocationCHROMIUM
+
+TEST_F(GLES2ImplementationTest, BindTexImage2DCHROMIUM) {
+ struct Cmds {
+ cmds::BindTexImage2DCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2);
+
+ gl_->BindTexImage2DCHROMIUM(GL_TEXTURE_2D, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ReleaseTexImage2DCHROMIUM) {
+ struct Cmds {
+ cmds::ReleaseTexImage2DCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2);
+
+ gl_->ReleaseTexImage2DCHROMIUM(GL_TEXTURE_2D, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DiscardFramebufferEXT) {
+ GLenum data[2][1] = {{0}};
+ struct Cmds {
+ cmds::DiscardFramebufferEXTImmediate cmd;
+ GLenum data[2][1];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 1; ++jj) {
+ data[ii][jj] = static_cast<GLenum>(ii * 1 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->DiscardFramebufferEXT(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, LoseContextCHROMIUM) {
+ struct Cmds {
+ cmds::LoseContextCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_GUILTY_CONTEXT_RESET_ARB, GL_GUILTY_CONTEXT_RESET_ARB);
+
+ gl_->LoseContextCHROMIUM(GL_GUILTY_CONTEXT_RESET_ARB,
+ GL_GUILTY_CONTEXT_RESET_ARB);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for InsertSyncPointCHROMIUM
+
+TEST_F(GLES2ImplementationTest, WaitSyncPointCHROMIUM) {
+ struct Cmds {
+ cmds::WaitSyncPointCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->WaitSyncPointCHROMIUM(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawBuffersEXT) {
+ GLenum data[1][1] = {{0}};
+ struct Cmds {
+ cmds::DrawBuffersEXTImmediate cmd;
+ GLenum data[1][1];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 1; ++ii) {
+ for (int jj = 0; jj < 1; ++jj) {
+ data[ii][jj] = static_cast<GLenum>(ii * 1 + jj);
+ }
+ }
+ expected.cmd.Init(1, &data[0][0]);
+ gl_->DrawBuffersEXT(1, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DiscardBackbufferCHROMIUM) {
+ struct Cmds {
+ cmds::DiscardBackbufferCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->DiscardBackbufferCHROMIUM();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MatrixLoadfCHROMIUM) {
+ GLfloat data[16] = {0};
+ struct Cmds {
+ cmds::MatrixLoadfCHROMIUMImmediate cmd;
+ GLfloat data[16];
+ };
+
+ for (int jj = 0; jj < 16; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(GL_PATH_PROJECTION_CHROMIUM, &data[0]);
+ gl_->MatrixLoadfCHROMIUM(GL_PATH_PROJECTION_CHROMIUM, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MatrixLoadIdentityCHROMIUM) {
+ struct Cmds {
+ cmds::MatrixLoadIdentityCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_PATH_PROJECTION_CHROMIUM);
+
+ gl_->MatrixLoadIdentityCHROMIUM(GL_PATH_PROJECTION_CHROMIUM);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_interface.h b/gpu/command_buffer/client/gles2_interface.h
new file mode 100644
index 0000000..ca05308
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_H_
+
+#include <GLES2/gl2.h>
+
+#include "base/compiler_specific.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This class is the interface for all client side GL functions.
+class GLES2Interface {
+ public:
+ GLES2Interface() {}
+ virtual ~GLES2Interface() {}
+
+ // Include the auto-generated part of this class. We split this because
+ // it means we can easily edit the non-auto generated parts right here in
+ // this file instead of having to edit some template or the code generator.
+ #include "gpu/command_buffer/client/gles2_interface_autogen.h"
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_H_
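
For context only (not part of the patch): a minimal sketch of how client code can be
written against the abstract GLES2Interface declared above, so that any implementation
(the real command-buffer client, the stub introduced later in this patch, or a mock)
can be substituted. The ClearToRed helper is hypothetical and exists only for illustration.

// Hypothetical example; assumes only the GLES2Interface declarations above.
#include "gpu/command_buffer/client/gles2_interface.h"

namespace {

// Issues GL calls through the abstract interface rather than a concrete class,
// so tests can pass in a GLES2InterfaceStub or a mock instead of a real context.
void ClearToRed(gpu::gles2::GLES2Interface* gl) {
  gl->ClearColor(1.0f, 0.0f, 0.0f, 1.0f);
  gl->Clear(GL_COLOR_BUFFER_BIT);
}

}  // namespace
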
diff --git a/gpu/command_buffer/client/gles2_interface_autogen.h b/gpu/command_buffer/client/gles2_interface_autogen.h
new file mode 100644
index 0000000..abfc598
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -0,0 +1,503 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_interface.h to declare the
+// GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_
+
+virtual void ActiveTexture(GLenum texture) = 0;
+virtual void AttachShader(GLuint program, GLuint shader) = 0;
+virtual void BindAttribLocation(GLuint program,
+ GLuint index,
+ const char* name) = 0;
+virtual void BindBuffer(GLenum target, GLuint buffer) = 0;
+virtual void BindFramebuffer(GLenum target, GLuint framebuffer) = 0;
+virtual void BindRenderbuffer(GLenum target, GLuint renderbuffer) = 0;
+virtual void BindTexture(GLenum target, GLuint texture) = 0;
+virtual void BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) = 0;
+virtual void BlendEquation(GLenum mode) = 0;
+virtual void BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) = 0;
+virtual void BlendFunc(GLenum sfactor, GLenum dfactor) = 0;
+virtual void BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) = 0;
+virtual void BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) = 0;
+virtual void BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) = 0;
+virtual GLenum CheckFramebufferStatus(GLenum target) = 0;
+virtual void Clear(GLbitfield mask) = 0;
+virtual void ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) = 0;
+virtual void ClearDepthf(GLclampf depth) = 0;
+virtual void ClearStencil(GLint s) = 0;
+virtual void ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) = 0;
+virtual void CompileShader(GLuint shader) = 0;
+virtual void CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) = 0;
+virtual void CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) = 0;
+virtual void CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) = 0;
+virtual void CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual GLuint CreateProgram() = 0;
+virtual GLuint CreateShader(GLenum type) = 0;
+virtual void CullFace(GLenum mode) = 0;
+virtual void DeleteBuffers(GLsizei n, const GLuint* buffers) = 0;
+virtual void DeleteFramebuffers(GLsizei n, const GLuint* framebuffers) = 0;
+virtual void DeleteProgram(GLuint program) = 0;
+virtual void DeleteRenderbuffers(GLsizei n, const GLuint* renderbuffers) = 0;
+virtual void DeleteShader(GLuint shader) = 0;
+virtual void DeleteTextures(GLsizei n, const GLuint* textures) = 0;
+virtual void DepthFunc(GLenum func) = 0;
+virtual void DepthMask(GLboolean flag) = 0;
+virtual void DepthRangef(GLclampf zNear, GLclampf zFar) = 0;
+virtual void DetachShader(GLuint program, GLuint shader) = 0;
+virtual void Disable(GLenum cap) = 0;
+virtual void DisableVertexAttribArray(GLuint index) = 0;
+virtual void DrawArrays(GLenum mode, GLint first, GLsizei count) = 0;
+virtual void DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) = 0;
+virtual void Enable(GLenum cap) = 0;
+virtual void EnableVertexAttribArray(GLuint index) = 0;
+virtual void Finish() = 0;
+virtual void Flush() = 0;
+virtual void FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) = 0;
+virtual void FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) = 0;
+virtual void FrontFace(GLenum mode) = 0;
+virtual void GenBuffers(GLsizei n, GLuint* buffers) = 0;
+virtual void GenerateMipmap(GLenum target) = 0;
+virtual void GenFramebuffers(GLsizei n, GLuint* framebuffers) = 0;
+virtual void GenRenderbuffers(GLsizei n, GLuint* renderbuffers) = 0;
+virtual void GenTextures(GLsizei n, GLuint* textures) = 0;
+virtual void GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) = 0;
+virtual void GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) = 0;
+virtual void GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) = 0;
+virtual GLint GetAttribLocation(GLuint program, const char* name) = 0;
+virtual void GetBooleanv(GLenum pname, GLboolean* params) = 0;
+virtual void GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) = 0;
+virtual GLenum GetError() = 0;
+virtual void GetFloatv(GLenum pname, GLfloat* params) = 0;
+virtual void GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) = 0;
+virtual void GetIntegerv(GLenum pname, GLint* params) = 0;
+virtual void GetProgramiv(GLuint program, GLenum pname, GLint* params) = 0;
+virtual void GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) = 0;
+virtual void GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) = 0;
+virtual void GetShaderiv(GLuint shader, GLenum pname, GLint* params) = 0;
+virtual void GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) = 0;
+virtual void GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) = 0;
+virtual void GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) = 0;
+virtual const GLubyte* GetString(GLenum name) = 0;
+virtual void GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) = 0;
+virtual void GetTexParameteriv(GLenum target, GLenum pname, GLint* params) = 0;
+virtual void GetUniformfv(GLuint program, GLint location, GLfloat* params) = 0;
+virtual void GetUniformiv(GLuint program, GLint location, GLint* params) = 0;
+virtual GLint GetUniformLocation(GLuint program, const char* name) = 0;
+virtual void GetVertexAttribfv(GLuint index, GLenum pname, GLfloat* params) = 0;
+virtual void GetVertexAttribiv(GLuint index, GLenum pname, GLint* params) = 0;
+virtual void GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ void** pointer) = 0;
+virtual void Hint(GLenum target, GLenum mode) = 0;
+virtual GLboolean IsBuffer(GLuint buffer) = 0;
+virtual GLboolean IsEnabled(GLenum cap) = 0;
+virtual GLboolean IsFramebuffer(GLuint framebuffer) = 0;
+virtual GLboolean IsProgram(GLuint program) = 0;
+virtual GLboolean IsRenderbuffer(GLuint renderbuffer) = 0;
+virtual GLboolean IsShader(GLuint shader) = 0;
+virtual GLboolean IsTexture(GLuint texture) = 0;
+virtual void LineWidth(GLfloat width) = 0;
+virtual void LinkProgram(GLuint program) = 0;
+virtual void PixelStorei(GLenum pname, GLint param) = 0;
+virtual void PolygonOffset(GLfloat factor, GLfloat units) = 0;
+virtual void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) = 0;
+virtual void ReleaseShaderCompiler() = 0;
+virtual void RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual void SampleCoverage(GLclampf value, GLboolean invert) = 0;
+virtual void Scissor(GLint x, GLint y, GLsizei width, GLsizei height) = 0;
+virtual void ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) = 0;
+virtual void ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) = 0;
+virtual void ShallowFinishCHROMIUM() = 0;
+virtual void ShallowFlushCHROMIUM() = 0;
+virtual void StencilFunc(GLenum func, GLint ref, GLuint mask) = 0;
+virtual void StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) = 0;
+virtual void StencilMask(GLuint mask) = 0;
+virtual void StencilMaskSeparate(GLenum face, GLuint mask) = 0;
+virtual void StencilOp(GLenum fail, GLenum zfail, GLenum zpass) = 0;
+virtual void StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) = 0;
+virtual void TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) = 0;
+virtual void TexParameterf(GLenum target, GLenum pname, GLfloat param) = 0;
+virtual void TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) = 0;
+virtual void TexParameteri(GLenum target, GLenum pname, GLint param) = 0;
+virtual void TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) = 0;
+virtual void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) = 0;
+virtual void Uniform1f(GLint location, GLfloat x) = 0;
+virtual void Uniform1fv(GLint location, GLsizei count, const GLfloat* v) = 0;
+virtual void Uniform1i(GLint location, GLint x) = 0;
+virtual void Uniform1iv(GLint location, GLsizei count, const GLint* v) = 0;
+virtual void Uniform2f(GLint location, GLfloat x, GLfloat y) = 0;
+virtual void Uniform2fv(GLint location, GLsizei count, const GLfloat* v) = 0;
+virtual void Uniform2i(GLint location, GLint x, GLint y) = 0;
+virtual void Uniform2iv(GLint location, GLsizei count, const GLint* v) = 0;
+virtual void Uniform3f(GLint location, GLfloat x, GLfloat y, GLfloat z) = 0;
+virtual void Uniform3fv(GLint location, GLsizei count, const GLfloat* v) = 0;
+virtual void Uniform3i(GLint location, GLint x, GLint y, GLint z) = 0;
+virtual void Uniform3iv(GLint location, GLsizei count, const GLint* v) = 0;
+virtual void Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) = 0;
+virtual void Uniform4fv(GLint location, GLsizei count, const GLfloat* v) = 0;
+virtual void Uniform4i(GLint location, GLint x, GLint y, GLint z, GLint w) = 0;
+virtual void Uniform4iv(GLint location, GLsizei count, const GLint* v) = 0;
+virtual void UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) = 0;
+virtual void UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) = 0;
+virtual void UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) = 0;
+virtual void UseProgram(GLuint program) = 0;
+virtual void ValidateProgram(GLuint program) = 0;
+virtual void VertexAttrib1f(GLuint indx, GLfloat x) = 0;
+virtual void VertexAttrib1fv(GLuint indx, const GLfloat* values) = 0;
+virtual void VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) = 0;
+virtual void VertexAttrib2fv(GLuint indx, const GLfloat* values) = 0;
+virtual void VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z) = 0;
+virtual void VertexAttrib3fv(GLuint indx, const GLfloat* values) = 0;
+virtual void VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) = 0;
+virtual void VertexAttrib4fv(GLuint indx, const GLfloat* values) = 0;
+virtual void VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) = 0;
+virtual void Viewport(GLint x, GLint y, GLsizei width, GLsizei height) = 0;
+virtual void BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) = 0;
+virtual void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual void RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual void FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) = 0;
+virtual void TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual void GenQueriesEXT(GLsizei n, GLuint* queries) = 0;
+virtual void DeleteQueriesEXT(GLsizei n, const GLuint* queries) = 0;
+virtual GLboolean IsQueryEXT(GLuint id) = 0;
+virtual void BeginQueryEXT(GLenum target, GLuint id) = 0;
+virtual void EndQueryEXT(GLenum target) = 0;
+virtual void GetQueryivEXT(GLenum target, GLenum pname, GLint* params) = 0;
+virtual void GetQueryObjectuivEXT(GLuint id, GLenum pname, GLuint* params) = 0;
+virtual void InsertEventMarkerEXT(GLsizei length, const GLchar* marker) = 0;
+virtual void PushGroupMarkerEXT(GLsizei length, const GLchar* marker) = 0;
+virtual void PopGroupMarkerEXT() = 0;
+virtual void GenVertexArraysOES(GLsizei n, GLuint* arrays) = 0;
+virtual void DeleteVertexArraysOES(GLsizei n, const GLuint* arrays) = 0;
+virtual GLboolean IsVertexArrayOES(GLuint array) = 0;
+virtual void BindVertexArrayOES(GLuint array) = 0;
+virtual void SwapBuffers() = 0;
+virtual GLuint GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) = 0;
+virtual GLboolean EnableFeatureCHROMIUM(const char* feature) = 0;
+virtual void* MapBufferCHROMIUM(GLuint target, GLenum access) = 0;
+virtual GLboolean UnmapBufferCHROMIUM(GLuint target) = 0;
+virtual void* MapImageCHROMIUM(GLuint image_id) = 0;
+virtual void UnmapImageCHROMIUM(GLuint image_id) = 0;
+virtual void* MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) = 0;
+virtual void UnmapBufferSubDataCHROMIUM(const void* mem) = 0;
+virtual void* MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) = 0;
+virtual void UnmapTexSubImage2DCHROMIUM(const void* mem) = 0;
+virtual void ResizeCHROMIUM(GLuint width,
+ GLuint height,
+ GLfloat scale_factor) = 0;
+virtual const GLchar* GetRequestableExtensionsCHROMIUM() = 0;
+virtual void RequestExtensionCHROMIUM(const char* extension) = 0;
+virtual void RateLimitOffscreenContextCHROMIUM() = 0;
+virtual void GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) = 0;
+virtual void GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) = 0;
+virtual GLuint CreateStreamTextureCHROMIUM(GLuint texture) = 0;
+virtual GLuint CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) = 0;
+virtual void DestroyImageCHROMIUM(GLuint image_id) = 0;
+virtual void GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) = 0;
+virtual GLuint CreateGpuMemoryBufferImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) = 0;
+virtual void GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) = 0;
+virtual void PostSubBufferCHROMIUM(GLint x,
+ GLint y,
+ GLint width,
+ GLint height) = 0;
+virtual void TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) = 0;
+virtual void CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) = 0;
+virtual void DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) = 0;
+virtual void DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) = 0;
+virtual void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) = 0;
+virtual void GenMailboxCHROMIUM(GLbyte* mailbox) = 0;
+virtual void ProduceTextureCHROMIUM(GLenum target, const GLbyte* mailbox) = 0;
+virtual void ProduceTextureDirectCHROMIUM(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) = 0;
+virtual void ConsumeTextureCHROMIUM(GLenum target, const GLbyte* mailbox) = 0;
+virtual GLuint CreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) = 0;
+virtual void BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) = 0;
+virtual void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) = 0;
+virtual void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) = 0;
+virtual void TraceBeginCHROMIUM(const char* name) = 0;
+virtual void TraceEndCHROMIUM() = 0;
+virtual void AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) = 0;
+virtual void AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) = 0;
+virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) = 0;
+virtual void WaitAllAsyncTexImage2DCHROMIUM() = 0;
+virtual void DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) = 0;
+virtual void LoseContextCHROMIUM(GLenum current, GLenum other) = 0;
+virtual GLuint InsertSyncPointCHROMIUM() = 0;
+virtual void WaitSyncPointCHROMIUM(GLuint sync_point) = 0;
+virtual void DrawBuffersEXT(GLsizei count, const GLenum* bufs) = 0;
+virtual void DiscardBackbufferCHROMIUM() = 0;
+virtual void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) = 0;
+virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) = 0;
+virtual void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) = 0;
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_interface_stub.cc b/gpu/command_buffer/client/gles2_interface_stub.cc
new file mode 100644
index 0000000..3f4d7ba
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface_stub.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gles2_interface_stub.h"
+
+namespace gpu {
+namespace gles2 {
+
+GLES2InterfaceStub::GLES2InterfaceStub() {
+}
+
+GLES2InterfaceStub::~GLES2InterfaceStub() {
+}
+
+// Include the auto-generated part of this class. We split this because
+// it means we can easily edit the non-auto generated parts right here in
+// this file instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/gles2_interface_stub.h b/gpu/command_buffer/client/gles2_interface_stub.h
new file mode 100644
index 0000000..cf3fb41
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface_stub.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_H_
+
+#include "gpu/command_buffer/client/gles2_interface.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This class is a stub to help with mocks for the GLES2Interface class.
+class GLES2InterfaceStub : public GLES2Interface {
+ public:
+ GLES2InterfaceStub();
+ virtual ~GLES2InterfaceStub();
+
+ // Include the auto-generated part of this class. We split this because
+ // it means we can easily edit the non-auto generated parts right here in
+ // this file instead of having to edit some template or the code generator.
+ #include "gpu/command_buffer/client/gles2_interface_stub_autogen.h"
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_H_
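
For context only (not part of the patch): because GLES2InterfaceStub supplies an empty
override for every GLES2Interface method, a test can derive from it and override just
the calls it cares about. The CountingGL class below is hypothetical and is shown purely
as a sketch of that pattern.

// Hypothetical test fake; assumes only the stub header added above.
#include "gpu/command_buffer/client/gles2_interface_stub.h"

class CountingGL : public gpu::gles2::GLES2InterfaceStub {
 public:
  CountingGL() : draw_calls_(0) {}

  // Override the one entry point of interest; every other call stays a no-op.
  virtual void DrawArrays(GLenum /* mode */,
                          GLint /* first */,
                          GLsizei /* count */) OVERRIDE {
    ++draw_calls_;
  }

  int draw_calls() const { return draw_calls_; }

 private:
  int draw_calls_;
};
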
diff --git a/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/gpu/command_buffer/client/gles2_interface_stub_autogen.h
new file mode 100644
index 0000000..67fc2da
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -0,0 +1,536 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_interface_stub.h.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
+
+virtual void ActiveTexture(GLenum texture) OVERRIDE;
+virtual void AttachShader(GLuint program, GLuint shader) OVERRIDE;
+virtual void BindAttribLocation(GLuint program,
+ GLuint index,
+ const char* name) OVERRIDE;
+virtual void BindBuffer(GLenum target, GLuint buffer) OVERRIDE;
+virtual void BindFramebuffer(GLenum target, GLuint framebuffer) OVERRIDE;
+virtual void BindRenderbuffer(GLenum target, GLuint renderbuffer) OVERRIDE;
+virtual void BindTexture(GLenum target, GLuint texture) OVERRIDE;
+virtual void BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+virtual void BlendEquation(GLenum mode) OVERRIDE;
+virtual void BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) OVERRIDE;
+virtual void BlendFunc(GLenum sfactor, GLenum dfactor) OVERRIDE;
+virtual void BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) OVERRIDE;
+virtual void BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) OVERRIDE;
+virtual void BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) OVERRIDE;
+virtual GLenum CheckFramebufferStatus(GLenum target) OVERRIDE;
+virtual void Clear(GLbitfield mask) OVERRIDE;
+virtual void ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+virtual void ClearDepthf(GLclampf depth) OVERRIDE;
+virtual void ClearStencil(GLint s) OVERRIDE;
+virtual void ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) OVERRIDE;
+virtual void CompileShader(GLuint shader) OVERRIDE;
+virtual void CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+virtual void CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+virtual void CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) OVERRIDE;
+virtual void CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual GLuint CreateProgram() OVERRIDE;
+virtual GLuint CreateShader(GLenum type) OVERRIDE;
+virtual void CullFace(GLenum mode) OVERRIDE;
+virtual void DeleteBuffers(GLsizei n, const GLuint* buffers) OVERRIDE;
+virtual void DeleteFramebuffers(GLsizei n, const GLuint* framebuffers) OVERRIDE;
+virtual void DeleteProgram(GLuint program) OVERRIDE;
+virtual void DeleteRenderbuffers(GLsizei n,
+ const GLuint* renderbuffers) OVERRIDE;
+virtual void DeleteShader(GLuint shader) OVERRIDE;
+virtual void DeleteTextures(GLsizei n, const GLuint* textures) OVERRIDE;
+virtual void DepthFunc(GLenum func) OVERRIDE;
+virtual void DepthMask(GLboolean flag) OVERRIDE;
+virtual void DepthRangef(GLclampf zNear, GLclampf zFar) OVERRIDE;
+virtual void DetachShader(GLuint program, GLuint shader) OVERRIDE;
+virtual void Disable(GLenum cap) OVERRIDE;
+virtual void DisableVertexAttribArray(GLuint index) OVERRIDE;
+virtual void DrawArrays(GLenum mode, GLint first, GLsizei count) OVERRIDE;
+virtual void DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) OVERRIDE;
+virtual void Enable(GLenum cap) OVERRIDE;
+virtual void EnableVertexAttribArray(GLuint index) OVERRIDE;
+virtual void Finish() OVERRIDE;
+virtual void Flush() OVERRIDE;
+virtual void FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) OVERRIDE;
+virtual void FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) OVERRIDE;
+virtual void FrontFace(GLenum mode) OVERRIDE;
+virtual void GenBuffers(GLsizei n, GLuint* buffers) OVERRIDE;
+virtual void GenerateMipmap(GLenum target) OVERRIDE;
+virtual void GenFramebuffers(GLsizei n, GLuint* framebuffers) OVERRIDE;
+virtual void GenRenderbuffers(GLsizei n, GLuint* renderbuffers) OVERRIDE;
+virtual void GenTextures(GLsizei n, GLuint* textures) OVERRIDE;
+virtual void GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+virtual void GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+virtual void GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) OVERRIDE;
+virtual GLint GetAttribLocation(GLuint program, const char* name) OVERRIDE;
+virtual void GetBooleanv(GLenum pname, GLboolean* params) OVERRIDE;
+virtual void GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual GLenum GetError() OVERRIDE;
+virtual void GetFloatv(GLenum pname, GLfloat* params) OVERRIDE;
+virtual void GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetIntegerv(GLenum pname, GLint* params) OVERRIDE;
+virtual void GetProgramiv(GLuint program, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+virtual void GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetShaderiv(GLuint shader, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+virtual void GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) OVERRIDE;
+virtual void GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+virtual const GLubyte* GetString(GLenum name) OVERRIDE;
+virtual void GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) OVERRIDE;
+virtual void GetTexParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetUniformfv(GLuint program,
+ GLint location,
+ GLfloat* params) OVERRIDE;
+virtual void GetUniformiv(GLuint program,
+ GLint location,
+ GLint* params) OVERRIDE;
+virtual GLint GetUniformLocation(GLuint program, const char* name) OVERRIDE;
+virtual void GetVertexAttribfv(GLuint index,
+ GLenum pname,
+ GLfloat* params) OVERRIDE;
+virtual void GetVertexAttribiv(GLuint index,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ void** pointer) OVERRIDE;
+virtual void Hint(GLenum target, GLenum mode) OVERRIDE;
+virtual GLboolean IsBuffer(GLuint buffer) OVERRIDE;
+virtual GLboolean IsEnabled(GLenum cap) OVERRIDE;
+virtual GLboolean IsFramebuffer(GLuint framebuffer) OVERRIDE;
+virtual GLboolean IsProgram(GLuint program) OVERRIDE;
+virtual GLboolean IsRenderbuffer(GLuint renderbuffer) OVERRIDE;
+virtual GLboolean IsShader(GLuint shader) OVERRIDE;
+virtual GLboolean IsTexture(GLuint texture) OVERRIDE;
+virtual void LineWidth(GLfloat width) OVERRIDE;
+virtual void LinkProgram(GLuint program) OVERRIDE;
+virtual void PixelStorei(GLenum pname, GLint param) OVERRIDE;
+virtual void PolygonOffset(GLfloat factor, GLfloat units) OVERRIDE;
+virtual void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) OVERRIDE;
+virtual void ReleaseShaderCompiler() OVERRIDE;
+virtual void RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void SampleCoverage(GLclampf value, GLboolean invert) OVERRIDE;
+virtual void Scissor(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+virtual void ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) OVERRIDE;
+virtual void ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) OVERRIDE;
+virtual void ShallowFinishCHROMIUM() OVERRIDE;
+virtual void ShallowFlushCHROMIUM() OVERRIDE;
+virtual void StencilFunc(GLenum func, GLint ref, GLuint mask) OVERRIDE;
+virtual void StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) OVERRIDE;
+virtual void StencilMask(GLuint mask) OVERRIDE;
+virtual void StencilMaskSeparate(GLenum face, GLuint mask) OVERRIDE;
+virtual void StencilOp(GLenum fail, GLenum zfail, GLenum zpass) OVERRIDE;
+virtual void StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) OVERRIDE;
+virtual void TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void TexParameterf(GLenum target, GLenum pname, GLfloat param) OVERRIDE;
+virtual void TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) OVERRIDE;
+virtual void TexParameteri(GLenum target, GLenum pname, GLint param) OVERRIDE;
+virtual void TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) OVERRIDE;
+virtual void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void Uniform1f(GLint location, GLfloat x) OVERRIDE;
+virtual void Uniform1fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform1i(GLint location, GLint x) OVERRIDE;
+virtual void Uniform1iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform2f(GLint location, GLfloat x, GLfloat y) OVERRIDE;
+virtual void Uniform2fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform2i(GLint location, GLint x, GLint y) OVERRIDE;
+virtual void Uniform2iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform3f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+virtual void Uniform3fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform3i(GLint location, GLint x, GLint y, GLint z) OVERRIDE;
+virtual void Uniform3iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+virtual void Uniform4fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform4i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z,
+ GLint w) OVERRIDE;
+virtual void Uniform4iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UseProgram(GLuint program) OVERRIDE;
+virtual void ValidateProgram(GLuint program) OVERRIDE;
+virtual void VertexAttrib1f(GLuint indx, GLfloat x) OVERRIDE;
+virtual void VertexAttrib1fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) OVERRIDE;
+virtual void VertexAttrib2fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib3f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+virtual void VertexAttrib3fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+virtual void VertexAttrib4fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) OVERRIDE;
+virtual void Viewport(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+virtual void BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) OVERRIDE;
+virtual void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) OVERRIDE;
+virtual void TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void GenQueriesEXT(GLsizei n, GLuint* queries) OVERRIDE;
+virtual void DeleteQueriesEXT(GLsizei n, const GLuint* queries) OVERRIDE;
+virtual GLboolean IsQueryEXT(GLuint id) OVERRIDE;
+virtual void BeginQueryEXT(GLenum target, GLuint id) OVERRIDE;
+virtual void EndQueryEXT(GLenum target) OVERRIDE;
+virtual void GetQueryivEXT(GLenum target, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetQueryObjectuivEXT(GLuint id,
+ GLenum pname,
+ GLuint* params) OVERRIDE;
+virtual void InsertEventMarkerEXT(GLsizei length,
+ const GLchar* marker) OVERRIDE;
+virtual void PushGroupMarkerEXT(GLsizei length, const GLchar* marker) OVERRIDE;
+virtual void PopGroupMarkerEXT() OVERRIDE;
+virtual void GenVertexArraysOES(GLsizei n, GLuint* arrays) OVERRIDE;
+virtual void DeleteVertexArraysOES(GLsizei n, const GLuint* arrays) OVERRIDE;
+virtual GLboolean IsVertexArrayOES(GLuint array) OVERRIDE;
+virtual void BindVertexArrayOES(GLuint array) OVERRIDE;
+virtual void SwapBuffers() OVERRIDE;
+virtual GLuint GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) OVERRIDE;
+virtual GLboolean EnableFeatureCHROMIUM(const char* feature) OVERRIDE;
+virtual void* MapBufferCHROMIUM(GLuint target, GLenum access) OVERRIDE;
+virtual GLboolean UnmapBufferCHROMIUM(GLuint target) OVERRIDE;
+virtual void* MapImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void UnmapImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void* MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) OVERRIDE;
+virtual void UnmapBufferSubDataCHROMIUM(const void* mem) OVERRIDE;
+virtual void* MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) OVERRIDE;
+virtual void UnmapTexSubImage2DCHROMIUM(const void* mem) OVERRIDE;
+virtual void ResizeCHROMIUM(GLuint width,
+ GLuint height,
+ GLfloat scale_factor) OVERRIDE;
+virtual const GLchar* GetRequestableExtensionsCHROMIUM() OVERRIDE;
+virtual void RequestExtensionCHROMIUM(const char* extension) OVERRIDE;
+virtual void RateLimitOffscreenContextCHROMIUM() OVERRIDE;
+virtual void GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) OVERRIDE;
+virtual void GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) OVERRIDE;
+virtual GLuint CreateStreamTextureCHROMIUM(GLuint texture) OVERRIDE;
+virtual GLuint CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+virtual void DestroyImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual GLuint CreateGpuMemoryBufferImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+virtual void GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+virtual void PostSubBufferCHROMIUM(GLint x,
+ GLint y,
+ GLint width,
+ GLint height) OVERRIDE;
+virtual void TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) OVERRIDE;
+virtual void CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) OVERRIDE;
+virtual void DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) OVERRIDE;
+virtual void DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) OVERRIDE;
+virtual void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) OVERRIDE;
+virtual void GenMailboxCHROMIUM(GLbyte* mailbox) OVERRIDE;
+virtual void ProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void ProduceTextureDirectCHROMIUM(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void ConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual GLuint CreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) OVERRIDE;
+virtual void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+virtual void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+virtual void TraceBeginCHROMIUM(const char* name) OVERRIDE;
+virtual void TraceEndCHROMIUM() OVERRIDE;
+virtual void AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) OVERRIDE;
+virtual void AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) OVERRIDE;
+virtual void WaitAllAsyncTexImage2DCHROMIUM() OVERRIDE;
+virtual void DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) OVERRIDE;
+virtual void LoseContextCHROMIUM(GLenum current, GLenum other) OVERRIDE;
+virtual GLuint InsertSyncPointCHROMIUM() OVERRIDE;
+virtual void WaitSyncPointCHROMIUM(GLuint sync_point) OVERRIDE;
+virtual void DrawBuffersEXT(GLsizei count, const GLenum* bufs) OVERRIDE;
+virtual void DiscardBackbufferCHROMIUM() OVERRIDE;
+virtual void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) OVERRIDE;
+virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) OVERRIDE;
+virtual void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) OVERRIDE;
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
new file mode 100644
index 0000000..cde303f
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -0,0 +1,863 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_interface_stub.cc.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_
+
+void GLES2InterfaceStub::ActiveTexture(GLenum /* texture */) {
+}
+void GLES2InterfaceStub::AttachShader(GLuint /* program */,
+ GLuint /* shader */) {
+}
+void GLES2InterfaceStub::BindAttribLocation(GLuint /* program */,
+ GLuint /* index */,
+ const char* /* name */) {
+}
+void GLES2InterfaceStub::BindBuffer(GLenum /* target */, GLuint /* buffer */) {
+}
+void GLES2InterfaceStub::BindFramebuffer(GLenum /* target */,
+ GLuint /* framebuffer */) {
+}
+void GLES2InterfaceStub::BindRenderbuffer(GLenum /* target */,
+ GLuint /* renderbuffer */) {
+}
+void GLES2InterfaceStub::BindTexture(GLenum /* target */,
+ GLuint /* texture */) {
+}
+void GLES2InterfaceStub::BlendColor(GLclampf /* red */,
+ GLclampf /* green */,
+ GLclampf /* blue */,
+ GLclampf /* alpha */) {
+}
+void GLES2InterfaceStub::BlendEquation(GLenum /* mode */) {
+}
+void GLES2InterfaceStub::BlendEquationSeparate(GLenum /* modeRGB */,
+ GLenum /* modeAlpha */) {
+}
+void GLES2InterfaceStub::BlendFunc(GLenum /* sfactor */, GLenum /* dfactor */) {
+}
+void GLES2InterfaceStub::BlendFuncSeparate(GLenum /* srcRGB */,
+ GLenum /* dstRGB */,
+ GLenum /* srcAlpha */,
+ GLenum /* dstAlpha */) {
+}
+void GLES2InterfaceStub::BufferData(GLenum /* target */,
+ GLsizeiptr /* size */,
+ const void* /* data */,
+ GLenum /* usage */) {
+}
+void GLES2InterfaceStub::BufferSubData(GLenum /* target */,
+ GLintptr /* offset */,
+ GLsizeiptr /* size */,
+ const void* /* data */) {
+}
+GLenum GLES2InterfaceStub::CheckFramebufferStatus(GLenum /* target */) {
+ return 0;
+}
+void GLES2InterfaceStub::Clear(GLbitfield /* mask */) {
+}
+void GLES2InterfaceStub::ClearColor(GLclampf /* red */,
+ GLclampf /* green */,
+ GLclampf /* blue */,
+ GLclampf /* alpha */) {
+}
+void GLES2InterfaceStub::ClearDepthf(GLclampf /* depth */) {
+}
+void GLES2InterfaceStub::ClearStencil(GLint /* s */) {
+}
+void GLES2InterfaceStub::ColorMask(GLboolean /* red */,
+ GLboolean /* green */,
+ GLboolean /* blue */,
+ GLboolean /* alpha */) {
+}
+void GLES2InterfaceStub::CompileShader(GLuint /* shader */) {
+}
+void GLES2InterfaceStub::CompressedTexImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLint /* border */,
+ GLsizei /* imageSize */,
+ const void* /* data */) {
+}
+void GLES2InterfaceStub::CompressedTexSubImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLint /* xoffset */,
+ GLint /* yoffset */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* format */,
+ GLsizei /* imageSize */,
+ const void* /* data */) {
+}
+void GLES2InterfaceStub::CopyTexImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLenum /* internalformat */,
+ GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLint /* border */) {
+}
+void GLES2InterfaceStub::CopyTexSubImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLint /* xoffset */,
+ GLint /* yoffset */,
+ GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+GLuint GLES2InterfaceStub::CreateProgram() {
+ return 0;
+}
+GLuint GLES2InterfaceStub::CreateShader(GLenum /* type */) {
+ return 0;
+}
+void GLES2InterfaceStub::CullFace(GLenum /* mode */) {
+}
+void GLES2InterfaceStub::DeleteBuffers(GLsizei /* n */,
+ const GLuint* /* buffers */) {
+}
+void GLES2InterfaceStub::DeleteFramebuffers(GLsizei /* n */,
+ const GLuint* /* framebuffers */) {
+}
+void GLES2InterfaceStub::DeleteProgram(GLuint /* program */) {
+}
+void GLES2InterfaceStub::DeleteRenderbuffers(
+ GLsizei /* n */,
+ const GLuint* /* renderbuffers */) {
+}
+void GLES2InterfaceStub::DeleteShader(GLuint /* shader */) {
+}
+void GLES2InterfaceStub::DeleteTextures(GLsizei /* n */,
+ const GLuint* /* textures */) {
+}
+void GLES2InterfaceStub::DepthFunc(GLenum /* func */) {
+}
+void GLES2InterfaceStub::DepthMask(GLboolean /* flag */) {
+}
+void GLES2InterfaceStub::DepthRangef(GLclampf /* zNear */,
+ GLclampf /* zFar */) {
+}
+void GLES2InterfaceStub::DetachShader(GLuint /* program */,
+ GLuint /* shader */) {
+}
+void GLES2InterfaceStub::Disable(GLenum /* cap */) {
+}
+void GLES2InterfaceStub::DisableVertexAttribArray(GLuint /* index */) {
+}
+void GLES2InterfaceStub::DrawArrays(GLenum /* mode */,
+ GLint /* first */,
+ GLsizei /* count */) {
+}
+void GLES2InterfaceStub::DrawElements(GLenum /* mode */,
+ GLsizei /* count */,
+ GLenum /* type */,
+ const void* /* indices */) {
+}
+void GLES2InterfaceStub::Enable(GLenum /* cap */) {
+}
+void GLES2InterfaceStub::EnableVertexAttribArray(GLuint /* index */) {
+}
+void GLES2InterfaceStub::Finish() {
+}
+void GLES2InterfaceStub::Flush() {
+}
+void GLES2InterfaceStub::FramebufferRenderbuffer(
+ GLenum /* target */,
+ GLenum /* attachment */,
+ GLenum /* renderbuffertarget */,
+ GLuint /* renderbuffer */) {
+}
+void GLES2InterfaceStub::FramebufferTexture2D(GLenum /* target */,
+ GLenum /* attachment */,
+ GLenum /* textarget */,
+ GLuint /* texture */,
+ GLint /* level */) {
+}
+void GLES2InterfaceStub::FrontFace(GLenum /* mode */) {
+}
+void GLES2InterfaceStub::GenBuffers(GLsizei /* n */, GLuint* /* buffers */) {
+}
+void GLES2InterfaceStub::GenerateMipmap(GLenum /* target */) {
+}
+void GLES2InterfaceStub::GenFramebuffers(GLsizei /* n */,
+ GLuint* /* framebuffers */) {
+}
+void GLES2InterfaceStub::GenRenderbuffers(GLsizei /* n */,
+ GLuint* /* renderbuffers */) {
+}
+void GLES2InterfaceStub::GenTextures(GLsizei /* n */, GLuint* /* textures */) {
+}
+void GLES2InterfaceStub::GetActiveAttrib(GLuint /* program */,
+ GLuint /* index */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ GLint* /* size */,
+ GLenum* /* type */,
+ char* /* name */) {
+}
+void GLES2InterfaceStub::GetActiveUniform(GLuint /* program */,
+ GLuint /* index */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ GLint* /* size */,
+ GLenum* /* type */,
+ char* /* name */) {
+}
+void GLES2InterfaceStub::GetAttachedShaders(GLuint /* program */,
+ GLsizei /* maxcount */,
+ GLsizei* /* count */,
+ GLuint* /* shaders */) {
+}
+GLint GLES2InterfaceStub::GetAttribLocation(GLuint /* program */,
+ const char* /* name */) {
+ return 0;
+}
+void GLES2InterfaceStub::GetBooleanv(GLenum /* pname */,
+ GLboolean* /* params */) {
+}
+void GLES2InterfaceStub::GetBufferParameteriv(GLenum /* target */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+GLenum GLES2InterfaceStub::GetError() {
+ return 0;
+}
+void GLES2InterfaceStub::GetFloatv(GLenum /* pname */, GLfloat* /* params */) {
+}
+void GLES2InterfaceStub::GetFramebufferAttachmentParameteriv(
+ GLenum /* target */,
+ GLenum /* attachment */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetIntegerv(GLenum /* pname */, GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetProgramiv(GLuint /* program */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetProgramInfoLog(GLuint /* program */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ char* /* infolog */) {
+}
+void GLES2InterfaceStub::GetRenderbufferParameteriv(GLenum /* target */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetShaderiv(GLuint /* shader */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetShaderInfoLog(GLuint /* shader */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ char* /* infolog */) {
+}
+void GLES2InterfaceStub::GetShaderPrecisionFormat(GLenum /* shadertype */,
+ GLenum /* precisiontype */,
+ GLint* /* range */,
+ GLint* /* precision */) {
+}
+void GLES2InterfaceStub::GetShaderSource(GLuint /* shader */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ char* /* source */) {
+}
+const GLubyte* GLES2InterfaceStub::GetString(GLenum /* name */) {
+ return 0;
+}
+void GLES2InterfaceStub::GetTexParameterfv(GLenum /* target */,
+ GLenum /* pname */,
+ GLfloat* /* params */) {
+}
+void GLES2InterfaceStub::GetTexParameteriv(GLenum /* target */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetUniformfv(GLuint /* program */,
+ GLint /* location */,
+ GLfloat* /* params */) {
+}
+void GLES2InterfaceStub::GetUniformiv(GLuint /* program */,
+ GLint /* location */,
+ GLint* /* params */) {
+}
+GLint GLES2InterfaceStub::GetUniformLocation(GLuint /* program */,
+ const char* /* name */) {
+ return 0;
+}
+void GLES2InterfaceStub::GetVertexAttribfv(GLuint /* index */,
+ GLenum /* pname */,
+ GLfloat* /* params */) {
+}
+void GLES2InterfaceStub::GetVertexAttribiv(GLuint /* index */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetVertexAttribPointerv(GLuint /* index */,
+ GLenum /* pname */,
+ void** /* pointer */) {
+}
+void GLES2InterfaceStub::Hint(GLenum /* target */, GLenum /* mode */) {
+}
+GLboolean GLES2InterfaceStub::IsBuffer(GLuint /* buffer */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsEnabled(GLenum /* cap */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsFramebuffer(GLuint /* framebuffer */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsProgram(GLuint /* program */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsRenderbuffer(GLuint /* renderbuffer */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsShader(GLuint /* shader */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsTexture(GLuint /* texture */) {
+ return 0;
+}
+void GLES2InterfaceStub::LineWidth(GLfloat /* width */) {
+}
+void GLES2InterfaceStub::LinkProgram(GLuint /* program */) {
+}
+void GLES2InterfaceStub::PixelStorei(GLenum /* pname */, GLint /* param */) {
+}
+void GLES2InterfaceStub::PolygonOffset(GLfloat /* factor */,
+ GLfloat /* units */) {
+}
+void GLES2InterfaceStub::ReadPixels(GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* format */,
+ GLenum /* type */,
+ void* /* pixels */) {
+}
+void GLES2InterfaceStub::ReleaseShaderCompiler() {
+}
+void GLES2InterfaceStub::RenderbufferStorage(GLenum /* target */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::SampleCoverage(GLclampf /* value */,
+ GLboolean /* invert */) {
+}
+void GLES2InterfaceStub::Scissor(GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::ShaderBinary(GLsizei /* n */,
+ const GLuint* /* shaders */,
+ GLenum /* binaryformat */,
+ const void* /* binary */,
+ GLsizei /* length */) {
+}
+void GLES2InterfaceStub::ShaderSource(GLuint /* shader */,
+ GLsizei /* count */,
+ const GLchar* const* /* str */,
+ const GLint* /* length */) {
+}
+void GLES2InterfaceStub::ShallowFinishCHROMIUM() {
+}
+void GLES2InterfaceStub::ShallowFlushCHROMIUM() {
+}
+void GLES2InterfaceStub::StencilFunc(GLenum /* func */,
+ GLint /* ref */,
+ GLuint /* mask */) {
+}
+void GLES2InterfaceStub::StencilFuncSeparate(GLenum /* face */,
+ GLenum /* func */,
+ GLint /* ref */,
+ GLuint /* mask */) {
+}
+void GLES2InterfaceStub::StencilMask(GLuint /* mask */) {
+}
+void GLES2InterfaceStub::StencilMaskSeparate(GLenum /* face */,
+ GLuint /* mask */) {
+}
+void GLES2InterfaceStub::StencilOp(GLenum /* fail */,
+ GLenum /* zfail */,
+ GLenum /* zpass */) {
+}
+void GLES2InterfaceStub::StencilOpSeparate(GLenum /* face */,
+ GLenum /* fail */,
+ GLenum /* zfail */,
+ GLenum /* zpass */) {
+}
+void GLES2InterfaceStub::TexImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLint /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLint /* border */,
+ GLenum /* format */,
+ GLenum /* type */,
+ const void* /* pixels */) {
+}
+void GLES2InterfaceStub::TexParameterf(GLenum /* target */,
+ GLenum /* pname */,
+ GLfloat /* param */) {
+}
+void GLES2InterfaceStub::TexParameterfv(GLenum /* target */,
+ GLenum /* pname */,
+ const GLfloat* /* params */) {
+}
+void GLES2InterfaceStub::TexParameteri(GLenum /* target */,
+ GLenum /* pname */,
+ GLint /* param */) {
+}
+void GLES2InterfaceStub::TexParameteriv(GLenum /* target */,
+ GLenum /* pname */,
+ const GLint* /* params */) {
+}
+void GLES2InterfaceStub::TexSubImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLint /* xoffset */,
+ GLint /* yoffset */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* format */,
+ GLenum /* type */,
+ const void* /* pixels */) {
+}
+void GLES2InterfaceStub::Uniform1f(GLint /* location */, GLfloat /* x */) {
+}
+void GLES2InterfaceStub::Uniform1fv(GLint /* location */,
+ GLsizei /* count */,
+ const GLfloat* /* v */) {
+}
+void GLES2InterfaceStub::Uniform1i(GLint /* location */, GLint /* x */) {
+}
+void GLES2InterfaceStub::Uniform1iv(GLint /* location */,
+ GLsizei /* count */,
+ const GLint* /* v */) {
+}
+void GLES2InterfaceStub::Uniform2f(GLint /* location */,
+ GLfloat /* x */,
+ GLfloat /* y */) {
+}
+void GLES2InterfaceStub::Uniform2fv(GLint /* location */,
+ GLsizei /* count */,
+ const GLfloat* /* v */) {
+}
+void GLES2InterfaceStub::Uniform2i(GLint /* location */,
+ GLint /* x */,
+ GLint /* y */) {
+}
+void GLES2InterfaceStub::Uniform2iv(GLint /* location */,
+ GLsizei /* count */,
+ const GLint* /* v */) {
+}
+void GLES2InterfaceStub::Uniform3f(GLint /* location */,
+ GLfloat /* x */,
+ GLfloat /* y */,
+ GLfloat /* z */) {
+}
+void GLES2InterfaceStub::Uniform3fv(GLint /* location */,
+ GLsizei /* count */,
+ const GLfloat* /* v */) {
+}
+void GLES2InterfaceStub::Uniform3i(GLint /* location */,
+ GLint /* x */,
+ GLint /* y */,
+ GLint /* z */) {
+}
+void GLES2InterfaceStub::Uniform3iv(GLint /* location */,
+ GLsizei /* count */,
+ const GLint* /* v */) {
+}
+void GLES2InterfaceStub::Uniform4f(GLint /* location */,
+ GLfloat /* x */,
+ GLfloat /* y */,
+ GLfloat /* z */,
+ GLfloat /* w */) {
+}
+void GLES2InterfaceStub::Uniform4fv(GLint /* location */,
+ GLsizei /* count */,
+ const GLfloat* /* v */) {
+}
+void GLES2InterfaceStub::Uniform4i(GLint /* location */,
+ GLint /* x */,
+ GLint /* y */,
+ GLint /* z */,
+ GLint /* w */) {
+}
+void GLES2InterfaceStub::Uniform4iv(GLint /* location */,
+ GLsizei /* count */,
+ const GLint* /* v */) {
+}
+void GLES2InterfaceStub::UniformMatrix2fv(GLint /* location */,
+ GLsizei /* count */,
+ GLboolean /* transpose */,
+ const GLfloat* /* value */) {
+}
+void GLES2InterfaceStub::UniformMatrix3fv(GLint /* location */,
+ GLsizei /* count */,
+ GLboolean /* transpose */,
+ const GLfloat* /* value */) {
+}
+void GLES2InterfaceStub::UniformMatrix4fv(GLint /* location */,
+ GLsizei /* count */,
+ GLboolean /* transpose */,
+ const GLfloat* /* value */) {
+}
+void GLES2InterfaceStub::UseProgram(GLuint /* program */) {
+}
+void GLES2InterfaceStub::ValidateProgram(GLuint /* program */) {
+}
+void GLES2InterfaceStub::VertexAttrib1f(GLuint /* indx */, GLfloat /* x */) {
+}
+void GLES2InterfaceStub::VertexAttrib1fv(GLuint /* indx */,
+ const GLfloat* /* values */) {
+}
+void GLES2InterfaceStub::VertexAttrib2f(GLuint /* indx */,
+ GLfloat /* x */,
+ GLfloat /* y */) {
+}
+void GLES2InterfaceStub::VertexAttrib2fv(GLuint /* indx */,
+ const GLfloat* /* values */) {
+}
+void GLES2InterfaceStub::VertexAttrib3f(GLuint /* indx */,
+ GLfloat /* x */,
+ GLfloat /* y */,
+ GLfloat /* z */) {
+}
+void GLES2InterfaceStub::VertexAttrib3fv(GLuint /* indx */,
+ const GLfloat* /* values */) {
+}
+void GLES2InterfaceStub::VertexAttrib4f(GLuint /* indx */,
+ GLfloat /* x */,
+ GLfloat /* y */,
+ GLfloat /* z */,
+ GLfloat /* w */) {
+}
+void GLES2InterfaceStub::VertexAttrib4fv(GLuint /* indx */,
+ const GLfloat* /* values */) {
+}
+void GLES2InterfaceStub::VertexAttribPointer(GLuint /* indx */,
+ GLint /* size */,
+ GLenum /* type */,
+ GLboolean /* normalized */,
+ GLsizei /* stride */,
+ const void* /* ptr */) {
+}
+void GLES2InterfaceStub::Viewport(GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::BlitFramebufferCHROMIUM(GLint /* srcX0 */,
+ GLint /* srcY0 */,
+ GLint /* srcX1 */,
+ GLint /* srcY1 */,
+ GLint /* dstX0 */,
+ GLint /* dstY0 */,
+ GLint /* dstX1 */,
+ GLint /* dstY1 */,
+ GLbitfield /* mask */,
+ GLenum /* filter */) {
+}
+void GLES2InterfaceStub::RenderbufferStorageMultisampleCHROMIUM(
+ GLenum /* target */,
+ GLsizei /* samples */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::RenderbufferStorageMultisampleEXT(
+ GLenum /* target */,
+ GLsizei /* samples */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::FramebufferTexture2DMultisampleEXT(
+ GLenum /* target */,
+ GLenum /* attachment */,
+ GLenum /* textarget */,
+ GLuint /* texture */,
+ GLint /* level */,
+ GLsizei /* samples */) {
+}
+void GLES2InterfaceStub::TexStorage2DEXT(GLenum /* target */,
+ GLsizei /* levels */,
+ GLenum /* internalFormat */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::GenQueriesEXT(GLsizei /* n */, GLuint* /* queries */) {
+}
+void GLES2InterfaceStub::DeleteQueriesEXT(GLsizei /* n */,
+ const GLuint* /* queries */) {
+}
+GLboolean GLES2InterfaceStub::IsQueryEXT(GLuint /* id */) {
+ return 0;
+}
+void GLES2InterfaceStub::BeginQueryEXT(GLenum /* target */, GLuint /* id */) {
+}
+void GLES2InterfaceStub::EndQueryEXT(GLenum /* target */) {
+}
+void GLES2InterfaceStub::GetQueryivEXT(GLenum /* target */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetQueryObjectuivEXT(GLuint /* id */,
+ GLenum /* pname */,
+ GLuint* /* params */) {
+}
+void GLES2InterfaceStub::InsertEventMarkerEXT(GLsizei /* length */,
+ const GLchar* /* marker */) {
+}
+void GLES2InterfaceStub::PushGroupMarkerEXT(GLsizei /* length */,
+ const GLchar* /* marker */) {
+}
+void GLES2InterfaceStub::PopGroupMarkerEXT() {
+}
+void GLES2InterfaceStub::GenVertexArraysOES(GLsizei /* n */,
+ GLuint* /* arrays */) {
+}
+void GLES2InterfaceStub::DeleteVertexArraysOES(GLsizei /* n */,
+ const GLuint* /* arrays */) {
+}
+GLboolean GLES2InterfaceStub::IsVertexArrayOES(GLuint /* array */) {
+ return 0;
+}
+void GLES2InterfaceStub::BindVertexArrayOES(GLuint /* array */) {
+}
+void GLES2InterfaceStub::SwapBuffers() {
+}
+GLuint GLES2InterfaceStub::GetMaxValueInBufferCHROMIUM(GLuint /* buffer_id */,
+ GLsizei /* count */,
+ GLenum /* type */,
+ GLuint /* offset */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::EnableFeatureCHROMIUM(const char* /* feature */) {
+ return 0;
+}
+void* GLES2InterfaceStub::MapBufferCHROMIUM(GLuint /* target */,
+ GLenum /* access */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::UnmapBufferCHROMIUM(GLuint /* target */) {
+ return 0;
+}
+void* GLES2InterfaceStub::MapImageCHROMIUM(GLuint /* image_id */) {
+ return 0;
+}
+void GLES2InterfaceStub::UnmapImageCHROMIUM(GLuint /* image_id */) {
+}
+void* GLES2InterfaceStub::MapBufferSubDataCHROMIUM(GLuint /* target */,
+ GLintptr /* offset */,
+ GLsizeiptr /* size */,
+ GLenum /* access */) {
+ return 0;
+}
+void GLES2InterfaceStub::UnmapBufferSubDataCHROMIUM(const void* /* mem */) {
+}
+void* GLES2InterfaceStub::MapTexSubImage2DCHROMIUM(GLenum /* target */,
+ GLint /* level */,
+ GLint /* xoffset */,
+ GLint /* yoffset */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* format */,
+ GLenum /* type */,
+ GLenum /* access */) {
+ return 0;
+}
+void GLES2InterfaceStub::UnmapTexSubImage2DCHROMIUM(const void* /* mem */) {
+}
+void GLES2InterfaceStub::ResizeCHROMIUM(GLuint /* width */,
+ GLuint /* height */,
+ GLfloat /* scale_factor */) {
+}
+const GLchar* GLES2InterfaceStub::GetRequestableExtensionsCHROMIUM() {
+ return 0;
+}
+void GLES2InterfaceStub::RequestExtensionCHROMIUM(const char* /* extension */) {
+}
+void GLES2InterfaceStub::RateLimitOffscreenContextCHROMIUM() {
+}
+void GLES2InterfaceStub::GetMultipleIntegervCHROMIUM(const GLenum* /* pnames */,
+ GLuint /* count */,
+ GLint* /* results */,
+ GLsizeiptr /* size */) {
+}
+void GLES2InterfaceStub::GetProgramInfoCHROMIUM(GLuint /* program */,
+ GLsizei /* bufsize */,
+ GLsizei* /* size */,
+ void* /* info */) {
+}
+GLuint GLES2InterfaceStub::CreateStreamTextureCHROMIUM(GLuint /* texture */) {
+ return 0;
+}
+GLuint GLES2InterfaceStub::CreateImageCHROMIUM(GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* internalformat */,
+ GLenum /* usage */) {
+ return 0;
+}
+void GLES2InterfaceStub::DestroyImageCHROMIUM(GLuint /* image_id */) {
+}
+void GLES2InterfaceStub::GetImageParameterivCHROMIUM(GLuint /* image_id */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+GLuint GLES2InterfaceStub::CreateGpuMemoryBufferImageCHROMIUM(
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* internalformat */,
+ GLenum /* usage */) {
+ return 0;
+}
+void GLES2InterfaceStub::GetTranslatedShaderSourceANGLE(GLuint /* shader */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ char* /* source */) {
+}
+void GLES2InterfaceStub::PostSubBufferCHROMIUM(GLint /* x */,
+ GLint /* y */,
+ GLint /* width */,
+ GLint /* height */) {
+}
+void GLES2InterfaceStub::TexImageIOSurface2DCHROMIUM(GLenum /* target */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLuint /* ioSurfaceId */,
+ GLuint /* plane */) {
+}
+void GLES2InterfaceStub::CopyTextureCHROMIUM(GLenum /* target */,
+ GLenum /* source_id */,
+ GLenum /* dest_id */,
+ GLint /* level */,
+ GLint /* internalformat */,
+ GLenum /* dest_type */) {
+}
+void GLES2InterfaceStub::DrawArraysInstancedANGLE(GLenum /* mode */,
+ GLint /* first */,
+ GLsizei /* count */,
+ GLsizei /* primcount */) {
+}
+void GLES2InterfaceStub::DrawElementsInstancedANGLE(GLenum /* mode */,
+ GLsizei /* count */,
+ GLenum /* type */,
+ const void* /* indices */,
+ GLsizei /* primcount */) {
+}
+void GLES2InterfaceStub::VertexAttribDivisorANGLE(GLuint /* index */,
+ GLuint /* divisor */) {
+}
+void GLES2InterfaceStub::GenMailboxCHROMIUM(GLbyte* /* mailbox */) {
+}
+void GLES2InterfaceStub::ProduceTextureCHROMIUM(GLenum /* target */,
+ const GLbyte* /* mailbox */) {
+}
+void GLES2InterfaceStub::ProduceTextureDirectCHROMIUM(
+ GLuint /* texture */,
+ GLenum /* target */,
+ const GLbyte* /* mailbox */) {
+}
+void GLES2InterfaceStub::ConsumeTextureCHROMIUM(GLenum /* target */,
+ const GLbyte* /* mailbox */) {
+}
+GLuint GLES2InterfaceStub::CreateAndConsumeTextureCHROMIUM(
+ GLenum /* target */,
+ const GLbyte* /* mailbox */) {
+ return 0;
+}
+void GLES2InterfaceStub::BindUniformLocationCHROMIUM(GLuint /* program */,
+ GLint /* location */,
+ const char* /* name */) {
+}
+void GLES2InterfaceStub::BindTexImage2DCHROMIUM(GLenum /* target */,
+ GLint /* imageId */) {
+}
+void GLES2InterfaceStub::ReleaseTexImage2DCHROMIUM(GLenum /* target */,
+ GLint /* imageId */) {
+}
+void GLES2InterfaceStub::TraceBeginCHROMIUM(const char* /* name */) {
+}
+void GLES2InterfaceStub::TraceEndCHROMIUM() {
+}
+void GLES2InterfaceStub::AsyncTexSubImage2DCHROMIUM(GLenum /* target */,
+ GLint /* level */,
+ GLint /* xoffset */,
+ GLint /* yoffset */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* format */,
+ GLenum /* type */,
+ const void* /* data */) {
+}
+void GLES2InterfaceStub::AsyncTexImage2DCHROMIUM(GLenum /* target */,
+ GLint /* level */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLint /* border */,
+ GLenum /* format */,
+ GLenum /* type */,
+ const void* /* pixels */) {
+}
+void GLES2InterfaceStub::WaitAsyncTexImage2DCHROMIUM(GLenum /* target */) {
+}
+void GLES2InterfaceStub::WaitAllAsyncTexImage2DCHROMIUM() {
+}
+void GLES2InterfaceStub::DiscardFramebufferEXT(
+ GLenum /* target */,
+ GLsizei /* count */,
+ const GLenum* /* attachments */) {
+}
+void GLES2InterfaceStub::LoseContextCHROMIUM(GLenum /* current */,
+ GLenum /* other */) {
+}
+GLuint GLES2InterfaceStub::InsertSyncPointCHROMIUM() {
+ return 0;
+}
+void GLES2InterfaceStub::WaitSyncPointCHROMIUM(GLuint /* sync_point */) {
+}
+void GLES2InterfaceStub::DrawBuffersEXT(GLsizei /* count */,
+ const GLenum* /* bufs */) {
+}
+void GLES2InterfaceStub::DiscardBackbufferCHROMIUM() {
+}
+void GLES2InterfaceStub::ScheduleOverlayPlaneCHROMIUM(
+ GLint /* plane_z_order */,
+ GLenum /* plane_transform */,
+ GLuint /* overlay_texture_id */,
+ GLint /* bounds_x */,
+ GLint /* bounds_y */,
+ GLint /* bounds_width */,
+ GLint /* bounds_height */,
+ GLfloat /* uv_x */,
+ GLfloat /* uv_y */,
+ GLfloat /* uv_width */,
+ GLfloat /* uv_height */) {
+}
+void GLES2InterfaceStub::MatrixLoadfCHROMIUM(GLenum /* matrixMode */,
+ const GLfloat* /* m */) {
+}
+void GLES2InterfaceStub::MatrixLoadIdentityCHROMIUM(GLenum /* matrixMode */) {
+}
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_lib.cc b/gpu/command_buffer/client/gles2_lib.cc
new file mode 100644
index 0000000..6c8b0ea
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_lib.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gles2_lib.h"
+#include <string.h>
+#include "gpu/command_buffer/common/thread_local.h"
+
+namespace gles2 {
+
+// This is defined in gles2_c_lib_autogen.h
+extern "C" {
+extern const NameToFunc g_gles2_function_table[];
+}
+
+// TODO(kbr): the use of this anonymous namespace core dumps the
+// linker on Mac OS X 10.6 when the symbol ordering file is used
+// namespace {
+static gpu::ThreadLocalKey g_gl_context_key;
+// } // namespace anonymous
+
+void Initialize() {
+ g_gl_context_key = gpu::ThreadLocalAlloc();
+}
+
+void Terminate() {
+ gpu::ThreadLocalFree(g_gl_context_key);
+ g_gl_context_key = 0;
+}
+
+gpu::gles2::GLES2Interface* GetGLContext() {
+ return static_cast<gpu::gles2::GLES2Interface*>(
+ gpu::ThreadLocalGetValue(g_gl_context_key));
+}
+
+void SetGLContext(gpu::gles2::GLES2Interface* context) {
+ gpu::ThreadLocalSetValue(g_gl_context_key, context);
+}
+
+GLES2FunctionPointer GetGLFunctionPointer(const char* name) {
+ for (const NameToFunc* named_function = g_gles2_function_table;
+ named_function->name;
+ ++named_function) {
+ if (!strcmp(name, named_function->name)) {
+ return named_function->func;
+ }
+ }
+ return NULL;
+}
+
+} // namespace gles2
+
+
+
+
diff --git a/gpu/command_buffer/client/gles2_lib.h b/gpu/command_buffer/client/gles2_lib.h
new file mode 100644
index 0000000..b90a2d8
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_lib.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// These functions emulate GLES2 over command buffers.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_LIB_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_LIB_H_
+
+#include "gpu/command_buffer/client/gles2_c_lib_export.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+
+namespace gles2 {
+
+typedef void (*GLES2FunctionPointer)(void);
+
+struct NameToFunc {
+ const char* name;
+ gles2::GLES2FunctionPointer func;
+};
+
+// Initialize the GLES2 library.
+GLES2_C_LIB_EXPORT void Initialize();
+
+// Terminate the GLES2 library.
+GLES2_C_LIB_EXPORT void Terminate();
+
+// Get the current GL context.
+GLES2_C_LIB_EXPORT gpu::gles2::GLES2Interface* GetGLContext();
+
+// Set the current GL context.
+GLES2_C_LIB_EXPORT void SetGLContext(gpu::gles2::GLES2Interface* impl);
+
+GLES2_C_LIB_EXPORT GLES2FunctionPointer GetGLFunctionPointer(const char* name);
+
+} // namespace gles2
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_LIB_H_
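
The two files above establish a per-thread "current context": Initialize() allocates a process-wide thread-local key, SetGLContext() binds a GLES2Interface to the calling thread, and callers can also resolve entry points by name from the generated g_gles2_function_table via GetGLFunctionPointer(). A minimal usage sketch follows; it is illustrative only (not part of the diff), and the concrete GLES2Interface passed in as |impl| and the "glFinish" table-entry name are assumptions.

  #include "gpu/command_buffer/client/gles2_lib.h"

  // Called once per process before any thread binds a context.
  void SetUpGLES2Lib() {
    gles2::Initialize();
  }

  // Called on each thread that issues GL calls; |impl| is an assumed
  // concrete GLES2Interface (e.g. a command-buffer-backed implementation).
  void BindContextForThisThread(gpu::gles2::GLES2Interface* impl) {
    gles2::SetGLContext(impl);  // GetGLContext() now returns |impl| on this thread.
    // Name-based lookup walks g_gles2_function_table until the name matches.
    // The returned generic pointer would need to be cast back to the real
    // glFinish signature before being called.
    gles2::GLES2FunctionPointer fp = gles2::GetGLFunctionPointer("glFinish");
    (void)fp;
  }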
diff --git a/gpu/command_buffer/client/gles2_trace_implementation.cc b/gpu/command_buffer/client/gles2_trace_implementation.cc
new file mode 100644
index 0000000..234f243
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_trace_implementation.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gles2_trace_implementation.h"
+#include "gpu/command_buffer/common/trace_event.h"
+
+namespace gpu {
+namespace gles2 {
+
+GLES2TraceImplementation::GLES2TraceImplementation(GLES2Interface* gl)
+ : gl_(gl) {
+}
+
+GLES2TraceImplementation::~GLES2TraceImplementation() {
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto-generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/client/gles2_trace_implementation.h b/gpu/command_buffer/client/gles2_trace_implementation.h
new file mode 100644
index 0000000..c215231
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_trace_implementation.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_H_
+
+#include "base/compiler_specific.h"
+#include "gles2_impl_export.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+
+namespace gpu {
+namespace gles2 {
+
+// GLES2TraceImplementation calls TRACE for every GL call.
+class GLES2_IMPL_EXPORT GLES2TraceImplementation
+ : NON_EXPORTED_BASE(public GLES2Interface) {
+ public:
+ explicit GLES2TraceImplementation(GLES2Interface* gl);
+ virtual ~GLES2TraceImplementation();
+
+ // Include the auto-generated part of this class. We split this because
+  // it means we can easily edit the non-auto-generated parts right here in
+ // this file instead of having to edit some template or the code generator.
+ #include "gpu/command_buffer/client/gles2_trace_implementation_autogen.h"
+
+ private:
+ GLES2Interface* gl_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_H_
+
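
GLES2TraceImplementation is a thin decorator: it holds a pointer to another GLES2Interface and, as the _impl_autogen.h file below shows, forwards every call after emitting a TRACE_EVENT_BINARY_EFFICIENT0 record in the "gpu" category. A minimal sketch of how a caller might wrap an existing interface; the name |real_gl| is a placeholder, not something defined in the diff.

  #include "gpu/command_buffer/client/gles2_trace_implementation.h"

  // Returns a GLES2Interface whose calls are traced and then forwarded
  // unchanged to |real_gl|. The caller owns the returned object.
  gpu::gles2::GLES2Interface* WrapWithTracing(
      gpu::gles2::GLES2Interface* real_gl) {
    return new gpu::gles2::GLES2TraceImplementation(real_gl);
  }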
diff --git a/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
new file mode 100644
index 0000000..1083251
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -0,0 +1,536 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_trace_implementation.h
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
+
+virtual void ActiveTexture(GLenum texture) OVERRIDE;
+virtual void AttachShader(GLuint program, GLuint shader) OVERRIDE;
+virtual void BindAttribLocation(GLuint program,
+ GLuint index,
+ const char* name) OVERRIDE;
+virtual void BindBuffer(GLenum target, GLuint buffer) OVERRIDE;
+virtual void BindFramebuffer(GLenum target, GLuint framebuffer) OVERRIDE;
+virtual void BindRenderbuffer(GLenum target, GLuint renderbuffer) OVERRIDE;
+virtual void BindTexture(GLenum target, GLuint texture) OVERRIDE;
+virtual void BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+virtual void BlendEquation(GLenum mode) OVERRIDE;
+virtual void BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) OVERRIDE;
+virtual void BlendFunc(GLenum sfactor, GLenum dfactor) OVERRIDE;
+virtual void BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) OVERRIDE;
+virtual void BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) OVERRIDE;
+virtual void BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) OVERRIDE;
+virtual GLenum CheckFramebufferStatus(GLenum target) OVERRIDE;
+virtual void Clear(GLbitfield mask) OVERRIDE;
+virtual void ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+virtual void ClearDepthf(GLclampf depth) OVERRIDE;
+virtual void ClearStencil(GLint s) OVERRIDE;
+virtual void ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) OVERRIDE;
+virtual void CompileShader(GLuint shader) OVERRIDE;
+virtual void CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+virtual void CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+virtual void CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) OVERRIDE;
+virtual void CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual GLuint CreateProgram() OVERRIDE;
+virtual GLuint CreateShader(GLenum type) OVERRIDE;
+virtual void CullFace(GLenum mode) OVERRIDE;
+virtual void DeleteBuffers(GLsizei n, const GLuint* buffers) OVERRIDE;
+virtual void DeleteFramebuffers(GLsizei n, const GLuint* framebuffers) OVERRIDE;
+virtual void DeleteProgram(GLuint program) OVERRIDE;
+virtual void DeleteRenderbuffers(GLsizei n,
+ const GLuint* renderbuffers) OVERRIDE;
+virtual void DeleteShader(GLuint shader) OVERRIDE;
+virtual void DeleteTextures(GLsizei n, const GLuint* textures) OVERRIDE;
+virtual void DepthFunc(GLenum func) OVERRIDE;
+virtual void DepthMask(GLboolean flag) OVERRIDE;
+virtual void DepthRangef(GLclampf zNear, GLclampf zFar) OVERRIDE;
+virtual void DetachShader(GLuint program, GLuint shader) OVERRIDE;
+virtual void Disable(GLenum cap) OVERRIDE;
+virtual void DisableVertexAttribArray(GLuint index) OVERRIDE;
+virtual void DrawArrays(GLenum mode, GLint first, GLsizei count) OVERRIDE;
+virtual void DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) OVERRIDE;
+virtual void Enable(GLenum cap) OVERRIDE;
+virtual void EnableVertexAttribArray(GLuint index) OVERRIDE;
+virtual void Finish() OVERRIDE;
+virtual void Flush() OVERRIDE;
+virtual void FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) OVERRIDE;
+virtual void FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) OVERRIDE;
+virtual void FrontFace(GLenum mode) OVERRIDE;
+virtual void GenBuffers(GLsizei n, GLuint* buffers) OVERRIDE;
+virtual void GenerateMipmap(GLenum target) OVERRIDE;
+virtual void GenFramebuffers(GLsizei n, GLuint* framebuffers) OVERRIDE;
+virtual void GenRenderbuffers(GLsizei n, GLuint* renderbuffers) OVERRIDE;
+virtual void GenTextures(GLsizei n, GLuint* textures) OVERRIDE;
+virtual void GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+virtual void GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+virtual void GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) OVERRIDE;
+virtual GLint GetAttribLocation(GLuint program, const char* name) OVERRIDE;
+virtual void GetBooleanv(GLenum pname, GLboolean* params) OVERRIDE;
+virtual void GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual GLenum GetError() OVERRIDE;
+virtual void GetFloatv(GLenum pname, GLfloat* params) OVERRIDE;
+virtual void GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetIntegerv(GLenum pname, GLint* params) OVERRIDE;
+virtual void GetProgramiv(GLuint program, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+virtual void GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetShaderiv(GLuint shader, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+virtual void GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) OVERRIDE;
+virtual void GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+virtual const GLubyte* GetString(GLenum name) OVERRIDE;
+virtual void GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) OVERRIDE;
+virtual void GetTexParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetUniformfv(GLuint program,
+ GLint location,
+ GLfloat* params) OVERRIDE;
+virtual void GetUniformiv(GLuint program,
+ GLint location,
+ GLint* params) OVERRIDE;
+virtual GLint GetUniformLocation(GLuint program, const char* name) OVERRIDE;
+virtual void GetVertexAttribfv(GLuint index,
+ GLenum pname,
+ GLfloat* params) OVERRIDE;
+virtual void GetVertexAttribiv(GLuint index,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ void** pointer) OVERRIDE;
+virtual void Hint(GLenum target, GLenum mode) OVERRIDE;
+virtual GLboolean IsBuffer(GLuint buffer) OVERRIDE;
+virtual GLboolean IsEnabled(GLenum cap) OVERRIDE;
+virtual GLboolean IsFramebuffer(GLuint framebuffer) OVERRIDE;
+virtual GLboolean IsProgram(GLuint program) OVERRIDE;
+virtual GLboolean IsRenderbuffer(GLuint renderbuffer) OVERRIDE;
+virtual GLboolean IsShader(GLuint shader) OVERRIDE;
+virtual GLboolean IsTexture(GLuint texture) OVERRIDE;
+virtual void LineWidth(GLfloat width) OVERRIDE;
+virtual void LinkProgram(GLuint program) OVERRIDE;
+virtual void PixelStorei(GLenum pname, GLint param) OVERRIDE;
+virtual void PolygonOffset(GLfloat factor, GLfloat units) OVERRIDE;
+virtual void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) OVERRIDE;
+virtual void ReleaseShaderCompiler() OVERRIDE;
+virtual void RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void SampleCoverage(GLclampf value, GLboolean invert) OVERRIDE;
+virtual void Scissor(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+virtual void ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) OVERRIDE;
+virtual void ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) OVERRIDE;
+virtual void ShallowFinishCHROMIUM() OVERRIDE;
+virtual void ShallowFlushCHROMIUM() OVERRIDE;
+virtual void StencilFunc(GLenum func, GLint ref, GLuint mask) OVERRIDE;
+virtual void StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) OVERRIDE;
+virtual void StencilMask(GLuint mask) OVERRIDE;
+virtual void StencilMaskSeparate(GLenum face, GLuint mask) OVERRIDE;
+virtual void StencilOp(GLenum fail, GLenum zfail, GLenum zpass) OVERRIDE;
+virtual void StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) OVERRIDE;
+virtual void TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void TexParameterf(GLenum target, GLenum pname, GLfloat param) OVERRIDE;
+virtual void TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) OVERRIDE;
+virtual void TexParameteri(GLenum target, GLenum pname, GLint param) OVERRIDE;
+virtual void TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) OVERRIDE;
+virtual void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void Uniform1f(GLint location, GLfloat x) OVERRIDE;
+virtual void Uniform1fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform1i(GLint location, GLint x) OVERRIDE;
+virtual void Uniform1iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform2f(GLint location, GLfloat x, GLfloat y) OVERRIDE;
+virtual void Uniform2fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform2i(GLint location, GLint x, GLint y) OVERRIDE;
+virtual void Uniform2iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform3f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+virtual void Uniform3fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform3i(GLint location, GLint x, GLint y, GLint z) OVERRIDE;
+virtual void Uniform3iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+virtual void Uniform4fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform4i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z,
+ GLint w) OVERRIDE;
+virtual void Uniform4iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UseProgram(GLuint program) OVERRIDE;
+virtual void ValidateProgram(GLuint program) OVERRIDE;
+virtual void VertexAttrib1f(GLuint indx, GLfloat x) OVERRIDE;
+virtual void VertexAttrib1fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) OVERRIDE;
+virtual void VertexAttrib2fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib3f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+virtual void VertexAttrib3fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+virtual void VertexAttrib4fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) OVERRIDE;
+virtual void Viewport(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+virtual void BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) OVERRIDE;
+virtual void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) OVERRIDE;
+virtual void TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void GenQueriesEXT(GLsizei n, GLuint* queries) OVERRIDE;
+virtual void DeleteQueriesEXT(GLsizei n, const GLuint* queries) OVERRIDE;
+virtual GLboolean IsQueryEXT(GLuint id) OVERRIDE;
+virtual void BeginQueryEXT(GLenum target, GLuint id) OVERRIDE;
+virtual void EndQueryEXT(GLenum target) OVERRIDE;
+virtual void GetQueryivEXT(GLenum target, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetQueryObjectuivEXT(GLuint id,
+ GLenum pname,
+ GLuint* params) OVERRIDE;
+virtual void InsertEventMarkerEXT(GLsizei length,
+ const GLchar* marker) OVERRIDE;
+virtual void PushGroupMarkerEXT(GLsizei length, const GLchar* marker) OVERRIDE;
+virtual void PopGroupMarkerEXT() OVERRIDE;
+virtual void GenVertexArraysOES(GLsizei n, GLuint* arrays) OVERRIDE;
+virtual void DeleteVertexArraysOES(GLsizei n, const GLuint* arrays) OVERRIDE;
+virtual GLboolean IsVertexArrayOES(GLuint array) OVERRIDE;
+virtual void BindVertexArrayOES(GLuint array) OVERRIDE;
+virtual void SwapBuffers() OVERRIDE;
+virtual GLuint GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) OVERRIDE;
+virtual GLboolean EnableFeatureCHROMIUM(const char* feature) OVERRIDE;
+virtual void* MapBufferCHROMIUM(GLuint target, GLenum access) OVERRIDE;
+virtual GLboolean UnmapBufferCHROMIUM(GLuint target) OVERRIDE;
+virtual void* MapImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void UnmapImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void* MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) OVERRIDE;
+virtual void UnmapBufferSubDataCHROMIUM(const void* mem) OVERRIDE;
+virtual void* MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) OVERRIDE;
+virtual void UnmapTexSubImage2DCHROMIUM(const void* mem) OVERRIDE;
+virtual void ResizeCHROMIUM(GLuint width,
+ GLuint height,
+ GLfloat scale_factor) OVERRIDE;
+virtual const GLchar* GetRequestableExtensionsCHROMIUM() OVERRIDE;
+virtual void RequestExtensionCHROMIUM(const char* extension) OVERRIDE;
+virtual void RateLimitOffscreenContextCHROMIUM() OVERRIDE;
+virtual void GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) OVERRIDE;
+virtual void GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) OVERRIDE;
+virtual GLuint CreateStreamTextureCHROMIUM(GLuint texture) OVERRIDE;
+virtual GLuint CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+virtual void DestroyImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual GLuint CreateGpuMemoryBufferImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+virtual void GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+virtual void PostSubBufferCHROMIUM(GLint x,
+ GLint y,
+ GLint width,
+ GLint height) OVERRIDE;
+virtual void TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) OVERRIDE;
+virtual void CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) OVERRIDE;
+virtual void DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) OVERRIDE;
+virtual void DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) OVERRIDE;
+virtual void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) OVERRIDE;
+virtual void GenMailboxCHROMIUM(GLbyte* mailbox) OVERRIDE;
+virtual void ProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void ProduceTextureDirectCHROMIUM(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void ConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual GLuint CreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) OVERRIDE;
+virtual void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+virtual void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+virtual void TraceBeginCHROMIUM(const char* name) OVERRIDE;
+virtual void TraceEndCHROMIUM() OVERRIDE;
+virtual void AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) OVERRIDE;
+virtual void AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) OVERRIDE;
+virtual void WaitAllAsyncTexImage2DCHROMIUM() OVERRIDE;
+virtual void DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) OVERRIDE;
+virtual void LoseContextCHROMIUM(GLenum current, GLenum other) OVERRIDE;
+virtual GLuint InsertSyncPointCHROMIUM() OVERRIDE;
+virtual void WaitSyncPointCHROMIUM(GLuint sync_point) OVERRIDE;
+virtual void DrawBuffersEXT(GLsizei count, const GLenum* bufs) OVERRIDE;
+virtual void DiscardBackbufferCHROMIUM() OVERRIDE;
+virtual void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) OVERRIDE;
+virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) OVERRIDE;
+virtual void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) OVERRIDE;
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
new file mode 100644
index 0000000..4495967
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -0,0 +1,1530 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_trace_implementation.cc
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_
+
+void GLES2TraceImplementation::ActiveTexture(GLenum texture) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ActiveTexture");
+ gl_->ActiveTexture(texture);
+}
+
+void GLES2TraceImplementation::AttachShader(GLuint program, GLuint shader) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::AttachShader");
+ gl_->AttachShader(program, shader);
+}
+
+void GLES2TraceImplementation::BindAttribLocation(GLuint program,
+ GLuint index,
+ const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindAttribLocation");
+ gl_->BindAttribLocation(program, index, name);
+}
+
+void GLES2TraceImplementation::BindBuffer(GLenum target, GLuint buffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindBuffer");
+ gl_->BindBuffer(target, buffer);
+}
+
+void GLES2TraceImplementation::BindFramebuffer(GLenum target,
+ GLuint framebuffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindFramebuffer");
+ gl_->BindFramebuffer(target, framebuffer);
+}
+
+void GLES2TraceImplementation::BindRenderbuffer(GLenum target,
+ GLuint renderbuffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindRenderbuffer");
+ gl_->BindRenderbuffer(target, renderbuffer);
+}
+
+void GLES2TraceImplementation::BindTexture(GLenum target, GLuint texture) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindTexture");
+ gl_->BindTexture(target, texture);
+}
+
+void GLES2TraceImplementation::BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendColor");
+ gl_->BlendColor(red, green, blue, alpha);
+}
+
+void GLES2TraceImplementation::BlendEquation(GLenum mode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendEquation");
+ gl_->BlendEquation(mode);
+}
+
+void GLES2TraceImplementation::BlendEquationSeparate(GLenum modeRGB,
+ GLenum modeAlpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendEquationSeparate");
+ gl_->BlendEquationSeparate(modeRGB, modeAlpha);
+}
+
+void GLES2TraceImplementation::BlendFunc(GLenum sfactor, GLenum dfactor) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendFunc");
+ gl_->BlendFunc(sfactor, dfactor);
+}
+
+void GLES2TraceImplementation::BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendFuncSeparate");
+ gl_->BlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha);
+}
+
+void GLES2TraceImplementation::BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BufferData");
+ gl_->BufferData(target, size, data, usage);
+}
+
+void GLES2TraceImplementation::BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BufferSubData");
+ gl_->BufferSubData(target, offset, size, data);
+}
+
+GLenum GLES2TraceImplementation::CheckFramebufferStatus(GLenum target) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CheckFramebufferStatus");
+ return gl_->CheckFramebufferStatus(target);
+}
+
+void GLES2TraceImplementation::Clear(GLbitfield mask) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Clear");
+ gl_->Clear(mask);
+}
+
+void GLES2TraceImplementation::ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ClearColor");
+ gl_->ClearColor(red, green, blue, alpha);
+}
+
+void GLES2TraceImplementation::ClearDepthf(GLclampf depth) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ClearDepthf");
+ gl_->ClearDepthf(depth);
+}
+
+void GLES2TraceImplementation::ClearStencil(GLint s) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ClearStencil");
+ gl_->ClearStencil(s);
+}
+
+void GLES2TraceImplementation::ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ColorMask");
+ gl_->ColorMask(red, green, blue, alpha);
+}
+
+void GLES2TraceImplementation::CompileShader(GLuint shader) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CompileShader");
+ gl_->CompileShader(shader);
+}
+
+void GLES2TraceImplementation::CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CompressedTexImage2D");
+ gl_->CompressedTexImage2D(
+ target, level, internalformat, width, height, border, imageSize, data);
+}
+
+void GLES2TraceImplementation::CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CompressedTexSubImage2D");
+ gl_->CompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, imageSize, data);
+}
+
+void GLES2TraceImplementation::CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CopyTexImage2D");
+ gl_->CopyTexImage2D(
+ target, level, internalformat, x, y, width, height, border);
+}
+
+void GLES2TraceImplementation::CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CopyTexSubImage2D");
+ gl_->CopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height);
+}
+
+GLuint GLES2TraceImplementation::CreateProgram() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CreateProgram");
+ return gl_->CreateProgram();
+}
+
+GLuint GLES2TraceImplementation::CreateShader(GLenum type) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CreateShader");
+ return gl_->CreateShader(type);
+}
+
+void GLES2TraceImplementation::CullFace(GLenum mode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CullFace");
+ gl_->CullFace(mode);
+}
+
+void GLES2TraceImplementation::DeleteBuffers(GLsizei n, const GLuint* buffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteBuffers");
+ gl_->DeleteBuffers(n, buffers);
+}
+
+void GLES2TraceImplementation::DeleteFramebuffers(GLsizei n,
+ const GLuint* framebuffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteFramebuffers");
+ gl_->DeleteFramebuffers(n, framebuffers);
+}
+
+void GLES2TraceImplementation::DeleteProgram(GLuint program) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteProgram");
+ gl_->DeleteProgram(program);
+}
+
+void GLES2TraceImplementation::DeleteRenderbuffers(
+ GLsizei n,
+ const GLuint* renderbuffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteRenderbuffers");
+ gl_->DeleteRenderbuffers(n, renderbuffers);
+}
+
+void GLES2TraceImplementation::DeleteShader(GLuint shader) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteShader");
+ gl_->DeleteShader(shader);
+}
+
+void GLES2TraceImplementation::DeleteTextures(GLsizei n,
+ const GLuint* textures) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteTextures");
+ gl_->DeleteTextures(n, textures);
+}
+
+void GLES2TraceImplementation::DepthFunc(GLenum func) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DepthFunc");
+ gl_->DepthFunc(func);
+}
+
+void GLES2TraceImplementation::DepthMask(GLboolean flag) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DepthMask");
+ gl_->DepthMask(flag);
+}
+
+void GLES2TraceImplementation::DepthRangef(GLclampf zNear, GLclampf zFar) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DepthRangef");
+ gl_->DepthRangef(zNear, zFar);
+}
+
+void GLES2TraceImplementation::DetachShader(GLuint program, GLuint shader) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DetachShader");
+ gl_->DetachShader(program, shader);
+}
+
+void GLES2TraceImplementation::Disable(GLenum cap) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Disable");
+ gl_->Disable(cap);
+}
+
+void GLES2TraceImplementation::DisableVertexAttribArray(GLuint index) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DisableVertexAttribArray");
+ gl_->DisableVertexAttribArray(index);
+}
+
+void GLES2TraceImplementation::DrawArrays(GLenum mode,
+ GLint first,
+ GLsizei count) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DrawArrays");
+ gl_->DrawArrays(mode, first, count);
+}
+
+void GLES2TraceImplementation::DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DrawElements");
+ gl_->DrawElements(mode, count, type, indices);
+}
+
+void GLES2TraceImplementation::Enable(GLenum cap) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Enable");
+ gl_->Enable(cap);
+}
+
+void GLES2TraceImplementation::EnableVertexAttribArray(GLuint index) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::EnableVertexAttribArray");
+ gl_->EnableVertexAttribArray(index);
+}
+
+void GLES2TraceImplementation::Finish() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Finish");
+ gl_->Finish();
+}
+
+void GLES2TraceImplementation::Flush() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Flush");
+ gl_->Flush();
+}
+
+void GLES2TraceImplementation::FramebufferRenderbuffer(
+ GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::FramebufferRenderbuffer");
+ gl_->FramebufferRenderbuffer(
+ target, attachment, renderbuffertarget, renderbuffer);
+}
+
+void GLES2TraceImplementation::FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::FramebufferTexture2D");
+ gl_->FramebufferTexture2D(target, attachment, textarget, texture, level);
+}
+
+void GLES2TraceImplementation::FrontFace(GLenum mode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::FrontFace");
+ gl_->FrontFace(mode);
+}
+
+void GLES2TraceImplementation::GenBuffers(GLsizei n, GLuint* buffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenBuffers");
+ gl_->GenBuffers(n, buffers);
+}
+
+void GLES2TraceImplementation::GenerateMipmap(GLenum target) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenerateMipmap");
+ gl_->GenerateMipmap(target);
+}
+
+void GLES2TraceImplementation::GenFramebuffers(GLsizei n,
+ GLuint* framebuffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenFramebuffers");
+ gl_->GenFramebuffers(n, framebuffers);
+}
+
+void GLES2TraceImplementation::GenRenderbuffers(GLsizei n,
+ GLuint* renderbuffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenRenderbuffers");
+ gl_->GenRenderbuffers(n, renderbuffers);
+}
+
+void GLES2TraceImplementation::GenTextures(GLsizei n, GLuint* textures) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenTextures");
+ gl_->GenTextures(n, textures);
+}
+
+void GLES2TraceImplementation::GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetActiveAttrib");
+ gl_->GetActiveAttrib(program, index, bufsize, length, size, type, name);
+}
+
+void GLES2TraceImplementation::GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetActiveUniform");
+ gl_->GetActiveUniform(program, index, bufsize, length, size, type, name);
+}
+
+void GLES2TraceImplementation::GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetAttachedShaders");
+ gl_->GetAttachedShaders(program, maxcount, count, shaders);
+}
+
+GLint GLES2TraceImplementation::GetAttribLocation(GLuint program,
+ const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetAttribLocation");
+ return gl_->GetAttribLocation(program, name);
+}
+
+void GLES2TraceImplementation::GetBooleanv(GLenum pname, GLboolean* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetBooleanv");
+ gl_->GetBooleanv(pname, params);
+}
+
+void GLES2TraceImplementation::GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetBufferParameteriv");
+ gl_->GetBufferParameteriv(target, pname, params);
+}
+
+GLenum GLES2TraceImplementation::GetError() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetError");
+ return gl_->GetError();
+}
+
+void GLES2TraceImplementation::GetFloatv(GLenum pname, GLfloat* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetFloatv");
+ gl_->GetFloatv(pname, params);
+}
+
+void GLES2TraceImplementation::GetFramebufferAttachmentParameteriv(
+ GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::GetFramebufferAttachmentParameteriv");
+ gl_->GetFramebufferAttachmentParameteriv(target, attachment, pname, params);
+}
+
+void GLES2TraceImplementation::GetIntegerv(GLenum pname, GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetIntegerv");
+ gl_->GetIntegerv(pname, params);
+}
+
+void GLES2TraceImplementation::GetProgramiv(GLuint program,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetProgramiv");
+ gl_->GetProgramiv(program, pname, params);
+}
+
+void GLES2TraceImplementation::GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetProgramInfoLog");
+ gl_->GetProgramInfoLog(program, bufsize, length, infolog);
+}
+
+void GLES2TraceImplementation::GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetRenderbufferParameteriv");
+ gl_->GetRenderbufferParameteriv(target, pname, params);
+}
+
+void GLES2TraceImplementation::GetShaderiv(GLuint shader,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetShaderiv");
+ gl_->GetShaderiv(shader, pname, params);
+}
+
+void GLES2TraceImplementation::GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetShaderInfoLog");
+ gl_->GetShaderInfoLog(shader, bufsize, length, infolog);
+}
+
+void GLES2TraceImplementation::GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetShaderPrecisionFormat");
+ gl_->GetShaderPrecisionFormat(shadertype, precisiontype, range, precision);
+}
+
+void GLES2TraceImplementation::GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetShaderSource");
+ gl_->GetShaderSource(shader, bufsize, length, source);
+}
+
+const GLubyte* GLES2TraceImplementation::GetString(GLenum name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetString");
+ return gl_->GetString(name);
+}
+
+void GLES2TraceImplementation::GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetTexParameterfv");
+ gl_->GetTexParameterfv(target, pname, params);
+}
+
+void GLES2TraceImplementation::GetTexParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetTexParameteriv");
+ gl_->GetTexParameteriv(target, pname, params);
+}
+
+void GLES2TraceImplementation::GetUniformfv(GLuint program,
+ GLint location,
+ GLfloat* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetUniformfv");
+ gl_->GetUniformfv(program, location, params);
+}
+
+void GLES2TraceImplementation::GetUniformiv(GLuint program,
+ GLint location,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetUniformiv");
+ gl_->GetUniformiv(program, location, params);
+}
+
+GLint GLES2TraceImplementation::GetUniformLocation(GLuint program,
+ const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetUniformLocation");
+ return gl_->GetUniformLocation(program, name);
+}
+
+void GLES2TraceImplementation::GetVertexAttribfv(GLuint index,
+ GLenum pname,
+ GLfloat* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetVertexAttribfv");
+ gl_->GetVertexAttribfv(index, pname, params);
+}
+
+void GLES2TraceImplementation::GetVertexAttribiv(GLuint index,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetVertexAttribiv");
+ gl_->GetVertexAttribiv(index, pname, params);
+}
+
+void GLES2TraceImplementation::GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ void** pointer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetVertexAttribPointerv");
+ gl_->GetVertexAttribPointerv(index, pname, pointer);
+}
+
+void GLES2TraceImplementation::Hint(GLenum target, GLenum mode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Hint");
+ gl_->Hint(target, mode);
+}
+
+GLboolean GLES2TraceImplementation::IsBuffer(GLuint buffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsBuffer");
+ return gl_->IsBuffer(buffer);
+}
+
+GLboolean GLES2TraceImplementation::IsEnabled(GLenum cap) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsEnabled");
+ return gl_->IsEnabled(cap);
+}
+
+GLboolean GLES2TraceImplementation::IsFramebuffer(GLuint framebuffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsFramebuffer");
+ return gl_->IsFramebuffer(framebuffer);
+}
+
+GLboolean GLES2TraceImplementation::IsProgram(GLuint program) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsProgram");
+ return gl_->IsProgram(program);
+}
+
+GLboolean GLES2TraceImplementation::IsRenderbuffer(GLuint renderbuffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsRenderbuffer");
+ return gl_->IsRenderbuffer(renderbuffer);
+}
+
+GLboolean GLES2TraceImplementation::IsShader(GLuint shader) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsShader");
+ return gl_->IsShader(shader);
+}
+
+GLboolean GLES2TraceImplementation::IsTexture(GLuint texture) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsTexture");
+ return gl_->IsTexture(texture);
+}
+
+void GLES2TraceImplementation::LineWidth(GLfloat width) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::LineWidth");
+ gl_->LineWidth(width);
+}
+
+void GLES2TraceImplementation::LinkProgram(GLuint program) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::LinkProgram");
+ gl_->LinkProgram(program);
+}
+
+void GLES2TraceImplementation::PixelStorei(GLenum pname, GLint param) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PixelStorei");
+ gl_->PixelStorei(pname, param);
+}
+
+void GLES2TraceImplementation::PolygonOffset(GLfloat factor, GLfloat units) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PolygonOffset");
+ gl_->PolygonOffset(factor, units);
+}
+
+void GLES2TraceImplementation::ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ReadPixels");
+ gl_->ReadPixels(x, y, width, height, format, type, pixels);
+}
+
+void GLES2TraceImplementation::ReleaseShaderCompiler() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ReleaseShaderCompiler");
+ gl_->ReleaseShaderCompiler();
+}
+
+void GLES2TraceImplementation::RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::RenderbufferStorage");
+ gl_->RenderbufferStorage(target, internalformat, width, height);
+}
+
+void GLES2TraceImplementation::SampleCoverage(GLclampf value,
+ GLboolean invert) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::SampleCoverage");
+ gl_->SampleCoverage(value, invert);
+}
+
+void GLES2TraceImplementation::Scissor(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Scissor");
+ gl_->Scissor(x, y, width, height);
+}
+
+void GLES2TraceImplementation::ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ShaderBinary");
+ gl_->ShaderBinary(n, shaders, binaryformat, binary, length);
+}
+
+void GLES2TraceImplementation::ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ShaderSource");
+ gl_->ShaderSource(shader, count, str, length);
+}
+
+void GLES2TraceImplementation::ShallowFinishCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ShallowFinishCHROMIUM");
+ gl_->ShallowFinishCHROMIUM();
+}
+
+void GLES2TraceImplementation::ShallowFlushCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ShallowFlushCHROMIUM");
+ gl_->ShallowFlushCHROMIUM();
+}
+
+void GLES2TraceImplementation::StencilFunc(GLenum func,
+ GLint ref,
+ GLuint mask) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilFunc");
+ gl_->StencilFunc(func, ref, mask);
+}
+
+void GLES2TraceImplementation::StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilFuncSeparate");
+ gl_->StencilFuncSeparate(face, func, ref, mask);
+}
+
+void GLES2TraceImplementation::StencilMask(GLuint mask) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilMask");
+ gl_->StencilMask(mask);
+}
+
+void GLES2TraceImplementation::StencilMaskSeparate(GLenum face, GLuint mask) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilMaskSeparate");
+ gl_->StencilMaskSeparate(face, mask);
+}
+
+void GLES2TraceImplementation::StencilOp(GLenum fail,
+ GLenum zfail,
+ GLenum zpass) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilOp");
+ gl_->StencilOp(fail, zfail, zpass);
+}
+
+void GLES2TraceImplementation::StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilOpSeparate");
+ gl_->StencilOpSeparate(face, fail, zfail, zpass);
+}
+
+void GLES2TraceImplementation::TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexImage2D");
+ gl_->TexImage2D(target,
+ level,
+ internalformat,
+ width,
+ height,
+ border,
+ format,
+ type,
+ pixels);
+}
+
+void GLES2TraceImplementation::TexParameterf(GLenum target,
+ GLenum pname,
+ GLfloat param) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexParameterf");
+ gl_->TexParameterf(target, pname, param);
+}
+
+void GLES2TraceImplementation::TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexParameterfv");
+ gl_->TexParameterfv(target, pname, params);
+}
+
+void GLES2TraceImplementation::TexParameteri(GLenum target,
+ GLenum pname,
+ GLint param) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexParameteri");
+ gl_->TexParameteri(target, pname, param);
+}
+
+void GLES2TraceImplementation::TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexParameteriv");
+ gl_->TexParameteriv(target, pname, params);
+}
+
+void GLES2TraceImplementation::TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexSubImage2D");
+ gl_->TexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type, pixels);
+}
+
+void GLES2TraceImplementation::Uniform1f(GLint location, GLfloat x) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform1f");
+ gl_->Uniform1f(location, x);
+}
+
+void GLES2TraceImplementation::Uniform1fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform1fv");
+ gl_->Uniform1fv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform1i(GLint location, GLint x) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform1i");
+ gl_->Uniform1i(location, x);
+}
+
+void GLES2TraceImplementation::Uniform1iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform1iv");
+ gl_->Uniform1iv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform2f(GLint location, GLfloat x, GLfloat y) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform2f");
+ gl_->Uniform2f(location, x, y);
+}
+
+void GLES2TraceImplementation::Uniform2fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform2fv");
+ gl_->Uniform2fv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform2i(GLint location, GLint x, GLint y) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform2i");
+ gl_->Uniform2i(location, x, y);
+}
+
+void GLES2TraceImplementation::Uniform2iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform2iv");
+ gl_->Uniform2iv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform3f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform3f");
+ gl_->Uniform3f(location, x, y, z);
+}
+
+void GLES2TraceImplementation::Uniform3fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform3fv");
+ gl_->Uniform3fv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform3i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform3i");
+ gl_->Uniform3i(location, x, y, z);
+}
+
+void GLES2TraceImplementation::Uniform3iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform3iv");
+ gl_->Uniform3iv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform4f");
+ gl_->Uniform4f(location, x, y, z, w);
+}
+
+void GLES2TraceImplementation::Uniform4fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform4fv");
+ gl_->Uniform4fv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform4i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z,
+ GLint w) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform4i");
+ gl_->Uniform4i(location, x, y, z, w);
+}
+
+void GLES2TraceImplementation::Uniform4iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform4iv");
+ gl_->Uniform4iv(location, count, v);
+}
+
+void GLES2TraceImplementation::UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UniformMatrix2fv");
+ gl_->UniformMatrix2fv(location, count, transpose, value);
+}
+
+void GLES2TraceImplementation::UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UniformMatrix3fv");
+ gl_->UniformMatrix3fv(location, count, transpose, value);
+}
+
+void GLES2TraceImplementation::UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UniformMatrix4fv");
+ gl_->UniformMatrix4fv(location, count, transpose, value);
+}
+
+void GLES2TraceImplementation::UseProgram(GLuint program) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UseProgram");
+ gl_->UseProgram(program);
+}
+
+void GLES2TraceImplementation::ValidateProgram(GLuint program) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ValidateProgram");
+ gl_->ValidateProgram(program);
+}
+
+void GLES2TraceImplementation::VertexAttrib1f(GLuint indx, GLfloat x) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib1f");
+ gl_->VertexAttrib1f(indx, x);
+}
+
+void GLES2TraceImplementation::VertexAttrib1fv(GLuint indx,
+ const GLfloat* values) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib1fv");
+ gl_->VertexAttrib1fv(indx, values);
+}
+
+void GLES2TraceImplementation::VertexAttrib2f(GLuint indx,
+ GLfloat x,
+ GLfloat y) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib2f");
+ gl_->VertexAttrib2f(indx, x, y);
+}
+
+void GLES2TraceImplementation::VertexAttrib2fv(GLuint indx,
+ const GLfloat* values) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib2fv");
+ gl_->VertexAttrib2fv(indx, values);
+}
+
+void GLES2TraceImplementation::VertexAttrib3f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib3f");
+ gl_->VertexAttrib3f(indx, x, y, z);
+}
+
+void GLES2TraceImplementation::VertexAttrib3fv(GLuint indx,
+ const GLfloat* values) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib3fv");
+ gl_->VertexAttrib3fv(indx, values);
+}
+
+void GLES2TraceImplementation::VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib4f");
+ gl_->VertexAttrib4f(indx, x, y, z, w);
+}
+
+void GLES2TraceImplementation::VertexAttrib4fv(GLuint indx,
+ const GLfloat* values) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib4fv");
+ gl_->VertexAttrib4fv(indx, values);
+}
+
+void GLES2TraceImplementation::VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttribPointer");
+ gl_->VertexAttribPointer(indx, size, type, normalized, stride, ptr);
+}
+
+void GLES2TraceImplementation::Viewport(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Viewport");
+ gl_->Viewport(x, y, width, height);
+}
+
+void GLES2TraceImplementation::BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlitFramebufferCHROMIUM");
+ gl_->BlitFramebufferCHROMIUM(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+}
+
+void GLES2TraceImplementation::RenderbufferStorageMultisampleCHROMIUM(
+ GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::RenderbufferStorageMultisampleCHROMIUM");
+ gl_->RenderbufferStorageMultisampleCHROMIUM(
+ target, samples, internalformat, width, height);
+}
+
+void GLES2TraceImplementation::RenderbufferStorageMultisampleEXT(
+ GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::RenderbufferStorageMultisampleEXT");
+ gl_->RenderbufferStorageMultisampleEXT(
+ target, samples, internalformat, width, height);
+}
+
+void GLES2TraceImplementation::FramebufferTexture2DMultisampleEXT(
+ GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::FramebufferTexture2DMultisampleEXT");
+ gl_->FramebufferTexture2DMultisampleEXT(
+ target, attachment, textarget, texture, level, samples);
+}
+
+void GLES2TraceImplementation::TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexStorage2DEXT");
+ gl_->TexStorage2DEXT(target, levels, internalFormat, width, height);
+}
+
+void GLES2TraceImplementation::GenQueriesEXT(GLsizei n, GLuint* queries) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenQueriesEXT");
+ gl_->GenQueriesEXT(n, queries);
+}
+
+void GLES2TraceImplementation::DeleteQueriesEXT(GLsizei n,
+ const GLuint* queries) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteQueriesEXT");
+ gl_->DeleteQueriesEXT(n, queries);
+}
+
+GLboolean GLES2TraceImplementation::IsQueryEXT(GLuint id) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsQueryEXT");
+ return gl_->IsQueryEXT(id);
+}
+
+void GLES2TraceImplementation::BeginQueryEXT(GLenum target, GLuint id) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BeginQueryEXT");
+ gl_->BeginQueryEXT(target, id);
+}
+
+void GLES2TraceImplementation::EndQueryEXT(GLenum target) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::EndQueryEXT");
+ gl_->EndQueryEXT(target);
+}
+
+void GLES2TraceImplementation::GetQueryivEXT(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetQueryivEXT");
+ gl_->GetQueryivEXT(target, pname, params);
+}
+
+void GLES2TraceImplementation::GetQueryObjectuivEXT(GLuint id,
+ GLenum pname,
+ GLuint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetQueryObjectuivEXT");
+ gl_->GetQueryObjectuivEXT(id, pname, params);
+}
+
+void GLES2TraceImplementation::InsertEventMarkerEXT(GLsizei length,
+ const GLchar* marker) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::InsertEventMarkerEXT");
+ gl_->InsertEventMarkerEXT(length, marker);
+}
+
+void GLES2TraceImplementation::PushGroupMarkerEXT(GLsizei length,
+ const GLchar* marker) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PushGroupMarkerEXT");
+ gl_->PushGroupMarkerEXT(length, marker);
+}
+
+void GLES2TraceImplementation::PopGroupMarkerEXT() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PopGroupMarkerEXT");
+ gl_->PopGroupMarkerEXT();
+}
+
+void GLES2TraceImplementation::GenVertexArraysOES(GLsizei n, GLuint* arrays) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenVertexArraysOES");
+ gl_->GenVertexArraysOES(n, arrays);
+}
+
+void GLES2TraceImplementation::DeleteVertexArraysOES(GLsizei n,
+ const GLuint* arrays) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteVertexArraysOES");
+ gl_->DeleteVertexArraysOES(n, arrays);
+}
+
+GLboolean GLES2TraceImplementation::IsVertexArrayOES(GLuint array) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsVertexArrayOES");
+ return gl_->IsVertexArrayOES(array);
+}
+
+void GLES2TraceImplementation::BindVertexArrayOES(GLuint array) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindVertexArrayOES");
+ gl_->BindVertexArrayOES(array);
+}
+
+void GLES2TraceImplementation::SwapBuffers() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::SwapBuffers");
+ gl_->SwapBuffers();
+}
+
+GLuint GLES2TraceImplementation::GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetMaxValueInBufferCHROMIUM");
+ return gl_->GetMaxValueInBufferCHROMIUM(buffer_id, count, type, offset);
+}
+
+GLboolean GLES2TraceImplementation::EnableFeatureCHROMIUM(const char* feature) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::EnableFeatureCHROMIUM");
+ return gl_->EnableFeatureCHROMIUM(feature);
+}
+
+void* GLES2TraceImplementation::MapBufferCHROMIUM(GLuint target,
+ GLenum access) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MapBufferCHROMIUM");
+ return gl_->MapBufferCHROMIUM(target, access);
+}
+
+GLboolean GLES2TraceImplementation::UnmapBufferCHROMIUM(GLuint target) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UnmapBufferCHROMIUM");
+ return gl_->UnmapBufferCHROMIUM(target);
+}
+
+void* GLES2TraceImplementation::MapImageCHROMIUM(GLuint image_id) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MapImageCHROMIUM");
+ return gl_->MapImageCHROMIUM(image_id);
+}
+
+void GLES2TraceImplementation::UnmapImageCHROMIUM(GLuint image_id) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UnmapImageCHROMIUM");
+ gl_->UnmapImageCHROMIUM(image_id);
+}
+
+void* GLES2TraceImplementation::MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MapBufferSubDataCHROMIUM");
+ return gl_->MapBufferSubDataCHROMIUM(target, offset, size, access);
+}
+
+void GLES2TraceImplementation::UnmapBufferSubDataCHROMIUM(const void* mem) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::UnmapBufferSubDataCHROMIUM");
+ gl_->UnmapBufferSubDataCHROMIUM(mem);
+}
+
+void* GLES2TraceImplementation::MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MapTexSubImage2DCHROMIUM");
+ return gl_->MapTexSubImage2DCHROMIUM(
+ target, level, xoffset, yoffset, width, height, format, type, access);
+}
+
+void GLES2TraceImplementation::UnmapTexSubImage2DCHROMIUM(const void* mem) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::UnmapTexSubImage2DCHROMIUM");
+ gl_->UnmapTexSubImage2DCHROMIUM(mem);
+}
+
+void GLES2TraceImplementation::ResizeCHROMIUM(GLuint width,
+ GLuint height,
+ GLfloat scale_factor) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ResizeCHROMIUM");
+ gl_->ResizeCHROMIUM(width, height, scale_factor);
+}
+
+const GLchar* GLES2TraceImplementation::GetRequestableExtensionsCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetRequestableExtensionsCHROMIUM");
+ return gl_->GetRequestableExtensionsCHROMIUM();
+}
+
+void GLES2TraceImplementation::RequestExtensionCHROMIUM(const char* extension) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::RequestExtensionCHROMIUM");
+ gl_->RequestExtensionCHROMIUM(extension);
+}
+
+void GLES2TraceImplementation::RateLimitOffscreenContextCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::RateLimitOffscreenContextCHROMIUM");
+ gl_->RateLimitOffscreenContextCHROMIUM();
+}
+
+void GLES2TraceImplementation::GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetMultipleIntegervCHROMIUM");
+ gl_->GetMultipleIntegervCHROMIUM(pnames, count, results, size);
+}
+
+void GLES2TraceImplementation::GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetProgramInfoCHROMIUM");
+ gl_->GetProgramInfoCHROMIUM(program, bufsize, size, info);
+}
+
+GLuint GLES2TraceImplementation::CreateStreamTextureCHROMIUM(GLuint texture) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::CreateStreamTextureCHROMIUM");
+ return gl_->CreateStreamTextureCHROMIUM(texture);
+}
+
+GLuint GLES2TraceImplementation::CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CreateImageCHROMIUM");
+ return gl_->CreateImageCHROMIUM(width, height, internalformat, usage);
+}
+
+void GLES2TraceImplementation::DestroyImageCHROMIUM(GLuint image_id) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DestroyImageCHROMIUM");
+ gl_->DestroyImageCHROMIUM(image_id);
+}
+
+void GLES2TraceImplementation::GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetImageParameterivCHROMIUM");
+ gl_->GetImageParameterivCHROMIUM(image_id, pname, params);
+}
+
+GLuint GLES2TraceImplementation::CreateGpuMemoryBufferImageCHROMIUM(
+ GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::CreateGpuMemoryBufferImageCHROMIUM");
+ return gl_->CreateGpuMemoryBufferImageCHROMIUM(
+ width, height, internalformat, usage);
+}
+
+void GLES2TraceImplementation::GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetTranslatedShaderSourceANGLE");
+ gl_->GetTranslatedShaderSourceANGLE(shader, bufsize, length, source);
+}
+
+void GLES2TraceImplementation::PostSubBufferCHROMIUM(GLint x,
+ GLint y,
+ GLint width,
+ GLint height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PostSubBufferCHROMIUM");
+ gl_->PostSubBufferCHROMIUM(x, y, width, height);
+}
+
+void GLES2TraceImplementation::TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::TexImageIOSurface2DCHROMIUM");
+ gl_->TexImageIOSurface2DCHROMIUM(target, width, height, ioSurfaceId, plane);
+}
+
+void GLES2TraceImplementation::CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CopyTextureCHROMIUM");
+ gl_->CopyTextureCHROMIUM(
+ target, source_id, dest_id, level, internalformat, dest_type);
+}
+
+void GLES2TraceImplementation::DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DrawArraysInstancedANGLE");
+ gl_->DrawArraysInstancedANGLE(mode, first, count, primcount);
+}
+
+void GLES2TraceImplementation::DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::DrawElementsInstancedANGLE");
+ gl_->DrawElementsInstancedANGLE(mode, count, type, indices, primcount);
+}
+
+void GLES2TraceImplementation::VertexAttribDivisorANGLE(GLuint index,
+ GLuint divisor) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttribDivisorANGLE");
+ gl_->VertexAttribDivisorANGLE(index, divisor);
+}
+
+void GLES2TraceImplementation::GenMailboxCHROMIUM(GLbyte* mailbox) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenMailboxCHROMIUM");
+ gl_->GenMailboxCHROMIUM(mailbox);
+}
+
+void GLES2TraceImplementation::ProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ProduceTextureCHROMIUM");
+ gl_->ProduceTextureCHROMIUM(target, mailbox);
+}
+
+void GLES2TraceImplementation::ProduceTextureDirectCHROMIUM(
+ GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::ProduceTextureDirectCHROMIUM");
+ gl_->ProduceTextureDirectCHROMIUM(texture, target, mailbox);
+}
+
+void GLES2TraceImplementation::ConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ConsumeTextureCHROMIUM");
+ gl_->ConsumeTextureCHROMIUM(target, mailbox);
+}
+
+GLuint GLES2TraceImplementation::CreateAndConsumeTextureCHROMIUM(
+ GLenum target,
+ const GLbyte* mailbox) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::CreateAndConsumeTextureCHROMIUM");
+ return gl_->CreateAndConsumeTextureCHROMIUM(target, mailbox);
+}
+
+void GLES2TraceImplementation::BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::BindUniformLocationCHROMIUM");
+ gl_->BindUniformLocationCHROMIUM(program, location, name);
+}
+
+void GLES2TraceImplementation::BindTexImage2DCHROMIUM(GLenum target,
+ GLint imageId) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindTexImage2DCHROMIUM");
+ gl_->BindTexImage2DCHROMIUM(target, imageId);
+}
+
+void GLES2TraceImplementation::ReleaseTexImage2DCHROMIUM(GLenum target,
+ GLint imageId) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ReleaseTexImage2DCHROMIUM");
+ gl_->ReleaseTexImage2DCHROMIUM(target, imageId);
+}
+
+void GLES2TraceImplementation::TraceBeginCHROMIUM(const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TraceBeginCHROMIUM");
+ gl_->TraceBeginCHROMIUM(name);
+}
+
+void GLES2TraceImplementation::TraceEndCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TraceEndCHROMIUM");
+ gl_->TraceEndCHROMIUM();
+}
+
+void GLES2TraceImplementation::AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::AsyncTexSubImage2DCHROMIUM");
+ gl_->AsyncTexSubImage2DCHROMIUM(
+ target, level, xoffset, yoffset, width, height, format, type, data);
+}
+
+void GLES2TraceImplementation::AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::AsyncTexImage2DCHROMIUM");
+ gl_->AsyncTexImage2DCHROMIUM(target,
+ level,
+ internalformat,
+ width,
+ height,
+ border,
+ format,
+ type,
+ pixels);
+}
+
+void GLES2TraceImplementation::WaitAsyncTexImage2DCHROMIUM(GLenum target) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::WaitAsyncTexImage2DCHROMIUM");
+ gl_->WaitAsyncTexImage2DCHROMIUM(target);
+}
+
+void GLES2TraceImplementation::WaitAllAsyncTexImage2DCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::WaitAllAsyncTexImage2DCHROMIUM");
+ gl_->WaitAllAsyncTexImage2DCHROMIUM();
+}
+
+void GLES2TraceImplementation::DiscardFramebufferEXT(
+ GLenum target,
+ GLsizei count,
+ const GLenum* attachments) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DiscardFramebufferEXT");
+ gl_->DiscardFramebufferEXT(target, count, attachments);
+}
+
+void GLES2TraceImplementation::LoseContextCHROMIUM(GLenum current,
+ GLenum other) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::LoseContextCHROMIUM");
+ gl_->LoseContextCHROMIUM(current, other);
+}
+
+GLuint GLES2TraceImplementation::InsertSyncPointCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::InsertSyncPointCHROMIUM");
+ return gl_->InsertSyncPointCHROMIUM();
+}
+
+void GLES2TraceImplementation::WaitSyncPointCHROMIUM(GLuint sync_point) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::WaitSyncPointCHROMIUM");
+ gl_->WaitSyncPointCHROMIUM(sync_point);
+}
+
+void GLES2TraceImplementation::DrawBuffersEXT(GLsizei count,
+ const GLenum* bufs) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DrawBuffersEXT");
+ gl_->DrawBuffersEXT(count, bufs);
+}
+
+void GLES2TraceImplementation::DiscardBackbufferCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DiscardBackbufferCHROMIUM");
+ gl_->DiscardBackbufferCHROMIUM();
+}
+
+void GLES2TraceImplementation::ScheduleOverlayPlaneCHROMIUM(
+ GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::ScheduleOverlayPlaneCHROMIUM");
+ gl_->ScheduleOverlayPlaneCHROMIUM(plane_z_order,
+ plane_transform,
+ overlay_texture_id,
+ bounds_x,
+ bounds_y,
+ bounds_width,
+ bounds_height,
+ uv_x,
+ uv_y,
+ uv_width,
+ uv_height);
+}
+
+void GLES2TraceImplementation::MatrixLoadfCHROMIUM(GLenum matrixMode,
+ const GLfloat* m) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MatrixLoadfCHROMIUM");
+ gl_->MatrixLoadfCHROMIUM(matrixMode, m);
+}
+
+void GLES2TraceImplementation::MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::MatrixLoadIdentityCHROMIUM");
+ gl_->MatrixLoadIdentityCHROMIUM(matrixMode);
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_
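Every method in gles2_trace_implementation_impl_autogen.h above follows the same generated shape: a thin wrapper that records a compact trace event in the "gpu" category and then forwards the call, unchanged, to the wrapped GLES2Interface held in gl_. A minimal sketch of that pattern, shown for a hypothetical entry point Foo (the real methods are emitted by build_gles2_cmd_buffer.py, not written by hand):

void GLES2TraceImplementation::Foo(GLuint id) {
  // Sketch only: Foo is a placeholder name. The generated wrapper emits a
  // binary-efficient trace event and delegates to the underlying interface.
  TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Foo");
  gl_->Foo(id);
}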
diff --git a/gpu/command_buffer/client/gpu_control.h b/gpu/command_buffer/client/gpu_control.h
new file mode 100644
index 0000000..b28757c
--- /dev/null
+++ b/gpu/command_buffer/client/gpu_control.h
@@ -0,0 +1,77 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GPU_CONTROL_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GPU_CONTROL_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/gpu_export.h"
+
+namespace gfx {
+class GpuMemoryBuffer;
+}
+
+namespace gpu {
+
+// Common interface for GpuControl implementations.
+class GPU_EXPORT GpuControl {
+ public:
+ GpuControl() {}
+ virtual ~GpuControl() {}
+
+ virtual Capabilities GetCapabilities() = 0;
+
+ // Create a gpu memory buffer of the given dimensions and format. Returns
+ // the buffer and stores its ID in |id|, or returns NULL on error.
+ virtual gfx::GpuMemoryBuffer* CreateGpuMemoryBuffer(
+ size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage,
+ int32_t* id) = 0;
+
+ // Destroy a gpu memory buffer. The ID must be positive.
+ virtual void DestroyGpuMemoryBuffer(int32_t id) = 0;
+
+ // Inserts a sync point, returning its ID. Sync point IDs are global and can
+ // be used for cross-context synchronization.
+ virtual uint32_t InsertSyncPoint() = 0;
+
+ // Inserts a future sync point, returning its ID. Sync point IDs are global
+ // and can be used for cross-context synchronization. The sync point won't be
+ // retired immediately.
+ virtual uint32_t InsertFutureSyncPoint() = 0;
+
+ // Retires a future sync point. This will signal contexts that are waiting
+ // on it to start executing.
+ virtual void RetireSyncPoint(uint32_t sync_point) = 0;
+
+ // Runs |callback| when a sync point is reached.
+ virtual void SignalSyncPoint(uint32_t sync_point,
+ const base::Closure& callback) = 0;
+
+ // Runs |callback| when a query created via glGenQueriesEXT() has cleared
+ // past the glEndQueryEXT() point.
+ virtual void SignalQuery(uint32_t query, const base::Closure& callback) = 0;
+
+ virtual void SetSurfaceVisible(bool visible) = 0;
+
+ // Attaches an external stream to the texture given by |texture_id| and
+ // returns a stream identifier.
+ virtual uint32_t CreateStreamTexture(uint32_t texture_id) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(GpuControl);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GPU_CONTROL_H_
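The sync point entry points in GpuControl are the cross-context synchronization primitive: InsertSyncPoint() (or InsertFutureSyncPoint() followed by RetireSyncPoint()) produces a globally visible ID, and SignalSyncPoint() runs a closure once that ID has been reached. A minimal usage sketch, assuming |control| is any concrete GpuControl implementation and OnSyncPointReached is a placeholder callback:

#include "base/bind.h"
#include "gpu/command_buffer/client/gpu_control.h"

namespace {
// Placeholder: a real client would release or recycle shared resources here.
void OnSyncPointReached() {}
}  // namespace

void SynchronizeExample(gpu::GpuControl* control) {
  uint32_t sync_point = control->InsertSyncPoint();  // Globally valid ID.
  // The closure runs once the sync point has been reached, so it is then safe
  // to touch resources shared with other contexts.
  control->SignalSyncPoint(sync_point, base::Bind(&OnSyncPointReached));
}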
diff --git a/gpu/command_buffer/client/gpu_memory_buffer_tracker.cc b/gpu/command_buffer/client/gpu_memory_buffer_tracker.cc
new file mode 100644
index 0000000..9ffe0e3
--- /dev/null
+++ b/gpu/command_buffer/client/gpu_memory_buffer_tracker.cc
@@ -0,0 +1,56 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gpu_memory_buffer_tracker.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+
+namespace gpu {
+namespace gles2 {
+
+GpuMemoryBufferTracker::GpuMemoryBufferTracker(GpuControl* gpu_control)
+ : gpu_control_(gpu_control) {
+}
+
+GpuMemoryBufferTracker::~GpuMemoryBufferTracker() {
+ while (!buffers_.empty()) {
+ RemoveBuffer(buffers_.begin()->first);
+ }
+}
+
+int32 GpuMemoryBufferTracker::CreateBuffer(size_t width,
+ size_t height,
+ int32 internalformat,
+ int32 usage) {
+ int32 image_id = 0;
+ DCHECK(gpu_control_);
+ gfx::GpuMemoryBuffer* buffer = gpu_control_->CreateGpuMemoryBuffer(
+ width, height, internalformat, usage, &image_id);
+ if (!buffer)
+ return 0;
+
+ std::pair<BufferMap::iterator, bool> result =
+ buffers_.insert(std::make_pair(image_id, buffer));
+ DCHECK(result.second);
+
+ return image_id;
+}
+
+gfx::GpuMemoryBuffer* GpuMemoryBufferTracker::GetBuffer(int32 image_id) {
+ BufferMap::iterator it = buffers_.find(image_id);
+ return (it != buffers_.end()) ? it->second : NULL;
+}
+
+void GpuMemoryBufferTracker::RemoveBuffer(int32 image_id) {
+ BufferMap::iterator buffer_it = buffers_.find(image_id);
+ if (buffer_it != buffers_.end())
+ buffers_.erase(buffer_it);
+ DCHECK(gpu_control_);
+ gpu_control_->DestroyGpuMemoryBuffer(image_id);
+}
+
+} // namespace gles2
+} // namespace gpu
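Taken together, CreateBuffer/GetBuffer/RemoveBuffer give the client a small id-to-buffer registry: CreateBuffer forwards to GpuControl and records the returned buffer under its image id, GetBuffer looks the pointer back up, and RemoveBuffer always asks GpuControl to destroy the server-side buffer, even if the id was never tracked. A hedged sketch of the expected calling pattern; AllocateImage is a hypothetical helper and the dimensions, internalformat, and usage values are whatever the caller actually needs:

// Hypothetical helper; returns the image id, or 0 on failure.
int32 AllocateImage(gpu::gles2::GpuMemoryBufferTracker* tracker,
                    size_t width, size_t height,
                    int32 internalformat, int32 usage) {
  int32 id = tracker->CreateBuffer(width, height, internalformat, usage);
  if (!id)
    return 0;  // CreateGpuMemoryBuffer failed; nothing was inserted.
  gfx::GpuMemoryBuffer* buffer = tracker->GetBuffer(id);
  DCHECK(buffer);
  // ... map the buffer, write into it, unmap ...
  return id;  // The caller later calls tracker->RemoveBuffer(id).
}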
diff --git a/gpu/command_buffer/client/gpu_memory_buffer_tracker.h b/gpu/command_buffer/client/gpu_memory_buffer_tracker.h
new file mode 100644
index 0000000..25ec949
--- /dev/null
+++ b/gpu/command_buffer/client/gpu_memory_buffer_tracker.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GPU_MEMORY_BUFFER_TRACKER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GPU_MEMORY_BUFFER_TRACKER_H_
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "gles2_impl_export.h"
+
+namespace gfx {
+class GpuMemoryBuffer;
+}
+
+namespace gpu {
+class GpuControl;
+
+namespace gles2 {
+
+// Tracks GPU memory buffer objects on the client side.
+class GLES2_IMPL_EXPORT GpuMemoryBufferTracker {
+ public:
+ explicit GpuMemoryBufferTracker(GpuControl* gpu_control);
+ virtual ~GpuMemoryBufferTracker();
+
+ int32 CreateBuffer(size_t width,
+ size_t height,
+ int32 internalformat,
+ int32 usage);
+ gfx::GpuMemoryBuffer* GetBuffer(int32 image_id);
+ void RemoveBuffer(int32 image_id);
+
+ private:
+ typedef base::hash_map<int32, gfx::GpuMemoryBuffer*> BufferMap;
+ BufferMap buffers_;
+ GpuControl* gpu_control_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferTracker);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GPU_MEMORY_BUFFER_TRACKER_H_
diff --git a/gpu/command_buffer/client/gpu_switches.cc b/gpu/command_buffer/client/gpu_switches.cc
new file mode 100644
index 0000000..f933c19
--- /dev/null
+++ b/gpu/command_buffer/client/gpu_switches.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gpu_switches.h"
+#include "base/basictypes.h"
+
+namespace switches {
+
+// Enable GPU client logging.
+const char kEnableGPUClientLogging[] = "enable-gpu-client-logging";
+
+} // namespace switches
diff --git a/gpu/command_buffer/client/gpu_switches.h b/gpu/command_buffer/client/gpu_switches.h
new file mode 100644
index 0000000..523e5a4
--- /dev/null
+++ b/gpu/command_buffer/client/gpu_switches.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the command-line switches used by gpu/command_buffer/client/.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GPU_SWITCHES_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GPU_SWITCHES_H_
+
+#include "gpu/command_buffer/client/gles2_impl_export.h"
+
+namespace switches {
+
+GLES2_IMPL_EXPORT extern const char kEnableGPUClientLogging[];
+
+} // namespace switches
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GPU_SWITCHES_H_
+
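The switch is only declared and defined in this pair of files; a typical client tests for it through the standard command-line API. A sketch (the exact CommandLine spelling, with or without a base:: prefix, depends on the Chromium revision):

#include "base/command_line.h"
#include "gpu/command_buffer/client/gpu_switches.h"

bool GpuClientLoggingEnabled() {
  // True when the process was launched with --enable-gpu-client-logging.
  return CommandLine::ForCurrentProcess()->HasSwitch(
      switches::kEnableGPUClientLogging);
}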
diff --git a/gpu/command_buffer/client/mapped_memory.cc b/gpu/command_buffer/client/mapped_memory.cc
new file mode 100644
index 0000000..fc6ca5d
--- /dev/null
+++ b/gpu/command_buffer/client/mapped_memory.cc
@@ -0,0 +1,143 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/mapped_memory.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+namespace gpu {
+
+MemoryChunk::MemoryChunk(int32 shm_id,
+ scoped_refptr<gpu::Buffer> shm,
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback)
+ : shm_id_(shm_id),
+ shm_(shm),
+ allocator_(shm->size(), helper, poll_callback, shm->memory()) {}
+
+MemoryChunk::~MemoryChunk() {}
+
+MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
+ const base::Closure& poll_callback,
+ size_t unused_memory_reclaim_limit)
+ : chunk_size_multiple_(1),
+ helper_(helper),
+ poll_callback_(poll_callback),
+ allocated_memory_(0),
+ max_free_bytes_(unused_memory_reclaim_limit) {
+}
+
+MappedMemoryManager::~MappedMemoryManager() {
+ CommandBuffer* cmd_buf = helper_->command_buffer();
+ for (MemoryChunkVector::iterator iter = chunks_.begin();
+ iter != chunks_.end(); ++iter) {
+ MemoryChunk* chunk = *iter;
+ cmd_buf->DestroyTransferBuffer(chunk->shm_id());
+ }
+}
+
+void* MappedMemoryManager::Alloc(
+ unsigned int size, int32* shm_id, unsigned int* shm_offset) {
+ DCHECK(shm_id);
+ DCHECK(shm_offset);
+ if (size <= allocated_memory_) {
+ size_t total_bytes_in_use = 0;
+ // See if any of the chunks can satisfy this request.
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ chunk->FreeUnused();
+ total_bytes_in_use += chunk->bytes_in_use();
+ if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
+ void* mem = chunk->Alloc(size);
+ DCHECK(mem);
+ *shm_id = chunk->shm_id();
+ *shm_offset = chunk->GetOffset(mem);
+ return mem;
+ }
+ }
+
+ // If there is a memory limit being enforced and the total free
+ // memory (allocated_memory_ - total_bytes_in_use) is larger than
+ // the limit, try waiting.
+ if (max_free_bytes_ != kNoLimit &&
+ (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
+ TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
+ void* mem = chunk->Alloc(size);
+ DCHECK(mem);
+ *shm_id = chunk->shm_id();
+ *shm_offset = chunk->GetOffset(mem);
+ return mem;
+ }
+ }
+ }
+ }
+
+ // Make a new chunk to satisfy the request.
+ CommandBuffer* cmd_buf = helper_->command_buffer();
+ unsigned int chunk_size =
+ ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
+ chunk_size_multiple_;
+ int32 id = -1;
+ scoped_refptr<gpu::Buffer> shm =
+ cmd_buf->CreateTransferBuffer(chunk_size, &id);
+ if (id < 0)
+ return NULL;
+ DCHECK(shm.get());
+ MemoryChunk* mc = new MemoryChunk(id, shm, helper_, poll_callback_);
+ allocated_memory_ += mc->GetSize();
+ chunks_.push_back(mc);
+ void* mem = mc->Alloc(size);
+ DCHECK(mem);
+ *shm_id = mc->shm_id();
+ *shm_offset = mc->GetOffset(mem);
+ return mem;
+}
+
+void MappedMemoryManager::Free(void* pointer) {
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ if (chunk->IsInChunk(pointer)) {
+ chunk->Free(pointer);
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+void MappedMemoryManager::FreePendingToken(void* pointer, int32 token) {
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ if (chunk->IsInChunk(pointer)) {
+ chunk->FreePendingToken(pointer, token);
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+void MappedMemoryManager::FreeUnused() {
+ CommandBuffer* cmd_buf = helper_->command_buffer();
+ MemoryChunkVector::iterator iter = chunks_.begin();
+ while (iter != chunks_.end()) {
+ MemoryChunk* chunk = *iter;
+ chunk->FreeUnused();
+ if (!chunk->InUse()) {
+ cmd_buf->DestroyTransferBuffer(chunk->shm_id());
+ allocated_memory_ -= chunk->GetSize();
+ iter = chunks_.erase(iter);
+ } else {
+ ++iter;
+ }
+ }
+}
+
+} // namespace gpu
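Alloc() above tries, in order: an existing chunk with enough immediately free space; if the free-memory limit is exceeded, an existing chunk after waiting for pending tokens; and finally a fresh transfer buffer whose size is the request rounded up to chunk_size_multiple_ (for example, with a multiple of 1024, a 1500-byte request produces a 2048-byte chunk). A minimal sketch of the client-side pattern, assuming |helper| is an initialized CommandBufferHelper and using a no-op poll callback like the EmptyPoll helper in the unit tests below; the sizes are illustrative only:

gpu::MappedMemoryManager manager(helper, base::Bind(&EmptyPoll),
                                 gpu::MappedMemoryManager::kNoLimit);
manager.set_chunk_size_multiple(1024);  // New chunks rounded up to 1 KiB.

int32 shm_id = -1;
unsigned int shm_offset = 0;
void* mem = manager.Alloc(1500, &shm_id, &shm_offset);  // 2048-byte chunk.
if (mem) {
  // ... fill |mem| and issue a command that reads (shm_id, shm_offset) ...
  // Freeing pending a token keeps the memory alive until the service has
  // consumed the command; later Alloc()/FreeUnused() calls reclaim it.
  manager.FreePendingToken(mem, helper->InsertToken());
}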
diff --git a/gpu/command_buffer/client/mapped_memory.h b/gpu/command_buffer/client/mapped_memory.h
new file mode 100644
index 0000000..789e69c
--- /dev/null
+++ b/gpu/command_buffer/client/mapped_memory.h
@@ -0,0 +1,204 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
+#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
+
+#include <stdint.h>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/memory/scoped_vector.h"
+#include "gpu/command_buffer/client/fenced_allocator.h"
+#include "gpu/command_buffer/common/buffer.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+
+// Manages a shared memory segment.
+class GPU_EXPORT MemoryChunk {
+ public:
+ MemoryChunk(int32_t shm_id,
+ scoped_refptr<gpu::Buffer> shm,
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback);
+ ~MemoryChunk();
+
+ // Gets the size of the largest free block that is available without waiting.
+ unsigned int GetLargestFreeSizeWithoutWaiting() {
+ return allocator_.GetLargestFreeSize();
+ }
+
+ // Gets the size of the largest free block that can be allocated if the
+ // caller can wait.
+ unsigned int GetLargestFreeSizeWithWaiting() {
+ return allocator_.GetLargestFreeOrPendingSize();
+ }
+
+ // Gets the size of the chunk.
+ unsigned int GetSize() const {
+ return static_cast<unsigned int>(shm_->size());
+ }
+
+ // The shared memory id for this chunk.
+ int32_t shm_id() const {
+ return shm_id_;
+ }
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ //
+ // Parameters:
+ // size: the size of the memory block to allocate.
+ //
+ // Returns:
+ // the pointer to the allocated memory block, or NULL if out of
+ // memory.
+ void* Alloc(unsigned int size) {
+ return allocator_.Alloc(size);
+ }
+
+ // Gets the offset to a memory block given the base memory and the address.
+ // It translates NULL to FencedAllocator::kInvalidOffset.
+ unsigned int GetOffset(void* pointer) {
+ return allocator_.GetOffset(pointer);
+ }
+
+ // Frees a block of memory.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ void Free(void* pointer) {
+ allocator_.Free(pointer);
+ }
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(void* pointer, unsigned int token) {
+ allocator_.FreePendingToken(pointer, token);
+ }
+
+ // Frees any blocks whose tokens have passed.
+ void FreeUnused() {
+ allocator_.FreeUnused();
+ }
+
+ // Returns true if pointer is in the range of this block.
+ bool IsInChunk(void* pointer) const {
+ return pointer >= shm_->memory() &&
+ pointer <
+ reinterpret_cast<const int8_t*>(shm_->memory()) + shm_->size();
+ }
+
+ // Returns true if any memory in this chunk is in use.
+ bool InUse() {
+ return allocator_.InUse();
+ }
+
+ size_t bytes_in_use() const {
+ return allocator_.bytes_in_use();
+ }
+
+ private:
+ int32_t shm_id_;
+ scoped_refptr<gpu::Buffer> shm_;
+ FencedAllocatorWrapper allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
+};
+
+// Manages MemoryChunks.
+class GPU_EXPORT MappedMemoryManager {
+ public:
+ enum MemoryLimit {
+ kNoLimit = 0,
+ };
+
+ // |unused_memory_reclaim_limit|: When exceeded, this causes pending memory
+ // to be reclaimed before allocating more memory.
+ MappedMemoryManager(CommandBufferHelper* helper,
+ const base::Closure& poll_callback,
+ size_t unused_memory_reclaim_limit);
+
+ ~MappedMemoryManager();
+
+ unsigned int chunk_size_multiple() const {
+ return chunk_size_multiple_;
+ }
+
+ void set_chunk_size_multiple(unsigned int multiple) {
+ chunk_size_multiple_ = multiple;
+ }
+
+ // Allocates a block of memory.
+ // Parameters:
+ // size: size of memory to allocate.
+ // shm_id: pointer to variable to receive the shared memory id.
+ // shm_offset: pointer to variable to receive the shared memory offset.
+ // Returns:
+ // pointer to allocated block of memory. NULL if failure.
+ void* Alloc(
+ unsigned int size, int32_t* shm_id, unsigned int* shm_offset);
+
+ // Frees a block of memory.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ void Free(void* pointer);
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(void* pointer, int32_t token);
+
+ // Frees any shared memory that is not in use.
+ void FreeUnused();
+
+ // Used for testing
+ size_t num_chunks() const {
+ return chunks_.size();
+ }
+
+ size_t bytes_in_use() const {
+ size_t bytes_in_use = 0;
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ bytes_in_use += chunk->bytes_in_use();
+ }
+ return bytes_in_use;
+ }
+
+ // Used for testing
+ size_t allocated_memory() const {
+ return allocated_memory_;
+ }
+
+ private:
+ typedef ScopedVector<MemoryChunk> MemoryChunkVector;
+
+ // Multiple that new chunk sizes are rounded up to.
+ unsigned int chunk_size_multiple_;
+ CommandBufferHelper* helper_;
+ base::Closure poll_callback_;
+ MemoryChunkVector chunks_;
+ size_t allocated_memory_;
+ size_t max_free_bytes_;
+
+ DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
+
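The two "largest free size" accessors on MemoryChunk differ only in how they treat blocks freed pending a token: the without-waiting size excludes them until the token has passed, while the with-waiting size counts them as reclaimable. An illustrative sketch against a single 1024-byte chunk (the numbers are examples of the intent, not guarantees of the allocator's packing):

void* p = chunk->Alloc(256);
chunk->FreePendingToken(p, helper->InsertToken());
// The 256-byte block is still pending, so it is not immediately reusable.
unsigned int without_wait = chunk->GetLargestFreeSizeWithoutWaiting();  // 768
unsigned int with_wait = chunk->GetLargestFreeSizeWithWaiting();        // 1024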
diff --git a/gpu/command_buffer/client/mapped_memory_unittest.cc b/gpu/command_buffer/client/mapped_memory_unittest.cc
new file mode 100644
index 0000000..963640a
--- /dev/null
+++ b/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -0,0 +1,456 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/mapped_memory.h"
+
+#include <list>
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace gpu {
+
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::_;
+
+class MappedMemoryTestBase : public testing::Test {
+ protected:
+ static const unsigned int kBufferSize = 1024;
+
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock(true));
+ // Ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
+ .WillRepeatedly(Return(error::kNoError));
+ // Forward the SetToken calls to the engine
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+
+ {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ EXPECT_TRUE(manager->Initialize());
+ }
+
+ command_buffer_.reset(
+ new CommandBufferService(transfer_buffer_manager_.get()));
+ EXPECT_TRUE(command_buffer_->Initialize());
+
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), api_mock_.get(), NULL));
+ command_buffer_->SetPutOffsetChangeCallback(base::Bind(
+ &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
+ command_buffer_->SetGetBufferChangeCallback(base::Bind(
+ &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
+
+ api_mock_->set_engine(gpu_scheduler_.get());
+
+ helper_.reset(new CommandBufferHelper(command_buffer_.get()));
+ helper_->Initialize(kBufferSize);
+ }
+
+ int32 GetToken() {
+ return command_buffer_->GetLastState().token;
+ }
+
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool autorelease_pool_;
+#endif
+ base::MessageLoop message_loop_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<CommandBufferService> command_buffer_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_ptr<CommandBufferHelper> helper_;
+};
+
+#ifndef _MSC_VER
+const unsigned int MappedMemoryTestBase::kBufferSize;
+#endif
+
+namespace {
+void EmptyPoll() {
+}
+}
+
+// Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
+// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
+// it directly, not through the RPC mechanism), making sure Noops are ignored
+// and SetToken are properly forwarded to the engine.
+class MemoryChunkTest : public MappedMemoryTestBase {
+ protected:
+ static const int32 kShmId = 123;
+ virtual void SetUp() {
+ MappedMemoryTestBase::SetUp();
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
+ shared_memory->CreateAndMapAnonymous(kBufferSize);
+ buffer_ = MakeBufferFromSharedMemory(shared_memory.Pass(), kBufferSize);
+ chunk_.reset(new MemoryChunk(kShmId,
+ buffer_,
+ helper_.get(),
+ base::Bind(&EmptyPoll)));
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ MappedMemoryTestBase::TearDown();
+ }
+
+ uint8* buffer_memory() { return static_cast<uint8*>(buffer_->memory()); }
+
+ scoped_ptr<MemoryChunk> chunk_;
+ scoped_refptr<gpu::Buffer> buffer_;
+};
+
+#ifndef _MSC_VER
+const int32 MemoryChunkTest::kShmId;
+#endif
+
+TEST_F(MemoryChunkTest, Basic) {
+ const unsigned int kSize = 16;
+ EXPECT_EQ(kShmId, chunk_->shm_id());
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
+ EXPECT_EQ(kBufferSize, chunk_->GetSize());
+ void *pointer = chunk_->Alloc(kSize);
+ ASSERT_TRUE(pointer);
+ EXPECT_LE(buffer_->memory(), static_cast<uint8*>(pointer));
+ EXPECT_GE(kBufferSize,
+ static_cast<uint8*>(pointer) - buffer_memory() + kSize);
+ EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+ EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
+ EXPECT_EQ(kBufferSize, chunk_->GetSize());
+
+ chunk_->Free(pointer);
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
+
+ uint8 *pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize));
+ ASSERT_TRUE(pointer_char);
+ EXPECT_LE(buffer_memory(), pointer_char);
+ EXPECT_GE(buffer_memory() + kBufferSize, pointer_char + kSize);
+ EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+ EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
+ chunk_->Free(pointer_char);
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
+}
+
+class MappedMemoryManagerTest : public MappedMemoryTestBase {
+ public:
+ MappedMemoryManager* manager() const {
+ return manager_.get();
+ }
+
+ protected:
+ virtual void SetUp() {
+ MappedMemoryTestBase::SetUp();
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+ manager_.reset();
+ MappedMemoryTestBase::TearDown();
+ }
+
+ scoped_ptr<MappedMemoryManager> manager_;
+};
+
+TEST_F(MappedMemoryManagerTest, Basic) {
+ const unsigned int kSize = 1024;
+ // Check we can alloc.
+ int32 id1 = -1;
+ unsigned int offset1 = 0xFFFFFFFFU;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ ASSERT_TRUE(mem1);
+ EXPECT_NE(-1, id1);
+ EXPECT_EQ(0u, offset1);
+ // Check that if we free and realloc the same size we get the same memory.
+ int32 id2 = -1;
+ unsigned int offset2 = 0xFFFFFFFFU;
+ manager_->Free(mem1);
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ EXPECT_EQ(mem1, mem2);
+ EXPECT_EQ(id1, id2);
+ EXPECT_EQ(offset1, offset2);
+ // Check that allocating again returns different shared memory.
+ int32 id3 = -1;
+ unsigned int offset3 = 0xFFFFFFFFU;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ ASSERT_TRUE(mem3 != NULL);
+ EXPECT_NE(mem2, mem3);
+ EXPECT_NE(id2, id3);
+ EXPECT_EQ(0u, offset3);
+ // Free 3 and allocate 2 half size blocks.
+ manager_->Free(mem3);
+ int32 id4 = -1;
+ int32 id5 = -1;
+ unsigned int offset4 = 0xFFFFFFFFU;
+ unsigned int offset5 = 0xFFFFFFFFU;
+ void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4);
+ void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5);
+ ASSERT_TRUE(mem4 != NULL);
+ ASSERT_TRUE(mem5 != NULL);
+ EXPECT_EQ(id3, id4);
+ EXPECT_EQ(id4, id5);
+ EXPECT_EQ(0u, offset4);
+ EXPECT_EQ(kSize / 2u, offset5);
+ manager_->Free(mem4);
+ manager_->Free(mem2);
+ manager_->Free(mem5);
+}
+
+TEST_F(MappedMemoryManagerTest, FreePendingToken) {
+ const unsigned int kSize = 128;
+ const unsigned int kAllocCount = (kBufferSize / kSize) * 2;
+ CHECK(kAllocCount * kSize == kBufferSize * 2);
+
+ // Allocate several buffers across multiple chunks.
+ void *pointers[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ int32 id = -1;
+ unsigned int offset = 0xFFFFFFFFu;
+ pointers[i] = manager_->Alloc(kSize, &id, &offset);
+ EXPECT_TRUE(pointers[i]);
+ EXPECT_NE(id, -1);
+ EXPECT_NE(offset, 0xFFFFFFFFu);
+ }
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ manager_->FreePendingToken(pointers[0], token);
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something. Which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
+ // Force it to read up to the token
+ helper_->Finish();
+ // Check that the token has indeed passed.
+ EXPECT_LE(token, GetToken());
+
+ // This allocation should use the spot just freed above.
+ int32 new_id = -1;
+ unsigned int new_offset = 0xFFFFFFFFu;
+ void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset);
+ EXPECT_TRUE(new_ptr);
+ EXPECT_EQ(new_ptr, pointers[0]);
+ EXPECT_NE(new_id, -1);
+ EXPECT_NE(new_offset, 0xFFFFFFFFu);
+
+ // Free up everything.
+ manager_->Free(new_ptr);
+ for (unsigned int i = 1; i < kAllocCount; ++i) {
+ manager_->Free(pointers[i]);
+ }
+}
+
+TEST_F(MappedMemoryManagerTest, FreeUnused) {
+ int32 id = -1;
+ unsigned int offset = 0xFFFFFFFFU;
+ void* m1 = manager_->Alloc(kBufferSize, &id, &offset);
+ void* m2 = manager_->Alloc(kBufferSize, &id, &offset);
+ ASSERT_TRUE(m1 != NULL);
+ ASSERT_TRUE(m2 != NULL);
+ EXPECT_EQ(2u, manager_->num_chunks());
+ manager_->FreeUnused();
+ EXPECT_EQ(2u, manager_->num_chunks());
+ manager_->Free(m2);
+ EXPECT_EQ(2u, manager_->num_chunks());
+ manager_->FreeUnused();
+ EXPECT_EQ(1u, manager_->num_chunks());
+ manager_->Free(m1);
+ EXPECT_EQ(1u, manager_->num_chunks());
+ manager_->FreeUnused();
+ EXPECT_EQ(0u, manager_->num_chunks());
+}
+
+TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
+ const unsigned int kSize = 1024;
+ manager_->set_chunk_size_multiple(kSize * 2);
+ // Check that allocations smaller than the chunk size multiple get their
+ // chunks rounded up to that multiple.
+ int32 id1 = -1;
+ unsigned int offset1 = 0xFFFFFFFFU;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ int32 id2 = -1;
+ unsigned int offset2 = 0xFFFFFFFFU;
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ int32 id3 = -1;
+ unsigned int offset3 = 0xFFFFFFFFU;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ ASSERT_TRUE(mem1);
+ ASSERT_TRUE(mem2);
+ ASSERT_TRUE(mem3);
+ EXPECT_NE(-1, id1);
+ EXPECT_EQ(id1, id2);
+ EXPECT_NE(id2, id3);
+ EXPECT_EQ(0u, offset1);
+ EXPECT_EQ(kSize, offset2);
+ EXPECT_EQ(0u, offset3);
+
+ manager_->Free(mem1);
+ manager_->Free(mem2);
+ manager_->Free(mem3);
+}
+
+TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
+ const unsigned int kChunkSize = 2048;
+ // Reset the manager with a memory limit.
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
+ manager_->set_chunk_size_multiple(kChunkSize);
+
+ // Allocate one chunk worth of memory.
+ int32 id1 = -1;
+ unsigned int offset1 = 0xFFFFFFFFU;
+ void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
+ ASSERT_TRUE(mem1);
+ EXPECT_NE(-1, id1);
+ EXPECT_EQ(0u, offset1);
+
+ // Allocate another full chunk worth of memory.
+ // A second chunk has to be allocated since the first one is still in use.
+ int32 id2 = -1;
+ unsigned int offset2 = 0xFFFFFFFFU;
+ void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2);
+ ASSERT_TRUE(mem2);
+ EXPECT_NE(-1, id2);
+ EXPECT_EQ(0u, offset2);
+
+ // Expect two chunks to be allocated, exceeding the limit,
+ // since all memory is in use.
+ EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());
+
+ manager_->Free(mem1);
+ manager_->Free(mem2);
+}
+
+TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
+ const unsigned int kSize = 1024;
+ // Reset the manager with a memory limit.
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), kSize));
+ const unsigned int kChunkSize = 2 * 1024;
+ manager_->set_chunk_size_multiple(kChunkSize);
+
+ // Allocate half a chunk worth of memory.
+ int32 id1 = -1;
+ unsigned int offset1 = 0xFFFFFFFFU;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ ASSERT_TRUE(mem1);
+ EXPECT_NE(-1, id1);
+ EXPECT_EQ(0u, offset1);
+
+ // Allocate half a chunk worth of memory again.
+ // The same chunk will be used.
+ int32 id2 = -1;
+ unsigned int offset2 = 0xFFFFFFFFU;
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ ASSERT_TRUE(mem2);
+ EXPECT_NE(-1, id2);
+ EXPECT_EQ(kSize, offset2);
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ manager_->FreePendingToken(mem2, token);
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something. Which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
+
+ // Since we didn't call helper_->Finish(), the token has not passed.
+ // We won't be able to claim the free memory without waiting and
+ // as we've already met the memory limit we'll have to wait
+ // on the token.
+ int32 id3 = -1;
+ unsigned int offset3 = 0xFFFFFFFFU;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ ASSERT_TRUE(mem3);
+ EXPECT_NE(-1, id3);
+ // It will reuse the space from the second allocation just freed.
+ EXPECT_EQ(kSize, offset3);
+
+ // Expect one chunk to be allocated
+ EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
+
+ manager_->Free(mem1);
+ manager_->Free(mem3);
+}
+
+namespace {
+void Poll(MappedMemoryManagerTest *test, std::list<void*>* list) {
+ std::list<void*>::iterator it = list->begin();
+ while (it != list->end()) {
+ void* address = *it;
+ test->manager()->Free(address);
+ it = list->erase(it);
+ }
+}
+}
+
+TEST_F(MappedMemoryManagerTest, Poll) {
+ std::list<void*> unmanaged_memory_list;
+
+ const unsigned int kSize = 1024;
+ // Reset the manager with a memory limit.
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(),
+ base::Bind(&Poll, this, &unmanaged_memory_list),
+ kSize));
+
+ // Allocate kSize bytes. Don't add the address to the unmanaged memory
+ // list, so that it won't be freed just yet.
+ int32 id1;
+ unsigned int offset1;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize);
+
+ // Allocate kSize more bytes, and make sure we grew.
+ int32 id2;
+ unsigned int offset2;
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
+
+ // Make the unmanaged buffer be released next time FreeUnused() is called
+ // in MappedMemoryManager/FencedAllocator. This happens for example when
+ // allocating new memory.
+ unmanaged_memory_list.push_back(mem1);
+
+ // Allocate kSize more bytes. This should poll unmanaged memory, which now
+ // should free the previously allocated unmanaged memory.
+ int32 id3;
+ unsigned int offset3;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
+
+ manager_->Free(mem2);
+ manager_->Free(mem3);
+ EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/program_info_manager.cc b/gpu/command_buffer/client/program_info_manager.cc
new file mode 100644
index 0000000..d854aa0
--- /dev/null
+++ b/gpu/command_buffer/client/program_info_manager.cc
@@ -0,0 +1,526 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/program_info_manager.h"
+
+#include <map>
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+
+namespace gpu {
+namespace gles2 {
+
+class NonCachedProgramInfoManager : public ProgramInfoManager {
+ public:
+ NonCachedProgramInfoManager();
+ virtual ~NonCachedProgramInfoManager();
+
+ virtual void CreateInfo(GLuint program) OVERRIDE;
+
+ virtual void DeleteInfo(GLuint program) OVERRIDE;
+
+ virtual bool GetProgramiv(GLES2Implementation* gl,
+ GLuint program,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+ virtual GLint GetAttribLocation(GLES2Implementation* gl,
+ GLuint program,
+ const char* name) OVERRIDE;
+
+ virtual GLint GetUniformLocation(GLES2Implementation* gl,
+ GLuint program,
+ const char* name) OVERRIDE;
+
+ virtual bool GetActiveAttrib(GLES2Implementation* gl,
+ GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+ virtual bool GetActiveUniform(GLES2Implementation* gl,
+ GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+};
+
+NonCachedProgramInfoManager::NonCachedProgramInfoManager() {
+}
+
+NonCachedProgramInfoManager::~NonCachedProgramInfoManager() {
+}
+
+void NonCachedProgramInfoManager::CreateInfo(GLuint /* program */) {
+}
+
+void NonCachedProgramInfoManager::DeleteInfo(GLuint /* program */) {
+}
+
+bool NonCachedProgramInfoManager::GetProgramiv(
+ GLES2Implementation* /* gl */,
+ GLuint /* program */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+ return false;
+}
+
+GLint NonCachedProgramInfoManager::GetAttribLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) {
+ return gl->GetAttribLocationHelper(program, name);
+}
+
+GLint NonCachedProgramInfoManager::GetUniformLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) {
+ return gl->GetUniformLocationHelper(program, name);
+}
+
+bool NonCachedProgramInfoManager::GetActiveAttrib(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) {
+ return gl->GetActiveAttribHelper(
+ program, index, bufsize, length, size, type, name);
+}
+
+bool NonCachedProgramInfoManager::GetActiveUniform(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) {
+ return gl->GetActiveUniformHelper(
+ program, index, bufsize, length, size, type, name);
+}
+
+class CachedProgramInfoManager : public ProgramInfoManager {
+ public:
+ CachedProgramInfoManager();
+ virtual ~CachedProgramInfoManager();
+
+ virtual void CreateInfo(GLuint program) OVERRIDE;
+
+ virtual void DeleteInfo(GLuint program) OVERRIDE;
+
+ virtual bool GetProgramiv(GLES2Implementation* gl,
+ GLuint program,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+ virtual GLint GetAttribLocation(GLES2Implementation* gl,
+ GLuint program,
+ const char* name) OVERRIDE;
+
+ virtual GLint GetUniformLocation(GLES2Implementation* gl,
+ GLuint program,
+ const char* name) OVERRIDE;
+
+ virtual bool GetActiveAttrib(GLES2Implementation* gl,
+ GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+ virtual bool GetActiveUniform(GLES2Implementation* gl,
+ GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+ private:
+ class Program {
+ public:
+ struct UniformInfo {
+ UniformInfo(GLsizei _size, GLenum _type, const std::string& _name);
+
+ GLsizei size;
+ GLenum type;
+ bool is_array;
+ std::string name;
+ std::vector<GLint> element_locations;
+ };
+ struct VertexAttrib {
+ VertexAttrib(GLsizei _size, GLenum _type, const std::string& _name,
+ GLint _location)
+ : size(_size),
+ type(_type),
+ location(_location),
+ name(_name) {
+ }
+ GLsizei size;
+ GLenum type;
+ GLint location;
+ std::string name;
+ };
+
+ typedef std::vector<UniformInfo> UniformInfoVector;
+ typedef std::vector<VertexAttrib> AttribInfoVector;
+
+ Program();
+
+ const AttribInfoVector& GetAttribInfos() const {
+ return attrib_infos_;
+ }
+
+ const VertexAttrib* GetAttribInfo(GLint index) const {
+ return (static_cast<size_t>(index) < attrib_infos_.size()) ?
+ &attrib_infos_[index] : NULL;
+ }
+
+ GLint GetAttribLocation(const std::string& name) const;
+
+ const UniformInfo* GetUniformInfo(GLint index) const {
+ return (static_cast<size_t>(index) < uniform_infos_.size()) ?
+ &uniform_infos_[index] : NULL;
+ }
+
+ // Gets the location of a uniform by name.
+ GLint GetUniformLocation(const std::string& name) const;
+
+ bool GetProgramiv(GLenum pname, GLint* params);
+
+ // Updates the program info after a successful link.
+ void Update(GLES2Implementation* gl, GLuint program);
+
+ private:
+ bool cached_;
+
+ GLsizei max_attrib_name_length_;
+
+ // Attrib by index.
+ AttribInfoVector attrib_infos_;
+
+ GLsizei max_uniform_name_length_;
+
+ // Uniform info by index.
+ UniformInfoVector uniform_infos_;
+
+ // This is true if glLinkProgram was successful last time it was called.
+ bool link_status_;
+ };
+
+ Program* GetProgramInfo(GLES2Implementation* gl, GLuint program);
+
+ // TODO(gman): Switch to a faster container.
+ typedef std::map<GLuint, Program> ProgramInfoMap;
+
+ ProgramInfoMap program_infos_;
+
+ mutable base::Lock lock_;
+};
+
+CachedProgramInfoManager::Program::UniformInfo::UniformInfo(
+ GLsizei _size, GLenum _type, const std::string& _name)
+ : size(_size),
+ type(_type),
+ name(_name) {
+ is_array = (!name.empty() && name[name.size() - 1] == ']');
+ DCHECK(!(size > 1 && !is_array));
+}
+
+CachedProgramInfoManager::Program::Program()
+ : cached_(false),
+ max_attrib_name_length_(0),
+ max_uniform_name_length_(0),
+ link_status_(false) {
+}
+
+// TODO(gman): Add a faster lookup.
+GLint CachedProgramInfoManager::Program::GetAttribLocation(
+ const std::string& name) const {
+ for (GLuint ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ if (info.name == name) {
+ return info.location;
+ }
+ }
+ return -1;
+}
+
+GLint CachedProgramInfoManager::Program::GetUniformLocation(
+ const std::string& name) const {
+ bool getting_array_location = false;
+ size_t open_pos = std::string::npos;
+ int index = 0;
+ if (!GLES2Util::ParseUniformName(
+ name, &open_pos, &index, &getting_array_location)) {
+ return -1;
+ }
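+ // Illustrative example (hypothetical names): looking up "u_color[2]" gives
+ // getting_array_location == true, open_pos at the '[' and index == 2; the
+ // loop below matches the cached "u_color[0]" entry and, provided 2 < size,
+ // returns element_locations[2].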
+ for (GLuint ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& info = uniform_infos_[ii];
+ if (info.name == name ||
+ (info.is_array &&
+ info.name.compare(0, info.name.size() - 3, name) == 0)) {
+ return info.element_locations[0];
+ } else if (getting_array_location && info.is_array) {
+ // Look for an array specification.
+ size_t open_pos_2 = info.name.find_last_of('[');
+ if (open_pos_2 == open_pos &&
+ name.compare(0, open_pos, info.name, 0, open_pos) == 0) {
+ if (index >= 0 && index < info.size) {
+ return info.element_locations[index];
+ }
+ }
+ }
+ }
+ return -1;
+}
+
+bool CachedProgramInfoManager::Program::GetProgramiv(
+ GLenum pname, GLint* params) {
+ switch (pname) {
+ case GL_LINK_STATUS:
+ *params = link_status_;
+ return true;
+ case GL_ACTIVE_ATTRIBUTES:
+ *params = attrib_infos_.size();
+ return true;
+ case GL_ACTIVE_ATTRIBUTE_MAX_LENGTH:
+ *params = max_attrib_name_length_;
+ return true;
+ case GL_ACTIVE_UNIFORMS:
+ *params = uniform_infos_.size();
+ return true;
+ case GL_ACTIVE_UNIFORM_MAX_LENGTH:
+ *params = max_uniform_name_length_;
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
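+// Reinterprets a byte range inside |data| as a pointer of type T, after
+// checking that [offset, offset + size) stays within the buffer.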
+template<typename T> static T LocalGetAs(
+ const std::vector<int8>& data, uint32 offset, size_t size) {
+ const int8* p = &data[0] + offset;
+ if (offset + size > data.size()) {
+ NOTREACHED();
+ return NULL;
+ }
+ return static_cast<T>(static_cast<const void*>(p));
+}
+
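+// Parses the blob produced by GetProgramInfoCHROMIUMHelper: a
+// ProgramInfoHeader followed by (num_attribs + num_uniforms) ProgramInput
+// records whose names and locations live at offsets into the same blob.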
+void CachedProgramInfoManager::Program::Update(
+ GLES2Implementation* gl, GLuint program) {
+ if (cached_) {
+ return;
+ }
+ std::vector<int8> result;
+ gl->GetProgramInfoCHROMIUMHelper(program, &result);
+ if (result.empty()) {
+ // This should only happen on a lost context.
+ return;
+ }
+ DCHECK_GE(result.size(), sizeof(ProgramInfoHeader));
+ const ProgramInfoHeader* header = LocalGetAs<const ProgramInfoHeader*>(
+ result, 0, sizeof(ProgramInfoHeader));
+ link_status_ = header->link_status != 0;
+ if (!link_status_) {
+ return;
+ }
+ attrib_infos_.clear();
+ uniform_infos_.clear();
+ max_attrib_name_length_ = 0;
+ max_uniform_name_length_ = 0;
+ const ProgramInput* inputs = LocalGetAs<const ProgramInput*>(
+ result, sizeof(*header),
+ sizeof(ProgramInput) * (header->num_attribs + header->num_uniforms));
+ const ProgramInput* input = inputs;
+ for (uint32 ii = 0; ii < header->num_attribs; ++ii) {
+ const int32* location = LocalGetAs<const int32*>(
+ result, input->location_offset, sizeof(int32));
+ const char* name_buf = LocalGetAs<const char*>(
+ result, input->name_offset, input->name_length);
+ std::string name(name_buf, input->name_length);
+ attrib_infos_.push_back(
+ VertexAttrib(input->size, input->type, name, *location));
+ max_attrib_name_length_ = std::max(
+ static_cast<GLsizei>(name.size() + 1), max_attrib_name_length_);
+ ++input;
+ }
+ for (uint32 ii = 0; ii < header->num_uniforms; ++ii) {
+ const int32* locations = LocalGetAs<const int32*>(
+ result, input->location_offset, sizeof(int32) * input->size);
+ const char* name_buf = LocalGetAs<const char*>(
+ result, input->name_offset, input->name_length);
+ std::string name(name_buf, input->name_length);
+ UniformInfo info(input->size, input->type, name);
+ max_uniform_name_length_ = std::max(
+ static_cast<GLsizei>(name.size() + 1), max_uniform_name_length_);
+ for (int32 jj = 0; jj < input->size; ++jj) {
+ info.element_locations.push_back(locations[jj]);
+ }
+ uniform_infos_.push_back(info);
+ ++input;
+ }
+ DCHECK_EQ(header->num_attribs + header->num_uniforms,
+ static_cast<uint32>(input - inputs));
+ cached_ = true;
+}
+
+CachedProgramInfoManager::CachedProgramInfoManager() {
+}
+
+CachedProgramInfoManager::~CachedProgramInfoManager() {
+}
+
+CachedProgramInfoManager::Program*
+ CachedProgramInfoManager::GetProgramInfo(
+ GLES2Implementation* gl, GLuint program) {
+ lock_.AssertAcquired();
+ ProgramInfoMap::iterator it = program_infos_.find(program);
+ if (it == program_infos_.end()) {
+ return NULL;
+ }
+ Program* info = &it->second;
+ info->Update(gl, program);
+ return info;
+}
+
+void CachedProgramInfoManager::CreateInfo(GLuint program) {
+ base::AutoLock auto_lock(lock_);
+ program_infos_.erase(program);
+ std::pair<ProgramInfoMap::iterator, bool> result =
+ program_infos_.insert(std::make_pair(program, Program()));
+
+ DCHECK(result.second);
+}
+
+void CachedProgramInfoManager::DeleteInfo(GLuint program) {
+ base::AutoLock auto_lock(lock_);
+ program_infos_.erase(program);
+}
+
+bool CachedProgramInfoManager::GetProgramiv(
+ GLES2Implementation* gl, GLuint program, GLenum pname, GLint* params) {
+ base::AutoLock auto_lock(lock_);
+ Program* info = GetProgramInfo(gl, program);
+ if (!info) {
+ return false;
+ }
+ return info->GetProgramiv(pname, params);
+}
+
+GLint CachedProgramInfoManager::GetAttribLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) {
+ base::AutoLock auto_lock(lock_);
+ Program* info = GetProgramInfo(gl, program);
+ if (info) {
+ return info->GetAttribLocation(name);
+ }
+ return gl->GetAttribLocationHelper(program, name);
+}
+
+GLint CachedProgramInfoManager::GetUniformLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) {
+ base::AutoLock auto_lock(lock_);
+ Program* info = GetProgramInfo(gl, program);
+ if (info) {
+ return info->GetUniformLocation(name);
+ }
+ return gl->GetUniformLocationHelper(program, name);
+}
+
+bool CachedProgramInfoManager::GetActiveAttrib(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) {
+ base::AutoLock auto_lock(lock_);
+ Program* info = GetProgramInfo(gl, program);
+ if (info) {
+ const Program::VertexAttrib* attrib_info =
+ info->GetAttribInfo(index);
+ if (attrib_info) {
+ if (size) {
+ *size = attrib_info->size;
+ }
+ if (type) {
+ *type = attrib_info->type;
+ }
+ if (length || name) {
+ GLsizei max_size = std::min(static_cast<size_t>(bufsize) - 1,
+ std::max(static_cast<size_t>(0),
+ attrib_info->name.size()));
+ if (length) {
+ *length = max_size;
+ }
+ if (name && bufsize > 0) {
+ memcpy(name, attrib_info->name.c_str(), max_size);
+ name[max_size] = '\0';
+ }
+ }
+ return true;
+ }
+ }
+ return gl->GetActiveAttribHelper(
+ program, index, bufsize, length, size, type, name);
+}
+
+bool CachedProgramInfoManager::GetActiveUniform(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) {
+ base::AutoLock auto_lock(lock_);
+ Program* info = GetProgramInfo(gl, program);
+ if (info) {
+ const Program::UniformInfo* uniform_info = info->GetUniformInfo(index);
+ if (uniform_info) {
+ if (size) {
+ *size = uniform_info->size;
+ }
+ if (type) {
+ *type = uniform_info->type;
+ }
+ if (length || name) {
+ GLsizei max_size = std::min(static_cast<size_t>(bufsize) - 1,
+ std::max(static_cast<size_t>(0),
+ uniform_info->name.size()));
+ if (length) {
+ *length = max_size;
+ }
+ if (name && bufsize > 0) {
+ memcpy(name, uniform_info->name.c_str(), max_size);
+ name[max_size] = '\0';
+ }
+ }
+ return true;
+ }
+ }
+ return gl->GetActiveUniformHelper(
+ program, index, bufsize, length, size, type, name);
+}
+
+ProgramInfoManager::ProgramInfoManager() {
+}
+
+ProgramInfoManager::~ProgramInfoManager() {
+}
+
+ProgramInfoManager* ProgramInfoManager::Create(
+ bool shared_resources_across_processes) {
+ if (shared_resources_across_processes) {
+ return new NonCachedProgramInfoManager();
+ } else {
+ return new CachedProgramInfoManager();
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/client/program_info_manager.h b/gpu/command_buffer/client/program_info_manager.h
new file mode 100644
index 0000000..099f182
--- /dev/null
+++ b/gpu/command_buffer/client/program_info_manager.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_PROGRAM_INFO_MANAGER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_PROGRAM_INFO_MANAGER_H_
+
+#include <GLES2/gl2.h>
+#include "gles2_impl_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2Implementation;
+
+// Manages info about OpenGL ES Programs.
+class GLES2_IMPL_EXPORT ProgramInfoManager {
+ public:
+ virtual ~ProgramInfoManager();
+
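+ // Returns a pass-through (non-cached) implementation when resources are
+ // shared across processes, and a caching implementation otherwise.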
+ static ProgramInfoManager* Create(bool shared_resources_across_processes);
+
+ virtual void CreateInfo(GLuint program) = 0;
+
+ virtual void DeleteInfo(GLuint program) = 0;
+
+ virtual bool GetProgramiv(
+ GLES2Implementation* gl, GLuint program, GLenum pname, GLint* params) = 0;
+
+ virtual GLint GetAttribLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) = 0;
+
+ virtual GLint GetUniformLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) = 0;
+
+ virtual bool GetActiveAttrib(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) = 0;
+
+ virtual bool GetActiveUniform(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) = 0;
+
+ protected:
+ ProgramInfoManager();
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_PROGRAM_INFO_MANAGER_H_
diff --git a/gpu/command_buffer/client/program_info_manager_unittest.cc b/gpu/command_buffer/client/program_info_manager_unittest.cc
new file mode 100644
index 0000000..e5002fd
--- /dev/null
+++ b/gpu/command_buffer/client/program_info_manager_unittest.cc
@@ -0,0 +1,32 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the Command Buffer Helper.
+
+#include "gpu/command_buffer/client/program_info_manager.h"
+#include "base/memory/scoped_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+namespace gles2 {
+
+class ProgramInfoManagerTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ }
+
+ virtual void TearDown() {
+ }
+
+ scoped_ptr<ProgramInfoManager> program_info_manager_;
+};
+
+TEST_F(ProgramInfoManagerTest, Basic) {
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/query_tracker.cc b/gpu/command_buffer/client/query_tracker.cc
new file mode 100644
index 0000000..c12d975
--- /dev/null
+++ b/gpu/command_buffer/client/query_tracker.cc
@@ -0,0 +1,261 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/query_tracker.h"
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "base/atomicops.h"
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/common/time.h"
+
+namespace gpu {
+namespace gles2 {
+
+QuerySyncManager::QuerySyncManager(MappedMemoryManager* manager)
+ : mapped_memory_(manager) {
+ DCHECK(manager);
+}
+
+QuerySyncManager::~QuerySyncManager() {
+ while (!buckets_.empty()) {
+ mapped_memory_->Free(buckets_.front()->syncs);
+ delete buckets_.front();
+ buckets_.pop_front();
+ }
+}
+
+bool QuerySyncManager::Alloc(QuerySyncManager::QueryInfo* info) {
+ DCHECK(info);
+ if (free_queries_.empty()) {
+ int32 shm_id;
+ unsigned int shm_offset;
+ void* mem = mapped_memory_->Alloc(
+ kSyncsPerBucket * sizeof(QuerySync), &shm_id, &shm_offset);
+ if (!mem) {
+ return false;
+ }
+ QuerySync* syncs = static_cast<QuerySync*>(mem);
+ Bucket* bucket = new Bucket(syncs);
+ buckets_.push_back(bucket);
+ for (size_t ii = 0; ii < kSyncsPerBucket; ++ii) {
+ free_queries_.push_back(QueryInfo(bucket, shm_id, shm_offset, syncs));
+ ++syncs;
+ shm_offset += sizeof(*syncs);
+ }
+ }
+ *info = free_queries_.front();
+ ++(info->bucket->used_query_count);
+ info->sync->Reset();
+ free_queries_.pop_front();
+ return true;
+}
+
+void QuerySyncManager::Free(const QuerySyncManager::QueryInfo& info) {
+ DCHECK_GT(info.bucket->used_query_count, 0u);
+ --(info.bucket->used_query_count);
+ free_queries_.push_back(info);
+}
+
+void QuerySyncManager::Shrink() {
+ std::deque<QueryInfo> new_queue;
+ while (!free_queries_.empty()) {
+ if (free_queries_.front().bucket->used_query_count)
+ new_queue.push_back(free_queries_.front());
+ free_queries_.pop_front();
+ }
+ free_queries_.swap(new_queue);
+
+ std::deque<Bucket*> new_buckets;
+ while (!buckets_.empty()) {
+ Bucket* bucket = buckets_.front();
+ if (bucket->used_query_count) {
+ new_buckets.push_back(bucket);
+ } else {
+ mapped_memory_->Free(bucket->syncs);
+ delete bucket;
+ }
+ buckets_.pop_front();
+ }
+ buckets_.swap(new_buckets);
+}
+
+QueryTracker::Query::Query(GLuint id, GLenum target,
+ const QuerySyncManager::QueryInfo& info)
+ : id_(id),
+ target_(target),
+ info_(info),
+ state_(kUninitialized),
+ submit_count_(0),
+ token_(0),
+ flush_count_(0),
+ client_begin_time_us_(0),
+ result_(0) {
+}
+
+void QueryTracker::Query::Begin(GLES2Implementation* gl) {
+ // init memory, inc count
+ MarkAsActive();
+
+ switch (target()) {
+ case GL_GET_ERROR_QUERY_CHROMIUM:
+ // Do nothing on begin for error queries.
+ break;
+ case GL_LATENCY_QUERY_CHROMIUM:
+ client_begin_time_us_ = MicrosecondsSinceOriginOfTime();
+ // tell service about id, shared memory and count
+ gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
+ break;
+ case GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM:
+ case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
+ default:
+ // tell service about id, shared memory and count
+ gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
+ break;
+ }
+}
+
+void QueryTracker::Query::End(GLES2Implementation* gl) {
+ switch (target()) {
+ case GL_GET_ERROR_QUERY_CHROMIUM: {
+ GLenum error = gl->GetClientSideGLError();
+ if (error == GL_NO_ERROR) {
+ // There was no error so start the query on the service.
+ // It will end immediately.
+ gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
+ } else {
+ // There's an error on the client, no need to bother the service. Just
+ // set the query as completed and return the error.
+ if (error != GL_NO_ERROR) {
+ state_ = kComplete;
+ result_ = error;
+ return;
+ }
+ }
+ }
+ }
+ flush_count_ = gl->helper()->flush_generation();
+ gl->helper()->EndQueryEXT(target(), submit_count());
+ MarkAsPending(gl->helper()->InsertToken());
+}
+
+bool QueryTracker::Query::CheckResultsAvailable(
+ CommandBufferHelper* helper) {
+ if (Pending()) {
+ if (base::subtle::Acquire_Load(&info_.sync->process_count) ==
+ submit_count_ ||
+ helper->IsContextLost()) {
+ switch (target()) {
+ case GL_COMMANDS_ISSUED_CHROMIUM:
+ result_ = std::min(info_.sync->result,
+ static_cast<uint64>(0xFFFFFFFFL));
+ break;
+ case GL_LATENCY_QUERY_CHROMIUM:
+ // Disabled DCHECK because of http://crbug.com/419236.
+ //DCHECK(info_.sync->result >= client_begin_time_us_);
+ result_ = std::min(info_.sync->result - client_begin_time_us_,
+ static_cast<uint64>(0xFFFFFFFFL));
+ break;
+ case GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM:
+ case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
+ default:
+ result_ = info_.sync->result;
+ break;
+ }
+ state_ = kComplete;
+ } else {
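+ // The unsigned subtraction below wraps when no flush has happened since
+ // EndQueryEXT (flush_generation() still equals flush_count_); in that
+ // case flush once so the service actually sees the query.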
+ if ((helper->flush_generation() - flush_count_ - 1) >= 0x80000000) {
+ helper->Flush();
+ } else {
+ // Insert no-ops so that eventually the GPU process will see more work.
+ helper->Noop(1);
+ }
+ }
+ }
+ return state_ == kComplete;
+}
+
+uint32 QueryTracker::Query::GetResult() const {
+ DCHECK(state_ == kComplete || state_ == kUninitialized);
+ return result_;
+}
+
+QueryTracker::QueryTracker(MappedMemoryManager* manager)
+ : query_sync_manager_(manager) {
+}
+
+QueryTracker::~QueryTracker() {
+ while (!queries_.empty()) {
+ delete queries_.begin()->second;
+ queries_.erase(queries_.begin());
+ }
+ while (!removed_queries_.empty()) {
+ delete removed_queries_.front();
+ removed_queries_.pop_front();
+ }
+}
+
+QueryTracker::Query* QueryTracker::CreateQuery(GLuint id, GLenum target) {
+ DCHECK_NE(0u, id);
+ FreeCompletedQueries();
+ QuerySyncManager::QueryInfo info;
+ if (!query_sync_manager_.Alloc(&info)) {
+ return NULL;
+ }
+ Query* query = new Query(id, target, info);
+ std::pair<QueryMap::iterator, bool> result =
+ queries_.insert(std::make_pair(id, query));
+ DCHECK(result.second);
+ return query;
+}
+
+QueryTracker::Query* QueryTracker::GetQuery(
+ GLuint client_id) {
+ QueryMap::iterator it = queries_.find(client_id);
+ return it != queries_.end() ? it->second : NULL;
+}
+
+void QueryTracker::RemoveQuery(GLuint client_id) {
+ QueryMap::iterator it = queries_.find(client_id);
+ if (it != queries_.end()) {
+ Query* query = it->second;
+ // When you delete a query you can't mark its memory as unused until it's
+ // completed.
+ // Note: skipping this would not confuse the service, but the client could
+ // end up reusing the sync memory while the service is still writing to it.
+ removed_queries_.push_back(query);
+ queries_.erase(it);
+ FreeCompletedQueries();
+ }
+}
+
+void QueryTracker::Shrink() {
+ FreeCompletedQueries();
+ query_sync_manager_.Shrink();
+}
+
+void QueryTracker::FreeCompletedQueries() {
+ QueryList::iterator it = removed_queries_.begin();
+ while (it != removed_queries_.end()) {
+ Query* query = *it;
+ if (query->Pending() &&
+ base::subtle::Acquire_Load(&query->info_.sync->process_count) !=
+ query->submit_count()) {
+ ++it;
+ continue;
+ }
+
+ query_sync_manager_.Free(query->info_);
+ it = removed_queries_.erase(it);
+ delete query;
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/query_tracker.h b/gpu/command_buffer/client/query_tracker.h
new file mode 100644
index 0000000..72e29e7
--- /dev/null
+++ b/gpu/command_buffer/client/query_tracker.h
@@ -0,0 +1,177 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_QUERY_TRACKER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_QUERY_TRACKER_H_
+
+#include <GLES2/gl2.h>
+
+#include <deque>
+#include <list>
+
+#include "base/atomicops.h"
+#include "base/containers/hash_tables.h"
+#include "gles2_impl_export.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+class MappedMemoryManager;
+
+namespace gles2 {
+
+class GLES2Implementation;
+
+// Manages buckets of QuerySync instances in mapped memory.
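+// Each bucket holds kSyncsPerBucket QuerySync slots allocated from the
+// MappedMemoryManager; Alloc() hands out free slots, Free() returns them and
+// Shrink() releases buckets whose slots are all unused.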
+class GLES2_IMPL_EXPORT QuerySyncManager {
+ public:
+ static const size_t kSyncsPerBucket = 4096;
+
+ struct Bucket {
+ explicit Bucket(QuerySync* sync_mem)
+ : syncs(sync_mem),
+ used_query_count(0) {
+ }
+ QuerySync* syncs;
+ unsigned used_query_count;
+ };
+ struct QueryInfo {
+ QueryInfo(Bucket* bucket, int32 id, uint32 offset, QuerySync* sync_mem)
+ : bucket(bucket),
+ shm_id(id),
+ shm_offset(offset),
+ sync(sync_mem) {
+ }
+
+ QueryInfo()
+ : bucket(NULL),
+ shm_id(0),
+ shm_offset(0),
+ sync(NULL) {
+ }
+
+ Bucket* bucket;
+ int32 shm_id;
+ uint32 shm_offset;
+ QuerySync* sync;
+ };
+
+ explicit QuerySyncManager(MappedMemoryManager* manager);
+ ~QuerySyncManager();
+
+ bool Alloc(QueryInfo* info);
+ void Free(const QueryInfo& sync);
+ void Shrink();
+
+ private:
+ MappedMemoryManager* mapped_memory_;
+ std::deque<Bucket*> buckets_;
+ std::deque<QueryInfo> free_queries_;
+
+ DISALLOW_COPY_AND_ASSIGN(QuerySyncManager);
+};
+
+// Tracks queries for client side of command buffer.
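+// A Query moves from kUninitialized to kActive (Begin), kPending (End) and
+// finally kComplete once the service has published its result; queries
+// removed while still pending keep their sync memory until they complete
+// (see FreeCompletedQueries).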
+class GLES2_IMPL_EXPORT QueryTracker {
+ public:
+ class GLES2_IMPL_EXPORT Query {
+ public:
+ enum State {
+ kUninitialized, // never used
+ kActive, // between begin - end
+ kPending, // not yet complete
+ kComplete // completed
+ };
+
+ Query(GLuint id, GLenum target, const QuerySyncManager::QueryInfo& info);
+
+ GLenum target() const {
+ return target_;
+ }
+
+ GLenum id() const {
+ return id_;
+ }
+
+ int32 shm_id() const {
+ return info_.shm_id;
+ }
+
+ uint32 shm_offset() const {
+ return info_.shm_offset;
+ }
+
+ void MarkAsActive() {
+ state_ = kActive;
+ ++submit_count_;
+ if (submit_count_ == INT_MAX)
+ submit_count_ = 1;
+ }
+
+ void MarkAsPending(int32 token) {
+ token_ = token;
+ state_ = kPending;
+ }
+
+ base::subtle::Atomic32 submit_count() const { return submit_count_; }
+
+ int32 token() const {
+ return token_;
+ }
+
+ bool NeverUsed() const {
+ return state_ == kUninitialized;
+ }
+
+ bool Pending() const {
+ return state_ == kPending;
+ }
+
+ bool CheckResultsAvailable(CommandBufferHelper* helper);
+
+ uint32 GetResult() const;
+
+ void Begin(GLES2Implementation* gl);
+ void End(GLES2Implementation* gl);
+
+ private:
+ friend class QueryTracker;
+ friend class QueryTrackerTest;
+
+ GLuint id_;
+ GLenum target_;
+ QuerySyncManager::QueryInfo info_;
+ State state_;
+ base::subtle::Atomic32 submit_count_;
+ int32 token_;
+ uint32 flush_count_;
+ uint64 client_begin_time_us_; // Only used for latency query target.
+ uint32 result_;
+ };
+
+ explicit QueryTracker(MappedMemoryManager* manager);
+ ~QueryTracker();
+
+ Query* CreateQuery(GLuint id, GLenum target);
+ Query* GetQuery(GLuint id);
+ void RemoveQuery(GLuint id);
+ void Shrink();
+ void FreeCompletedQueries();
+
+ private:
+ typedef base::hash_map<GLuint, Query*> QueryMap;
+ typedef std::list<Query*> QueryList;
+
+ QueryMap queries_;
+ QueryList removed_queries_;
+ QuerySyncManager query_sync_manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(QueryTracker);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_QUERY_TRACKER_H_
diff --git a/gpu/command_buffer/client/query_tracker_unittest.cc b/gpu/command_buffer/client/query_tracker_unittest.cc
new file mode 100644
index 0000000..cd2ccf6
--- /dev/null
+++ b/gpu/command_buffer/client/query_tracker_unittest.cc
@@ -0,0 +1,238 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the QueryTracker.
+
+#include "gpu/command_buffer/client/query_tracker.h"
+
+#include <GLES2/gl2ext.h>
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+void EmptyPoll() {
+}
+}
+
+class QuerySyncManagerTest : public testing::Test {
+ protected:
+ static const int32 kNumCommandEntries = 400;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+
+ virtual void SetUp() {
+ command_buffer_.reset(new MockClientCommandBuffer());
+ helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
+ helper_->Initialize(kCommandBufferSizeBytes);
+ mapped_memory_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
+ sync_manager_.reset(new QuerySyncManager(mapped_memory_.get()));
+ }
+
+ virtual void TearDown() {
+ sync_manager_.reset();
+ mapped_memory_.reset();
+ helper_.reset();
+ command_buffer_.reset();
+ }
+
+ scoped_ptr<CommandBuffer> command_buffer_;
+ scoped_ptr<GLES2CmdHelper> helper_;
+ scoped_ptr<MappedMemoryManager> mapped_memory_;
+ scoped_ptr<QuerySyncManager> sync_manager_;
+};
+
+TEST_F(QuerySyncManagerTest, Basic) {
+ QuerySyncManager::QueryInfo infos[4];
+ memset(&infos, 0xBD, sizeof(infos));
+
+ for (size_t ii = 0; ii < arraysize(infos); ++ii) {
+ EXPECT_TRUE(sync_manager_->Alloc(&infos[ii]));
+ EXPECT_NE(0, infos[ii].shm_id);
+ ASSERT_TRUE(infos[ii].sync != NULL);
+ EXPECT_EQ(0, infos[ii].sync->process_count);
+ EXPECT_EQ(0u, infos[ii].sync->result);
+ }
+
+ for (size_t ii = 0; ii < arraysize(infos); ++ii) {
+ sync_manager_->Free(infos[ii]);
+ }
+}
+
+TEST_F(QuerySyncManagerTest, DontFree) {
+ QuerySyncManager::QueryInfo infos[4];
+ memset(&infos, 0xBD, sizeof(infos));
+
+ for (size_t ii = 0; ii < arraysize(infos); ++ii) {
+ EXPECT_TRUE(sync_manager_->Alloc(&infos[ii]));
+ }
+}
+
+class QueryTrackerTest : public testing::Test {
+ protected:
+ static const int32 kNumCommandEntries = 400;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+
+ virtual void SetUp() {
+ command_buffer_.reset(new MockClientCommandBuffer());
+ helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
+ helper_->Initialize(kCommandBufferSizeBytes);
+ mapped_memory_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
+ query_tracker_.reset(new QueryTracker(mapped_memory_.get()));
+ }
+
+ virtual void TearDown() {
+ query_tracker_.reset();
+ mapped_memory_.reset();
+ helper_.reset();
+ command_buffer_.reset();
+ }
+
+ QuerySync* GetSync(QueryTracker::Query* query) {
+ return query->info_.sync;
+ }
+
+ QuerySyncManager::Bucket* GetBucket(QueryTracker::Query* query) {
+ return query->info_.bucket;
+ }
+
+ uint32 GetFlushGeneration() { return helper_->flush_generation(); }
+
+ scoped_ptr<CommandBuffer> command_buffer_;
+ scoped_ptr<GLES2CmdHelper> helper_;
+ scoped_ptr<MappedMemoryManager> mapped_memory_;
+ scoped_ptr<QueryTracker> query_tracker_;
+};
+
+TEST_F(QueryTrackerTest, Basic) {
+ const GLuint kId1 = 123;
+ const GLuint kId2 = 124;
+
+ // Check we can create a Query.
+ QueryTracker::Query* query = query_tracker_->CreateQuery(
+ kId1, GL_ANY_SAMPLES_PASSED_EXT);
+ ASSERT_TRUE(query != NULL);
+ // Check we can get the same Query.
+ EXPECT_EQ(query, query_tracker_->GetQuery(kId1));
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(query_tracker_->GetQuery(kId2) == NULL);
+ // Check we can delete the query.
+ query_tracker_->RemoveQuery(kId1);
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(query_tracker_->GetQuery(kId1) == NULL);
+}
+
+TEST_F(QueryTrackerTest, Query) {
+ const GLuint kId1 = 123;
+ const int32 kToken = 46;
+ const uint32 kResult = 456;
+
+ // Create a Query.
+ QueryTracker::Query* query = query_tracker_->CreateQuery(
+ kId1, GL_ANY_SAMPLES_PASSED_EXT);
+ ASSERT_TRUE(query != NULL);
+ EXPECT_TRUE(query->NeverUsed());
+ EXPECT_FALSE(query->Pending());
+ EXPECT_EQ(0, query->token());
+ EXPECT_EQ(0, query->submit_count());
+
+ // Check MarkAsActive.
+ query->MarkAsActive();
+ EXPECT_FALSE(query->NeverUsed());
+ EXPECT_FALSE(query->Pending());
+ EXPECT_EQ(0, query->token());
+ EXPECT_EQ(1, query->submit_count());
+
+ // Check MarkAsPending.
+ query->MarkAsPending(kToken);
+ EXPECT_FALSE(query->NeverUsed());
+ EXPECT_TRUE(query->Pending());
+ EXPECT_EQ(kToken, query->token());
+ EXPECT_EQ(1, query->submit_count());
+
+ // Check CheckResultsAvailable.
+ EXPECT_FALSE(query->CheckResultsAvailable(helper_.get()));
+ EXPECT_FALSE(query->NeverUsed());
+ EXPECT_TRUE(query->Pending());
+
+ // Flush only once if no more flushes happened between the EndQuery
+ // command and CheckResultsAvailable.
+ // Advance put_ so flush calls in CheckResultsAvailable go through
+ // and update the flush_generation count.
+ helper_->Noop(1);
+ // Put the Query in the pending state to simulate an EndQuery command.
+ query->MarkAsPending(kToken);
+ EXPECT_TRUE(query->Pending());
+ // Store FlushGeneration count after EndQuery is called
+ uint32 gen1 = GetFlushGeneration();
+ EXPECT_FALSE(query->CheckResultsAvailable(helper_.get()));
+ uint32 gen2 = GetFlushGeneration();
+ EXPECT_NE(gen1, gen2);
+ // Repeated calls to CheckResultsAvailable should not flush unnecessarily
+ EXPECT_FALSE(query->CheckResultsAvailable(helper_.get()));
+ gen1 = GetFlushGeneration();
+ EXPECT_EQ(gen1, gen2);
+ EXPECT_FALSE(query->CheckResultsAvailable(helper_.get()));
+ gen1 = GetFlushGeneration();
+ EXPECT_EQ(gen1, gen2);
+
+ // Simulate GPU process marking it as available.
+ QuerySync* sync = GetSync(query);
+ sync->process_count = query->submit_count();
+ sync->result = kResult;
+
+ // Check CheckResultsAvailable.
+ EXPECT_TRUE(query->CheckResultsAvailable(helper_.get()));
+ EXPECT_EQ(kResult, query->GetResult());
+ EXPECT_FALSE(query->NeverUsed());
+ EXPECT_FALSE(query->Pending());
+}
+
+TEST_F(QueryTrackerTest, Remove) {
+ const GLuint kId1 = 123;
+ const int32 kToken = 46;
+ const uint32 kResult = 456;
+
+ // Create a Query.
+ QueryTracker::Query* query = query_tracker_->CreateQuery(
+ kId1, GL_ANY_SAMPLES_PASSED_EXT);
+ ASSERT_TRUE(query != NULL);
+
+ QuerySyncManager::Bucket* bucket = GetBucket(query);
+ EXPECT_EQ(1u, bucket->used_query_count);
+
+ query->MarkAsActive();
+ query->MarkAsPending(kToken);
+
+ query_tracker_->RemoveQuery(kId1);
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(query_tracker_->GetQuery(kId1) == NULL);
+
+ // Check that memory was not freed.
+ EXPECT_EQ(1u, bucket->used_query_count);
+
+ // Simulate GPU process marking it as available.
+ QuerySync* sync = GetSync(query);
+ sync->process_count = query->submit_count();
+ sync->result = kResult;
+
+ // Check FreeCompletedQueries.
+ query_tracker_->FreeCompletedQueries();
+ EXPECT_EQ(0u, bucket->used_query_count);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/ref_counted.h b/gpu/command_buffer/client/ref_counted.h
new file mode 100644
index 0000000..79ee8bc
--- /dev/null
+++ b/gpu/command_buffer/client/ref_counted.h
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_REF_COUNTED_H_
+#define GPU_COMMAND_BUFFER_CLIENT_REF_COUNTED_H_
+
+#include "base/memory/ref_counted.h"
+namespace gpu {
+using base::RefCountedThreadSafe;
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_REF_COUNTED_H_
diff --git a/gpu/command_buffer/client/ring_buffer.cc b/gpu/command_buffer/client/ring_buffer.cc
new file mode 100644
index 0000000..813bb34
--- /dev/null
+++ b/gpu/command_buffer/client/ring_buffer.cc
@@ -0,0 +1,131 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of the RingBuffer class.
+
+#include "gpu/command_buffer/client/ring_buffer.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+namespace gpu {
+
+RingBuffer::RingBuffer(unsigned int alignment, Offset base_offset,
+ unsigned int size, CommandBufferHelper* helper,
+ void* base)
+ : helper_(helper),
+ base_offset_(base_offset),
+ size_(size),
+ free_offset_(0),
+ in_use_offset_(0),
+ alignment_(alignment),
+ base_(static_cast<int8*>(base) - base_offset) {
+}
+
+RingBuffer::~RingBuffer() {
+ // Free blocks pending tokens.
+ while (!blocks_.empty()) {
+ FreeOldestBlock();
+ }
+}
+
+void RingBuffer::FreeOldestBlock() {
+ DCHECK(!blocks_.empty()) << "no free blocks";
+ Block& block = blocks_.front();
+ DCHECK(block.state != IN_USE)
+ << "attempt to allocate more than maximum memory";
+ if (block.state == FREE_PENDING_TOKEN) {
+ helper_->WaitForToken(block.token);
+ }
+ in_use_offset_ += block.size;
+ if (in_use_offset_ == size_) {
+ in_use_offset_ = 0;
+ }
+ // If they match then the entire buffer is free.
+ if (in_use_offset_ == free_offset_) {
+ in_use_offset_ = 0;
+ free_offset_ = 0;
+ }
+ blocks_.pop_front();
+}
+
+void* RingBuffer::Alloc(unsigned int size) {
+ DCHECK_LE(size, size_) << "attempt to allocate more than maximum memory";
+ DCHECK(blocks_.empty() || blocks_.back().state != IN_USE)
+ << "Attempt to alloc another block before freeing the previous.";
+ // Similarly to malloc, an allocation of 0 allocates at least 1 byte, to
+ // return different pointers every time.
+ if (size == 0) size = 1;
+ // Allocate rounded to alignment size so that the offsets are always
+ // memory-aligned.
+ size = RoundToAlignment(size);
+
+ // Wait until there is enough room.
+ while (size > GetLargestFreeSizeNoWaiting()) {
+ FreeOldestBlock();
+ }
+
+ if (size + free_offset_ > size_) {
+ // Add padding to fill space before wrapping around
+ blocks_.push_back(Block(free_offset_, size_ - free_offset_, PADDING));
+ free_offset_ = 0;
+ }
+
+ Offset offset = free_offset_;
+ blocks_.push_back(Block(offset, size, IN_USE));
+ free_offset_ += size;
+ if (free_offset_ == size_) {
+ free_offset_ = 0;
+ }
+ return GetPointer(offset + base_offset_);
+}
+
+void RingBuffer::FreePendingToken(void* pointer,
+ unsigned int token) {
+ Offset offset = GetOffset(pointer);
+ offset -= base_offset_;
+ DCHECK(!blocks_.empty()) << "no allocations to free";
+ for (Container::reverse_iterator it = blocks_.rbegin();
+ it != blocks_.rend();
+ ++it) {
+ Block& block = *it;
+ if (block.offset == offset) {
+ DCHECK(block.state == IN_USE)
+ << "block that corresponds to offset already freed";
+ block.token = token;
+ block.state = FREE_PENDING_TOKEN;
+ return;
+ }
+ }
+ NOTREACHED() << "attempt to free non-existent block";
+}
+
+unsigned int RingBuffer::GetLargestFreeSizeNoWaiting() {
+ unsigned int last_token_read = helper_->last_token_read();
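+ // First reclaim, without blocking, any leading blocks whose tokens have
+ // already been read (or that are padding).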
+ while (!blocks_.empty()) {
+ Block& block = blocks_.front();
+ if (block.token > last_token_read || block.state == IN_USE) break;
+ FreeOldestBlock();
+ }
+ if (free_offset_ == in_use_offset_) {
+ if (blocks_.empty()) {
+ // The entire buffer is free.
+ DCHECK_EQ(free_offset_, 0u);
+ return size_;
+ } else {
+ // The entire buffer is in use.
+ return 0;
+ }
+ } else if (free_offset_ > in_use_offset_) {
+ // It's free from free_offset_ to size_ and from 0 to in_use_offset_
+ return std::max(size_ - free_offset_, in_use_offset_);
+ } else {
+ // It's free from free_offset_ -> in_use_offset_;
+ return in_use_offset_ - free_offset_;
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/ring_buffer.h b/gpu/command_buffer/client/ring_buffer.h
new file mode 100644
index 0000000..dfe16f7
--- /dev/null
+++ b/gpu/command_buffer/client/ring_buffer.h
@@ -0,0 +1,137 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the definition of the RingBuffer class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_RING_BUFFER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_RING_BUFFER_H_
+
+#include <deque>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+class CommandBufferHelper;
+
+// RingBuffer manages a piece of memory as a ring buffer. Memory is allocated
+// with Alloc and then freed pending a token with FreePendingToken. Old
+// allocations must not be kept past new allocations.
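+//
+// Minimal usage sketch (illustrative only; |helper| and |base_ptr| are
+// assumed to exist):
+//   RingBuffer ring(4 /* alignment */, 0, size, helper, base_ptr);
+//   void* mem = ring.Alloc(64);
+//   // ... write data and issue commands that reference it ...
+//   ring.FreePendingToken(mem, helper->InsertToken());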
+class GPU_EXPORT RingBuffer {
+ public:
+ typedef unsigned int Offset;
+
+ // Creates a RingBuffer.
+ // Parameters:
+ // alignment: Alignment for allocations.
+ // base_offset: The offset of the start of the buffer.
+ // size: The size of the buffer in bytes.
+ // helper: A CommandBufferHelper for dealing with tokens.
+ // base: The physical address that corresponds to base_offset.
+ RingBuffer(unsigned int alignment, Offset base_offset,
+ unsigned int size, CommandBufferHelper* helper, void* base);
+
+ ~RingBuffer();
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ //
+ // Parameters:
+ // size: the size of the memory block to allocate.
+ //
+ // Returns:
+ // the pointer to the allocated memory block.
+ void* Alloc(unsigned int size);
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(void* pointer, unsigned int token);
+
+ // Gets the size of the largest free block that is available without waiting.
+ unsigned int GetLargestFreeSizeNoWaiting();
+
+ // Gets the size of the largest free block that can be allocated if the
+ // caller can wait. Allocating a block of this size will succeed, but may
+ // block.
+ unsigned int GetLargestFreeOrPendingSize() {
+ return size_;
+ }
+
+ // Gets a pointer to a memory block given the base memory and the offset.
+ void* GetPointer(RingBuffer::Offset offset) const {
+ return static_cast<int8*>(base_) + offset;
+ }
+
+ // Gets the offset to a memory block given the base memory and the address.
+ RingBuffer::Offset GetOffset(void* pointer) const {
+ return static_cast<int8*>(pointer) - static_cast<int8*>(base_);
+ }
+
+ // Rounds the given size to the alignment in use.
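+ // For example, with alignment_ == 4, RoundToAlignment(13) returns 16.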
+ unsigned int RoundToAlignment(unsigned int size) {
+ return (size + alignment_ - 1) & ~(alignment_ - 1);
+ }
+
+
+ private:
+ enum State {
+ IN_USE,
+ PADDING,
+ FREE_PENDING_TOKEN
+ };
+ // Book-keeping structure that describes a block of memory.
+ struct Block {
+ Block(Offset _offset, unsigned int _size, State _state)
+ : offset(_offset),
+ size(_size),
+ token(0),
+ state(_state) {
+ }
+ Offset offset;
+ unsigned int size;
+ unsigned int token; // token to wait for.
+ State state;
+ };
+
+ typedef std::deque<Block> Container;
+ typedef unsigned int BlockIndex;
+
+ void FreeOldestBlock();
+
+ CommandBufferHelper* helper_;
+
+ // Used blocks are added to the end, blocks are freed from the beginning.
+ Container blocks_;
+
+ // The base offset of the ring buffer.
+ Offset base_offset_;
+
+ // The size of the ring buffer.
+ Offset size_;
+
+ // Offset of first free byte.
+ Offset free_offset_;
+
+ // Offset of first used byte.
+ // Range between in_use_mark and free_mark is in use.
+ Offset in_use_offset_;
+
+ // Alignment for allocations.
+ unsigned int alignment_;
+
+ // The physical address that corresponds to base_offset.
+ void* base_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RingBuffer);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_RING_BUFFER_H_
diff --git a/gpu/command_buffer/client/ring_buffer_test.cc b/gpu/command_buffer/client/ring_buffer_test.cc
new file mode 100644
index 0000000..b3aca13
--- /dev/null
+++ b/gpu/command_buffer/client/ring_buffer_test.cc
@@ -0,0 +1,216 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the tests for the RingBuffer class.
+
+#include "gpu/command_buffer/client/ring_buffer.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace gpu {
+
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::_;
+
+class BaseRingBufferTest : public testing::Test {
+ protected:
+ static const unsigned int kBaseOffset = 128;
+ static const unsigned int kBufferSize = 1024;
+ static const unsigned int kAlignment = 4;
+
+ void RunPendingSetToken() {
+ for (std::vector<const void*>::iterator it = set_token_arguments_.begin();
+ it != set_token_arguments_.end();
+ ++it) {
+ api_mock_->SetToken(cmd::kSetToken, 1, *it);
+ }
+ set_token_arguments_.clear();
+ delay_set_token_ = false;
+ }
+
+ void SetToken(unsigned int command,
+ unsigned int arg_count,
+ const void* _args) {
+ EXPECT_EQ(static_cast<unsigned int>(cmd::kSetToken), command);
+ EXPECT_EQ(1u, arg_count);
+ if (delay_set_token_)
+ set_token_arguments_.push_back(_args);
+ else
+ api_mock_->SetToken(cmd::kSetToken, 1, _args);
+ }
+
+ virtual void SetUp() {
+ delay_set_token_ = false;
+ api_mock_.reset(new AsyncAPIMock(true));
+ // ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
+ .WillRepeatedly(Return(error::kNoError));
+ // Forward the SetToken calls to the engine
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillRepeatedly(DoAll(Invoke(this, &BaseRingBufferTest::SetToken),
+ Return(error::kNoError)));
+
+ {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ EXPECT_TRUE(manager->Initialize());
+ }
+ command_buffer_.reset(
+ new CommandBufferService(transfer_buffer_manager_.get()));
+ EXPECT_TRUE(command_buffer_->Initialize());
+
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), api_mock_.get(), NULL));
+ command_buffer_->SetPutOffsetChangeCallback(base::Bind(
+ &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
+ command_buffer_->SetGetBufferChangeCallback(base::Bind(
+ &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
+
+ api_mock_->set_engine(gpu_scheduler_.get());
+
+ helper_.reset(new CommandBufferHelper(command_buffer_.get()));
+ helper_->Initialize(kBufferSize);
+ }
+
+ int32 GetToken() {
+ return command_buffer_->GetLastState().token;
+ }
+
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool autorelease_pool_;
+#endif
+ base::MessageLoop message_loop_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<CommandBufferService> command_buffer_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_ptr<CommandBufferHelper> helper_;
+ std::vector<const void*> set_token_arguments_;
+ bool delay_set_token_;
+
+ scoped_ptr<int8[]> buffer_;
+ int8* buffer_start_;
+};
+
+#ifndef _MSC_VER
+const unsigned int BaseRingBufferTest::kBaseOffset;
+const unsigned int BaseRingBufferTest::kBufferSize;
+#endif
+
+// Test fixture for RingBuffer test - Creates a RingBuffer, using a
+// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
+// it directly, not through the RPC mechanism), making sure Noops are ignored
+// and SetToken calls are properly forwarded to the engine.
+class RingBufferTest : public BaseRingBufferTest {
+ protected:
+ virtual void SetUp() {
+ BaseRingBufferTest::SetUp();
+
+ buffer_.reset(new int8[kBufferSize + kBaseOffset]);
+ buffer_start_ = buffer_.get() + kBaseOffset;
+ allocator_.reset(new RingBuffer(kAlignment, kBaseOffset, kBufferSize,
+ helper_.get(), buffer_start_));
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ BaseRingBufferTest::TearDown();
+ }
+
+ scoped_ptr<RingBuffer> allocator_;
+};
+
+// Checks basic alloc and free.
+TEST_F(RingBufferTest, TestBasic) {
+ const unsigned int kSize = 16;
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSizeNoWaiting());
+ void* pointer = allocator_->Alloc(kSize);
+ EXPECT_GE(kBufferSize, allocator_->GetOffset(pointer) - kBaseOffset + kSize);
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+ EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSizeNoWaiting());
+ int32 token = helper_->InsertToken();
+ allocator_->FreePendingToken(pointer, token);
+}
+
+// Checks the free-pending-token mechanism.
+TEST_F(RingBufferTest, TestFreePendingToken) {
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ delay_set_token_ = true;
+ // Allocate several buffers to fill in the memory.
+ int32 tokens[kAllocCount];
+ for (unsigned int ii = 0; ii < kAllocCount; ++ii) {
+ void* pointer = allocator_->Alloc(kSize);
+ EXPECT_GE(kBufferSize,
+ allocator_->GetOffset(pointer) - kBaseOffset + kSize);
+ tokens[ii] = helper_->InsertToken();
+ allocator_->FreePendingToken(pointer, tokens[ii]);
+ }
+
+ EXPECT_EQ(kBufferSize - (kSize * kAllocCount),
+ allocator_->GetLargestFreeSizeNoWaiting());
+
+ RunPendingSetToken();
+
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until a token is passed.
+ void* pointer1 = allocator_->Alloc(kSize);
+ EXPECT_EQ(kBaseOffset, allocator_->GetOffset(pointer1));
+
+ // Check that the token has indeed passed.
+ EXPECT_LE(tokens[0], GetToken());
+
+ allocator_->FreePendingToken(pointer1, helper_->InsertToken());
+}
+
+// Tests GetLargestFreeSizeNoWaiting
+TEST_F(RingBufferTest, TestGetLargestFreeSizeNoWaiting) {
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSizeNoWaiting());
+
+ void* pointer = allocator_->Alloc(kBufferSize);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(pointer, helper_->InsertToken());
+}
+
+TEST_F(RingBufferTest, TestFreeBug) {
+ // The first and second allocations must not match.
+ const unsigned int kAlloc1 = 3*kAlignment;
+ const unsigned int kAlloc2 = 20;
+ void* pointer = allocator_->Alloc(kAlloc1);
+ EXPECT_EQ(kBufferSize - kAlloc1, allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(pointer, helper_.get()->InsertToken());
+ pointer = allocator_->Alloc(kAlloc2);
+ EXPECT_EQ(kBufferSize - kAlloc1 - kAlloc2,
+ allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(pointer, helper_.get()->InsertToken());
+ pointer = allocator_->Alloc(kBufferSize);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(pointer, helper_.get()->InsertToken());
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/share_group.cc b/gpu/command_buffer/client/share_group.cc
new file mode 100644
index 0000000..96ab8d1
--- /dev/null
+++ b/gpu/command_buffer/client/share_group.cc
@@ -0,0 +1,259 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stack>
+#include <vector>
+
+#include "gpu/command_buffer/client/share_group.h"
+
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/program_info_manager.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+
+namespace gpu {
+namespace gles2 {
+
+ShareGroupContextData::IdHandlerData::IdHandlerData() : flush_generation_(0) {}
+ShareGroupContextData::IdHandlerData::~IdHandlerData() {}
+
+COMPILE_ASSERT(gpu::kInvalidResource == 0,
+ INVALID_RESOURCE_NOT_0_AS_GL_EXPECTS);
+
+// The standard id handler.
+class IdHandler : public IdHandlerInterface {
+ public:
+ IdHandler() { }
+ virtual ~IdHandler() { }
+
+ // Overridden from IdHandlerInterface.
+ virtual void MakeIds(
+ GLES2Implementation* /* gl_impl */,
+ GLuint id_offset, GLsizei n, GLuint* ids) OVERRIDE {
+ base::AutoLock auto_lock(lock_);
+ if (id_offset == 0) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ ids[ii] = id_allocator_.AllocateID();
+ }
+ } else {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ ids[ii] = id_allocator_.AllocateIDAtOrAbove(id_offset);
+ id_offset = ids[ii] + 1;
+ }
+ }
+ }
+
+ // Overridden from IdHandlerInterface.
+ virtual bool FreeIds(
+ GLES2Implementation* gl_impl,
+ GLsizei n, const GLuint* ids, DeleteFn delete_fn) OVERRIDE {
+ base::AutoLock auto_lock(lock_);
+
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ id_allocator_.FreeID(ids[ii]);
+ }
+
+ (gl_impl->*delete_fn)(n, ids);
+ // We need to ensure that the delete call is evaluated on the service side
+ // before any other contexts issue commands using these client ids.
+ // TODO(vmiura): Can remove this by virtualizing internal ids, however
+ // this code only affects PPAPI for now.
+ gl_impl->helper()->CommandBufferHelper::Flush();
+ return true;
+ }
+
+ // Overridden from IdHandlerInterface.
+ virtual bool MarkAsUsedForBind(GLuint id) OVERRIDE {
+ if (id == 0)
+ return true;
+ base::AutoLock auto_lock(lock_);
+ return id_allocator_.MarkAsUsed(id);
+ }
+
+ virtual void FreeContext(GLES2Implementation* gl_impl) OVERRIDE {}
+
+ private:
+ base::Lock lock_;
+ IdAllocator id_allocator_;
+};
+
+// An id handler that requires Gen before Bind.
+class StrictIdHandler : public IdHandlerInterface {
+ public:
+ explicit StrictIdHandler(int id_namespace) : id_namespace_(id_namespace) {}
+ virtual ~StrictIdHandler() {}
+
+ // Overridden from IdHandler.
+ virtual void MakeIds(GLES2Implementation* gl_impl,
+ GLuint /* id_offset */,
+ GLsizei n,
+ GLuint* ids) OVERRIDE {
+ base::AutoLock auto_lock(lock_);
+
+ // Collect pending FreeIds from other flush_generation.
+ CollectPendingFreeIds(gl_impl);
+
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (!free_ids_.empty()) {
+ // Allocate a previously freed Id.
+ ids[ii] = free_ids_.top();
+ free_ids_.pop();
+
+ // Record kIdInUse state.
+ DCHECK(id_states_[ids[ii] - 1] == kIdFree);
+ id_states_[ids[ii] - 1] = kIdInUse;
+ } else {
+ // Allocate a new Id.
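+ // Note that ids are 1-based: id N is tracked in id_states_[N - 1].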
+ id_states_.push_back(kIdInUse);
+ ids[ii] = id_states_.size();
+ }
+ }
+ }
+
+ // Overridden from IdHandler.
+ virtual bool FreeIds(GLES2Implementation* gl_impl,
+ GLsizei n,
+ const GLuint* ids,
+ DeleteFn delete_fn) OVERRIDE {
+
+ // Delete stub must run before CollectPendingFreeIds.
+ (gl_impl->*delete_fn)(n, ids);
+
+ {
+ base::AutoLock auto_lock(lock_);
+
+ // Collect pending FreeIds from other flush_generation.
+ CollectPendingFreeIds(gl_impl);
+
+ // Save Ids to free in a later flush_generation.
+ ShareGroupContextData::IdHandlerData* ctxt_data =
+ gl_impl->share_group_context_data()->id_handler_data(id_namespace_);
+
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ GLuint id = ids[ii];
+ if (id != 0) {
+ // Save freed Id for later.
+ DCHECK(id_states_[id - 1] == kIdInUse);
+ id_states_[id - 1] = kIdPendingFree;
+ ctxt_data->freed_ids_.push_back(id);
+ }
+ }
+ }
+
+ return true;
+ }
+
+ // Overridden from IdHandler.
+ virtual bool MarkAsUsedForBind(GLuint id) OVERRIDE {
+#ifndef NDEBUG
+ if (id != 0) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(id_states_[id - 1] == kIdInUse);
+ }
+#endif
+ return true;
+ }
+
+ // Overridden from IdHandlerInterface.
+ virtual void FreeContext(GLES2Implementation* gl_impl) OVERRIDE {
+ base::AutoLock auto_lock(lock_);
+ CollectPendingFreeIds(gl_impl);
+ }
+
+ private:
+ enum IdState { kIdFree, kIdPendingFree, kIdInUse };
+
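+ // Recycles pending-free ids once the context's flush generation has
+ // advanced, i.e. after the corresponding delete commands have been flushed
+ // toward the service.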
+ void CollectPendingFreeIds(GLES2Implementation* gl_impl) {
+ uint32 flush_generation = gl_impl->helper()->flush_generation();
+ ShareGroupContextData::IdHandlerData* ctxt_data =
+ gl_impl->share_group_context_data()->id_handler_data(id_namespace_);
+
+ if (ctxt_data->flush_generation_ != flush_generation) {
+ ctxt_data->flush_generation_ = flush_generation;
+ for (uint32 ii = 0; ii < ctxt_data->freed_ids_.size(); ++ii) {
+ const GLuint id = ctxt_data->freed_ids_[ii];
+ DCHECK(id_states_[id - 1] == kIdPendingFree);
+ id_states_[id - 1] = kIdFree;
+ free_ids_.push(id);
+ }
+ ctxt_data->freed_ids_.clear();
+ }
+ }
+
+ int id_namespace_;
+
+ base::Lock lock_;
+ std::vector<uint8> id_states_;
+ std::stack<uint32> free_ids_;
+};
+
+// An id handler for ids that are never reused.
+class NonReusedIdHandler : public IdHandlerInterface {
+ public:
+ NonReusedIdHandler() : last_id_(0) {}
+ virtual ~NonReusedIdHandler() {}
+
+ // Overridden from IdHandlerInterface.
+ virtual void MakeIds(
+ GLES2Implementation* /* gl_impl */,
+ GLuint id_offset, GLsizei n, GLuint* ids) OVERRIDE {
+ base::AutoLock auto_lock(lock_);
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ ids[ii] = ++last_id_ + id_offset;
+ }
+ }
+
+ // Overridden from IdHandlerInterface.
+ virtual bool FreeIds(
+ GLES2Implementation* gl_impl,
+ GLsizei n, const GLuint* ids, DeleteFn delete_fn) OVERRIDE {
+ // Ids are never freed.
+ (gl_impl->*delete_fn)(n, ids);
+ return true;
+ }
+
+ // Overridden from IdHandlerInterface.
+ virtual bool MarkAsUsedForBind(GLuint /* id */) OVERRIDE {
+ // This is only used for Shaders and Programs which have no bind.
+ return false;
+ }
+
+ virtual void FreeContext(GLES2Implementation* gl_impl) OVERRIDE {}
+
+ private:
+ base::Lock lock_;
+ GLuint last_id_;
+};
+
+ShareGroup::ShareGroup(bool bind_generates_resource)
+ : bind_generates_resource_(bind_generates_resource) {
+ if (bind_generates_resource) {
+ for (int i = 0; i < id_namespaces::kNumIdNamespaces; ++i) {
+ if (i == id_namespaces::kProgramsAndShaders) {
+ id_handlers_[i].reset(new NonReusedIdHandler());
+ } else {
+ id_handlers_[i].reset(new IdHandler());
+ }
+ }
+ } else {
+ for (int i = 0; i < id_namespaces::kNumIdNamespaces; ++i) {
+ if (i == id_namespaces::kProgramsAndShaders) {
+ id_handlers_[i].reset(new NonReusedIdHandler());
+ } else {
+ id_handlers_[i].reset(new StrictIdHandler(i));
+ }
+ }
+ }
+ program_info_manager_.reset(ProgramInfoManager::Create(false));
+}
+
+void ShareGroup::set_program_info_manager(ProgramInfoManager* manager) {
+ program_info_manager_.reset(manager);
+}
+
+ShareGroup::~ShareGroup() {}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/share_group.h b/gpu/command_buffer/client/share_group.h
new file mode 100644
index 0000000..c66704b
--- /dev/null
+++ b/gpu/command_buffer/client/share_group.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_SHARE_GROUP_H_
+#define GPU_COMMAND_BUFFER_CLIENT_SHARE_GROUP_H_
+
+#include <GLES2/gl2.h>
+#include "base/memory/scoped_ptr.h"
+#include "gles2_impl_export.h"
+#include "gpu/command_buffer/client/ref_counted.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2Implementation;
+class GLES2ImplementationTest;
+class ProgramInfoManager;
+
+typedef void (GLES2Implementation::*DeleteFn)(GLsizei n, const GLuint* ids);
+
+class ShareGroupContextData {
+ public:
+ struct IdHandlerData {
+ IdHandlerData();
+ ~IdHandlerData();
+
+ std::vector<GLuint> freed_ids_;
+ uint32 flush_generation_;
+ };
+
+ IdHandlerData* id_handler_data(int namespace_id) {
+ return &id_handler_data_[namespace_id];
+ }
+
+ private:
+ IdHandlerData id_handler_data_[id_namespaces::kNumIdNamespaces];
+};
+
+// Base class for IdHandlers
+class IdHandlerInterface {
+ public:
+ IdHandlerInterface() { }
+ virtual ~IdHandlerInterface() { }
+
+ // Makes some ids at or above id_offset.
+ virtual void MakeIds(
+ GLES2Implementation* gl_impl,
+ GLuint id_offset, GLsizei n, GLuint* ids) = 0;
+
+ // Frees some ids.
+ virtual bool FreeIds(
+ GLES2Implementation* gl_impl, GLsizei n, const GLuint* ids,
+ DeleteFn delete_fn) = 0;
+
+ // Marks an id as used for glBind functions. id = 0 does nothing.
+ virtual bool MarkAsUsedForBind(GLuint id) = 0;
+
+ // Called when a context in the share group is destructed.
+ virtual void FreeContext(GLES2Implementation* gl_impl) = 0;
+};
+
+// ShareGroup manages the resources shared between contexts in the same share
+// group.
+class GLES2_IMPL_EXPORT ShareGroup
+ : public gpu::RefCountedThreadSafe<ShareGroup> {
+ public:
+ ShareGroup(bool bind_generates_resource);
+
+ bool bind_generates_resource() const {
+ return bind_generates_resource_;
+ }
+
+ IdHandlerInterface* GetIdHandler(int namespace_id) const {
+ return id_handlers_[namespace_id].get();
+ }
+
+ ProgramInfoManager* program_info_manager() {
+ return program_info_manager_.get();
+ }
+
+ void FreeContext(GLES2Implementation* gl_impl) {
+ for (int i = 0; i < id_namespaces::kNumIdNamespaces; ++i) {
+ id_handlers_[i]->FreeContext(gl_impl);
+ }
+ }
+
+ private:
+ friend class gpu::RefCountedThreadSafe<ShareGroup>;
+ friend class gpu::gles2::GLES2ImplementationTest;
+ ~ShareGroup();
+
+ // Install a new program info manager. Used for testing only.
+ void set_program_info_manager(ProgramInfoManager* manager);
+
+ scoped_ptr<IdHandlerInterface> id_handlers_[id_namespaces::kNumIdNamespaces];
+ scoped_ptr<ProgramInfoManager> program_info_manager_;
+
+ bool bind_generates_resource_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShareGroup);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_SHARE_GROUP_H_
diff --git a/gpu/command_buffer/client/transfer_buffer.cc b/gpu/command_buffer/client/transfer_buffer.cc
new file mode 100644
index 0000000..da00d87
--- /dev/null
+++ b/gpu/command_buffer/client/transfer_buffer.cc
@@ -0,0 +1,201 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A class to manage a growing transfer buffer.
+
+#include "gpu/command_buffer/client/transfer_buffer.h"
+
+#include "base/bits.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+namespace gpu {
+
+TransferBuffer::TransferBuffer(
+ CommandBufferHelper* helper)
+ : helper_(helper),
+ result_size_(0),
+ default_buffer_size_(0),
+ min_buffer_size_(0),
+ max_buffer_size_(0),
+ alignment_(0),
+ size_to_flush_(0),
+ bytes_since_last_flush_(0),
+ buffer_id_(-1),
+ result_buffer_(NULL),
+ result_shm_offset_(0),
+ usable_(true) {
+}
+
+TransferBuffer::~TransferBuffer() {
+ Free();
+}
+
+bool TransferBuffer::Initialize(
+ unsigned int default_buffer_size,
+ unsigned int result_size,
+ unsigned int min_buffer_size,
+ unsigned int max_buffer_size,
+ unsigned int alignment,
+ unsigned int size_to_flush) {
+ result_size_ = result_size;
+ default_buffer_size_ = default_buffer_size;
+ min_buffer_size_ = min_buffer_size;
+ max_buffer_size_ = max_buffer_size;
+ alignment_ = alignment;
+ size_to_flush_ = size_to_flush;
+ ReallocateRingBuffer(default_buffer_size_ - result_size);
+ return HaveBuffer();
+}
+
+void TransferBuffer::Free() {
+ if (HaveBuffer()) {
+ TRACE_EVENT0("gpu", "TransferBuffer::Free");
+ helper_->Finish();
+ helper_->command_buffer()->DestroyTransferBuffer(buffer_id_);
+ buffer_id_ = -1;
+ buffer_ = NULL;
+ result_buffer_ = NULL;
+ result_shm_offset_ = 0;
+ ring_buffer_.reset();
+ bytes_since_last_flush_ = 0;
+ }
+}
+
+bool TransferBuffer::HaveBuffer() const {
+ DCHECK(buffer_id_ == -1 || buffer_.get());
+ return buffer_id_ != -1;
+}
+
+RingBuffer::Offset TransferBuffer::GetOffset(void* pointer) const {
+ return ring_buffer_->GetOffset(pointer);
+}
+
+void TransferBuffer::FreePendingToken(void* p, unsigned int token) {
+ ring_buffer_->FreePendingToken(p, token);
+ if (bytes_since_last_flush_ >= size_to_flush_ && size_to_flush_ > 0) {
+ helper_->Flush();
+ bytes_since_last_flush_ = 0;
+ }
+}
+
+void TransferBuffer::AllocateRingBuffer(unsigned int size) {
+ for (;size >= min_buffer_size_; size /= 2) {
+ int32 id = -1;
+ scoped_refptr<gpu::Buffer> buffer =
+ helper_->command_buffer()->CreateTransferBuffer(size, &id);
+ if (id != -1) {
+ DCHECK(buffer.get());
+ buffer_ = buffer;
+ ring_buffer_.reset(new RingBuffer(
+ alignment_,
+ result_size_,
+ buffer_->size() - result_size_,
+ helper_,
+ static_cast<char*>(buffer_->memory()) + result_size_));
+ buffer_id_ = id;
+ result_buffer_ = buffer_->memory();
+ result_shm_offset_ = 0;
+ return;
+ }
+ // We failed, so don't try anything larger than this.
+ max_buffer_size_ = size / 2;
+ }
+ usable_ = false;
+}
+
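+// Rounds a dimension up to the next power of two; for example 300 becomes
+// 512, 256 stays 256, and 0 stays 0.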
+static unsigned int ComputePOTSize(unsigned int dimension) {
+ return (dimension == 0) ? 0 : 1 << base::bits::Log2Ceiling(dimension);
+}
+
+void TransferBuffer::ReallocateRingBuffer(unsigned int size) {
+ // What size buffer would we ask for if we needed a new one?
+ unsigned int needed_buffer_size = ComputePOTSize(size + result_size_);
+ needed_buffer_size = std::max(needed_buffer_size, min_buffer_size_);
+ needed_buffer_size = std::max(needed_buffer_size, default_buffer_size_);
+ needed_buffer_size = std::min(needed_buffer_size, max_buffer_size_);
+
+ if (usable_ && (!HaveBuffer() || needed_buffer_size > buffer_->size())) {
+ if (HaveBuffer()) {
+ Free();
+ }
+ AllocateRingBuffer(needed_buffer_size);
+ }
+}
+
+void* TransferBuffer::AllocUpTo(
+ unsigned int size, unsigned int* size_allocated) {
+ DCHECK(size_allocated);
+
+ ReallocateRingBuffer(size);
+
+ if (!HaveBuffer()) {
+ return NULL;
+ }
+
+ unsigned int max_size = ring_buffer_->GetLargestFreeOrPendingSize();
+ *size_allocated = std::min(max_size, size);
+ bytes_since_last_flush_ += *size_allocated;
+ return ring_buffer_->Alloc(*size_allocated);
+}
+
+void* TransferBuffer::Alloc(unsigned int size) {
+ ReallocateRingBuffer(size);
+
+ if (!HaveBuffer()) {
+ return NULL;
+ }
+
+ unsigned int max_size = ring_buffer_->GetLargestFreeOrPendingSize();
+ if (size > max_size) {
+ return NULL;
+ }
+
+ bytes_since_last_flush_ += size;
+ return ring_buffer_->Alloc(size);
+}
+
+void* TransferBuffer::GetResultBuffer() {
+ ReallocateRingBuffer(result_size_);
+ return result_buffer_;
+}
+
+int TransferBuffer::GetResultOffset() {
+ ReallocateRingBuffer(result_size_);
+ return result_shm_offset_;
+}
+
+int TransferBuffer::GetShmId() {
+ ReallocateRingBuffer(result_size_);
+ return buffer_id_;
+}
+
+unsigned int TransferBuffer::GetCurrentMaxAllocationWithoutRealloc() const {
+ return HaveBuffer() ? ring_buffer_->GetLargestFreeOrPendingSize() : 0;
+}
+
+unsigned int TransferBuffer::GetMaxAllocation() const {
+ return HaveBuffer() ? max_buffer_size_ - result_size_ : 0;
+}
+
+void ScopedTransferBufferPtr::Release() {
+ if (buffer_) {
+ transfer_buffer_->FreePendingToken(buffer_, helper_->InsertToken());
+ buffer_ = NULL;
+ size_ = 0;
+ }
+}
+
+void ScopedTransferBufferPtr::Reset(unsigned int new_size) {
+ Release();
+ // NOTE: we allocate buffers of size 0 so that HaveBuffer will be true, so
+ // that address will return a pointer just like malloc, and so that GetShmId
+ // will be valid. That has the side effect that we'll insert a token on free.
+ // We could add code to skip the token for a zero-size buffer, but it doesn't
+ // seem worth the complication.
+ buffer_ = transfer_buffer_->AllocUpTo(new_size, &size_);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/transfer_buffer.h b/gpu/command_buffer/client/transfer_buffer.h
new file mode 100644
index 0000000..348ad32
--- /dev/null
+++ b/gpu/command_buffer/client/transfer_buffer.h
@@ -0,0 +1,199 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/ring_buffer.h"
+#include "gpu/command_buffer/common/buffer.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+
+// Interface for managing the transfer buffer.
+class GPU_EXPORT TransferBufferInterface {
+ public:
+ TransferBufferInterface() { }
+ virtual ~TransferBufferInterface() { }
+
+ virtual bool Initialize(
+ unsigned int buffer_size,
+ unsigned int result_size,
+ unsigned int min_buffer_size,
+ unsigned int max_buffer_size,
+ unsigned int alignment,
+ unsigned int size_to_flush) = 0;
+
+ virtual int GetShmId() = 0;
+ virtual void* GetResultBuffer() = 0;
+ virtual int GetResultOffset() = 0;
+
+ virtual void Free() = 0;
+
+ virtual bool HaveBuffer() const = 0;
+
+ // Allocates up to size bytes.
+ virtual void* AllocUpTo(unsigned int size, unsigned int* size_allocated) = 0;
+
+ // Allocates size bytes.
+ // Note: Alloc will fail if it can not return size bytes.
+ virtual void* Alloc(unsigned int size) = 0;
+
+ virtual RingBuffer::Offset GetOffset(void* pointer) const = 0;
+
+ virtual void FreePendingToken(void* p, unsigned int token) = 0;
+};
+
+// Class that manages the transfer buffer.
+class GPU_EXPORT TransferBuffer : public TransferBufferInterface {
+ public:
+ TransferBuffer(CommandBufferHelper* helper);
+ virtual ~TransferBuffer();
+
+ // Overridden from TransferBufferInterface.
+ virtual bool Initialize(
+ unsigned int default_buffer_size,
+ unsigned int result_size,
+ unsigned int min_buffer_size,
+ unsigned int max_buffer_size,
+ unsigned int alignment,
+ unsigned int size_to_flush) OVERRIDE;
+ virtual int GetShmId() OVERRIDE;
+ virtual void* GetResultBuffer() OVERRIDE;
+ virtual int GetResultOffset() OVERRIDE;
+ virtual void Free() OVERRIDE;
+ virtual bool HaveBuffer() const OVERRIDE;
+ virtual void* AllocUpTo(
+ unsigned int size, unsigned int* size_allocated) OVERRIDE;
+ virtual void* Alloc(unsigned int size) OVERRIDE;
+ virtual RingBuffer::Offset GetOffset(void* pointer) const OVERRIDE;
+ virtual void FreePendingToken(void* p, unsigned int token) OVERRIDE;
+
+ // These are for testing.
+ unsigned int GetCurrentMaxAllocationWithoutRealloc() const;
+ unsigned int GetMaxAllocation() const;
+
+ private:
+ // Tries to reallocate the ring buffer if it's not large enough for size.
+ void ReallocateRingBuffer(unsigned int size);
+
+ void AllocateRingBuffer(unsigned int size);
+
+ CommandBufferHelper* helper_;
+ scoped_ptr<RingBuffer> ring_buffer_;
+
+ // size reserved for results
+ unsigned int result_size_;
+
+ // default size. Size we want when starting or re-allocating
+ unsigned int default_buffer_size_;
+
+ // min size we'll consider successful
+ unsigned int min_buffer_size_;
+
+ // max size we'll let the buffer grow
+ unsigned int max_buffer_size_;
+
+ // alignment for allocations
+ unsigned int alignment_;
+
+ // Size at which to do an async flush. 0 = never.
+ unsigned int size_to_flush_;
+
+ // Number of bytes since we last flushed.
+ unsigned int bytes_since_last_flush_;
+
+ // the current buffer.
+ scoped_refptr<gpu::Buffer> buffer_;
+
+ // id of buffer. -1 = no buffer
+ int32 buffer_id_;
+
+ // address of result area
+ void* result_buffer_;
+
+ // offset to result area
+ uint32 result_shm_offset_;
+
+ // false if we failed to allocate min_buffer_size
+ bool usable_;
+};
+
+// A class that will manage the lifetime of a transfer buffer allocation.
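+//
+// Illustrative usage sketch (hypothetical variable names), assuming a helper
+// and transfer buffer have already been initialized:
+//
+//   ScopedTransferBufferPtr buffer(data_size, helper, transfer_buffer);
+//   if (buffer.valid()) {
+//     memcpy(buffer.address(), data, buffer.size());
+//     // Issue a command referencing buffer.shm_id() and buffer.offset().
+//   }
+//   // On destruction, Release() frees the allocation pending the next token.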
+class GPU_EXPORT ScopedTransferBufferPtr {
+ public:
+ ScopedTransferBufferPtr(
+ unsigned int size,
+ CommandBufferHelper* helper,
+ TransferBufferInterface* transfer_buffer)
+ : buffer_(NULL),
+ size_(0),
+ helper_(helper),
+ transfer_buffer_(transfer_buffer) {
+ Reset(size);
+ }
+
+ ~ScopedTransferBufferPtr() {
+ Release();
+ }
+
+ bool valid() const {
+ return buffer_ != NULL;
+ }
+
+ unsigned int size() const {
+ return size_;
+ }
+
+ int shm_id() const {
+ return transfer_buffer_->GetShmId();
+ }
+
+ RingBuffer::Offset offset() const {
+ return transfer_buffer_->GetOffset(buffer_);
+ }
+
+ void* address() const {
+ return buffer_;
+ }
+
+ void Release();
+
+ void Reset(unsigned int new_size);
+
+ private:
+ void* buffer_;
+ unsigned int size_;
+ CommandBufferHelper* helper_;
+ TransferBufferInterface* transfer_buffer_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedTransferBufferPtr);
+};
+
+template <typename T>
+class ScopedTransferBufferArray : public ScopedTransferBufferPtr {
+ public:
+ ScopedTransferBufferArray(
+ unsigned int num_elements,
+ CommandBufferHelper* helper, TransferBufferInterface* transfer_buffer)
+ : ScopedTransferBufferPtr(
+ num_elements * sizeof(T), helper, transfer_buffer) {
+ }
+
+ T* elements() {
+ return static_cast<T*>(address());
+ }
+
+ unsigned int num_elements() const {
+ return size() / sizeof(T);
+ }
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_
diff --git a/gpu/command_buffer/client/transfer_buffer_unittest.cc b/gpu/command_buffer/client/transfer_buffer_unittest.cc
new file mode 100644
index 0000000..cb8558f
--- /dev/null
+++ b/gpu/command_buffer/client/transfer_buffer_unittest.cc
@@ -0,0 +1,485 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the TransferBuffer class.
+
+#include "gpu/command_buffer/client/transfer_buffer.h"
+
+#include "base/compiler_specific.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using ::testing::_;
+using ::testing::AtMost;
+using ::testing::Invoke;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+using ::testing::StrictMock;
+
+namespace gpu {
+
+
+class TransferBufferTest : public testing::Test {
+ protected:
+ static const int32 kNumCommandEntries = 400;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+ static const unsigned int kStartingOffset = 64;
+ static const unsigned int kAlignment = 4;
+ static const size_t kTransferBufferSize = 256;
+
+ TransferBufferTest()
+ : transfer_buffer_id_(0) {
+ }
+
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ virtual void Initialize(unsigned int size_to_flush) {
+ ASSERT_TRUE(transfer_buffer_->Initialize(
+ kTransferBufferSize,
+ kStartingOffset,
+ kTransferBufferSize,
+ kTransferBufferSize,
+ kAlignment,
+ size_to_flush));
+ }
+
+ MockClientCommandBufferMockFlush* command_buffer() const {
+ return command_buffer_.get();
+ }
+
+ scoped_ptr<MockClientCommandBufferMockFlush> command_buffer_;
+ scoped_ptr<CommandBufferHelper> helper_;
+ scoped_ptr<TransferBuffer> transfer_buffer_;
+ int32 transfer_buffer_id_;
+};
+
+void TransferBufferTest::SetUp() {
+ command_buffer_.reset(new StrictMock<MockClientCommandBufferMockFlush>());
+ ASSERT_TRUE(command_buffer_->Initialize());
+
+ helper_.reset(new CommandBufferHelper(command_buffer()));
+ ASSERT_TRUE(helper_->Initialize(kCommandBufferSizeBytes));
+
+ transfer_buffer_id_ = command_buffer()->GetNextFreeTransferBufferId();
+
+ transfer_buffer_.reset(new TransferBuffer(helper_.get()));
+}
+
+void TransferBufferTest::TearDown() {
+ if (transfer_buffer_->HaveBuffer()) {
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ // For command buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*command_buffer(), OnFlush()).Times(AtMost(1));
+ transfer_buffer_.reset();
+}
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const int32 TransferBufferTest::kNumCommandEntries;
+const int32 TransferBufferTest::kCommandBufferSizeBytes;
+const unsigned int TransferBufferTest::kStartingOffset;
+const unsigned int TransferBufferTest::kAlignment;
+const size_t TransferBufferTest::kTransferBufferSize;
+#endif
+
+TEST_F(TransferBufferTest, Basic) {
+ Initialize(0);
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+ EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
+ EXPECT_EQ(
+ kTransferBufferSize - kStartingOffset,
+ transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+}
+
+TEST_F(TransferBufferTest, Free) {
+ Initialize(0);
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+ EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+ // See that it gets reallocated.
+ EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+ // See that it gets reallocated.
+ EXPECT_TRUE(transfer_buffer_->GetResultBuffer() != NULL);
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+ // See that it gets reallocated.
+ unsigned int size = 0;
+ void* data = transfer_buffer_->AllocUpTo(1, &size);
+ EXPECT_TRUE(data != NULL);
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+ transfer_buffer_->FreePendingToken(data, 1);
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+ // See that it gets reallocated.
+ transfer_buffer_->GetResultOffset();
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+
+ EXPECT_EQ(
+ kTransferBufferSize - kStartingOffset,
+ transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+
+ // Test freeing twice.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ transfer_buffer_->Free();
+}
+
+TEST_F(TransferBufferTest, TooLargeAllocation) {
+ Initialize(0);
+ // Check that we can't allocate larger than the max size.
+ void* ptr = transfer_buffer_->Alloc(kTransferBufferSize + 1);
+ EXPECT_TRUE(ptr == NULL);
+ // Check that if we try to allocate larger than the max, we get the max.
+ unsigned int size_allocated = 0;
+ ptr = transfer_buffer_->AllocUpTo(
+ kTransferBufferSize + 1, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(kTransferBufferSize - kStartingOffset, size_allocated);
+ transfer_buffer_->FreePendingToken(ptr, 1);
+}
+
+TEST_F(TransferBufferTest, MemoryAlignmentAfterZeroAllocation) {
+ Initialize(32u);
+ void* ptr = transfer_buffer_->Alloc(0);
+ EXPECT_EQ((reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)), 0u);
+ transfer_buffer_->FreePendingToken(ptr, static_cast<unsigned int>(-1));
+ // Check that the pointer is aligned on the following allocation.
+ ptr = transfer_buffer_->Alloc(4);
+ EXPECT_EQ((reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)), 0u);
+ transfer_buffer_->FreePendingToken(ptr, 1);
+}
+
+TEST_F(TransferBufferTest, Flush) {
+ Initialize(16u);
+ unsigned int size_allocated = 0;
+ for (int i = 0; i < 8; ++i) {
+ void* ptr = transfer_buffer_->AllocUpTo(8u, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(8u, size_allocated);
+ if (i % 2) {
+ EXPECT_CALL(*command_buffer(), Flush(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ transfer_buffer_->FreePendingToken(ptr, helper_->InsertToken());
+ }
+ for (int i = 0; i < 8; ++i) {
+ void* ptr = transfer_buffer_->Alloc(8u);
+ ASSERT_TRUE(ptr != NULL);
+ if (i % 2) {
+ EXPECT_CALL(*command_buffer(), Flush(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ transfer_buffer_->FreePendingToken(ptr, helper_->InsertToken());
+ }
+}
+
+class MockClientCommandBufferCanFail : public MockClientCommandBufferMockFlush {
+ public:
+ MockClientCommandBufferCanFail() {
+ }
+ virtual ~MockClientCommandBufferCanFail() {
+ }
+
+ MOCK_METHOD2(CreateTransferBuffer,
+ scoped_refptr<Buffer>(size_t size, int32* id));
+
+ scoped_refptr<gpu::Buffer> RealCreateTransferBuffer(size_t size, int32* id) {
+ return MockCommandBufferBase::CreateTransferBuffer(size, id);
+ }
+};
+
+class TransferBufferExpandContractTest : public testing::Test {
+ protected:
+ static const int32 kNumCommandEntries = 400;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+ static const unsigned int kStartingOffset = 64;
+ static const unsigned int kAlignment = 4;
+ static const size_t kStartTransferBufferSize = 256;
+ static const size_t kMaxTransferBufferSize = 1024;
+ static const size_t kMinTransferBufferSize = 128;
+
+ TransferBufferExpandContractTest()
+ : transfer_buffer_id_(0) {
+ }
+
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ MockClientCommandBufferCanFail* command_buffer() const {
+ return command_buffer_.get();
+ }
+
+ scoped_ptr<MockClientCommandBufferCanFail> command_buffer_;
+ scoped_ptr<CommandBufferHelper> helper_;
+ scoped_ptr<TransferBuffer> transfer_buffer_;
+ int32 transfer_buffer_id_;
+};
+
+void TransferBufferExpandContractTest::SetUp() {
+ command_buffer_.reset(new StrictMock<MockClientCommandBufferCanFail>());
+ ASSERT_TRUE(command_buffer_->Initialize());
+
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kCommandBufferSizeBytes, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ helper_.reset(new CommandBufferHelper(command_buffer()));
+ ASSERT_TRUE(helper_->Initialize(kCommandBufferSizeBytes));
+
+ transfer_buffer_id_ = command_buffer()->GetNextFreeTransferBufferId();
+
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kStartTransferBufferSize, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ transfer_buffer_.reset(new TransferBuffer(helper_.get()));
+ ASSERT_TRUE(transfer_buffer_->Initialize(
+ kStartTransferBufferSize,
+ kStartingOffset,
+ kMinTransferBufferSize,
+ kMaxTransferBufferSize,
+ kAlignment,
+ 0));
+}
+
+void TransferBufferExpandContractTest::TearDown() {
+ if (transfer_buffer_->HaveBuffer()) {
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ // For command buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_.reset();
+}
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const int32 TransferBufferExpandContractTest::kNumCommandEntries;
+const int32 TransferBufferExpandContractTest::kCommandBufferSizeBytes;
+const unsigned int TransferBufferExpandContractTest::kStartingOffset;
+const unsigned int TransferBufferExpandContractTest::kAlignment;
+const size_t TransferBufferExpandContractTest::kStartTransferBufferSize;
+const size_t TransferBufferExpandContractTest::kMaxTransferBufferSize;
+const size_t TransferBufferExpandContractTest::kMinTransferBufferSize;
+#endif
+
+TEST_F(TransferBufferExpandContractTest, Expand) {
+ // Check it starts at starting size.
+ EXPECT_EQ(
+ kStartTransferBufferSize - kStartingOffset,
+ transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kStartTransferBufferSize * 2, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ // Try next power of 2.
+ const size_t kSize1 = 512 - kStartingOffset;
+ unsigned int size_allocated = 0;
+ void* ptr = transfer_buffer_->AllocUpTo(kSize1, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(kSize1, size_allocated);
+ EXPECT_EQ(kSize1, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+ transfer_buffer_->FreePendingToken(ptr, 1);
+
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kMaxTransferBufferSize, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ // Try next power of 2.
+ const size_t kSize2 = 1024 - kStartingOffset;
+ ptr = transfer_buffer_->AllocUpTo(kSize2, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(kSize2, size_allocated);
+ EXPECT_EQ(kSize2, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+ transfer_buffer_->FreePendingToken(ptr, 1);
+
+ // Try next one more. Should not go past max.
+ size_allocated = 0;
+ const size_t kSize3 = kSize2 + 1;
+ ptr = transfer_buffer_->AllocUpTo(kSize3, &size_allocated);
+ EXPECT_EQ(kSize2, size_allocated);
+ EXPECT_EQ(kSize2, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+ transfer_buffer_->FreePendingToken(ptr, 1);
+}
+
+TEST_F(TransferBufferExpandContractTest, Contract) {
+ // Check it starts at starting size.
+ EXPECT_EQ(
+ kStartTransferBufferSize - kStartingOffset,
+ transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+
+ // Try to allocate again; fail the first request.
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kStartTransferBufferSize, _))
+ .WillOnce(
+ DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kMinTransferBufferSize, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ const size_t kSize1 = 256 - kStartingOffset;
+ const size_t kSize2 = 128 - kStartingOffset;
+ unsigned int size_allocated = 0;
+ void* ptr = transfer_buffer_->AllocUpTo(kSize1, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(kSize2, size_allocated);
+ EXPECT_EQ(kSize2, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+ transfer_buffer_->FreePendingToken(ptr, 1);
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+
+ // Try to allocate again; this time creating the minimum-size buffer succeeds.
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kMinTransferBufferSize, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ ptr = transfer_buffer_->AllocUpTo(kSize1, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(kSize2, size_allocated);
+ EXPECT_EQ(kSize2, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+ transfer_buffer_->FreePendingToken(ptr, 1);
+}
+
+TEST_F(TransferBufferExpandContractTest, OutOfMemory) {
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+
+ // Try to allocate again; fail every creation attempt.
+ EXPECT_CALL(*command_buffer(), CreateTransferBuffer(_, _))
+ .WillOnce(
+ DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
+ .WillOnce(
+ DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
+ .WillOnce(
+ DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
+ .RetiresOnSaturation();
+
+ const size_t kSize1 = 512 - kStartingOffset;
+ unsigned int size_allocated = 0;
+ void* ptr = transfer_buffer_->AllocUpTo(kSize1, &size_allocated);
+ ASSERT_TRUE(ptr == NULL);
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+}
+
+TEST_F(TransferBufferExpandContractTest, ReallocsToDefault) {
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+
+ // See that it gets reallocated.
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kStartTransferBufferSize, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+ EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+
+ // Check it's the default size.
+ EXPECT_EQ(
+ kStartTransferBufferSize - kStartingOffset,
+ transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+}
+
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/vertex_array_object_manager.cc b/gpu/command_buffer/client/vertex_array_object_manager.cc
new file mode 100644
index 0000000..3e98bd0
--- /dev/null
+++ b/gpu/command_buffer/client/vertex_array_object_manager.cc
@@ -0,0 +1,640 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/vertex_array_object_manager.h"
+
+#include "base/logging.h"
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+
+#if defined(__native_client__) && !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+#define GLES2_SUPPORT_CLIENT_SIDE_ARRAYS
+#endif
+
+namespace gpu {
+namespace gles2 {
+
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+
+static GLsizei RoundUpToMultipleOf4(GLsizei size) {
+ return (size + 3) & ~3;
+}
+
+#endif // defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+
+// A 32-bit and 64-bit compatible way of converting a pointer to a GLuint.
+static GLuint ToGLuint(const void* ptr) {
+ return static_cast<GLuint>(reinterpret_cast<size_t>(ptr));
+}
+
+// This class tracks VertexAttribPointers and helps emulate client side buffers.
+//
+// The way client side buffers work is we shadow all the Vertex Attribs so we
+// know which ones are pointing to client side buffers.
+//
+// At Draw time, for any attribs pointing to client side buffers we copy them
+// to a special VBO and reset the actual vertex attrib pointers to point to this
+// VBO.
+//
+// This also means we have to catch calls that query those values so that when
+// an attrib is a client side buffer we pass back the info the user expects.
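+//
+// A rough sketch of the draw-time path (illustrative only; see the
+// SetupSimulated*Buffers methods further below for the real logic):
+//
+//   if (HaveEnabledClientSideBuffers()) {
+//     // Copy each enabled client-side attrib into the emulation VBO and
+//     // repoint the attrib at the copied data.
+//   }
+//   // Then issue the draw call as usual.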
+
+class GLES2_IMPL_EXPORT VertexArrayObject {
+ public:
+ // Info about Vertex Attributes. This is used to track what the user currently
+ // has bound on each Vertex Attribute so we can simulate client side buffers
+ // at glDrawXXX time.
+ class VertexAttrib {
+ public:
+ VertexAttrib()
+ : enabled_(false),
+ buffer_id_(0),
+ size_(4),
+ type_(GL_FLOAT),
+ normalized_(GL_FALSE),
+ pointer_(NULL),
+ gl_stride_(0),
+ divisor_(0) {
+ }
+
+ bool enabled() const {
+ return enabled_;
+ }
+
+ void set_enabled(bool enabled) {
+ enabled_ = enabled;
+ }
+
+ GLuint buffer_id() const {
+ return buffer_id_;
+ }
+
+ void set_buffer_id(GLuint id) {
+ buffer_id_ = id;
+ }
+
+ GLenum type() const {
+ return type_;
+ }
+
+ GLint size() const {
+ return size_;
+ }
+
+ GLsizei stride() const {
+ return gl_stride_;
+ }
+
+ GLboolean normalized() const {
+ return normalized_;
+ }
+
+ const GLvoid* pointer() const {
+ return pointer_;
+ }
+
+ bool IsClientSide() const {
+ return buffer_id_ == 0;
+ }
+
+ GLuint divisor() const {
+ return divisor_;
+ }
+
+ void SetInfo(
+ GLuint buffer_id,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei gl_stride,
+ const GLvoid* pointer) {
+ buffer_id_ = buffer_id;
+ size_ = size;
+ type_ = type;
+ normalized_ = normalized;
+ gl_stride_ = gl_stride;
+ pointer_ = pointer;
+ }
+
+ void SetDivisor(GLuint divisor) {
+ divisor_ = divisor;
+ }
+
+ private:
+ // Whether or not this attribute is enabled.
+ bool enabled_;
+
+ // The id of the buffer. 0 = client side buffer.
+ GLuint buffer_id_;
+
+ // Number of components (1, 2, 3, 4).
+ GLint size_;
+
+ // GL_BYTE, GL_FLOAT, etc. See glVertexAttribPointer.
+ GLenum type_;
+
+ // GL_TRUE or GL_FALSE
+ GLboolean normalized_;
+
+ // The pointer/offset into the buffer.
+ const GLvoid* pointer_;
+
+ // The stride that will be used to access the buffer. This is the bogus GL
+ // stride where 0 = compute the stride based on size and type.
+ GLsizei gl_stride_;
+
+ // Divisor, for geometry instancing.
+ GLuint divisor_;
+ };
+
+ typedef std::vector<VertexAttrib> VertexAttribs;
+
+ explicit VertexArrayObject(GLuint max_vertex_attribs);
+
+ void UnbindBuffer(GLuint id);
+
+ bool BindElementArray(GLuint id);
+
+ bool HaveEnabledClientSideBuffers() const;
+
+ void SetAttribEnable(GLuint index, bool enabled);
+
+ void SetAttribPointer(
+ GLuint buffer_id,
+ GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride,
+ const void* ptr);
+
+ bool GetVertexAttrib(
+ GLuint index, GLenum pname, uint32* param) const;
+
+ void SetAttribDivisor(GLuint index, GLuint divisor);
+
+ bool GetAttribPointer(GLuint index, GLenum pname, void** ptr) const;
+
+ const VertexAttribs& vertex_attribs() const {
+ return vertex_attribs_;
+ }
+
+ GLuint bound_element_array_buffer() const {
+ return bound_element_array_buffer_id_;
+ }
+
+ private:
+ const VertexAttrib* GetAttrib(GLuint index) const;
+
+ int num_client_side_pointers_enabled_;
+
+ // The currently bound element array buffer.
+ GLuint bound_element_array_buffer_id_;
+
+ VertexAttribs vertex_attribs_;
+
+ DISALLOW_COPY_AND_ASSIGN(VertexArrayObject);
+};
+
+VertexArrayObject::VertexArrayObject(GLuint max_vertex_attribs)
+ : num_client_side_pointers_enabled_(0),
+ bound_element_array_buffer_id_(0) {
+ vertex_attribs_.resize(max_vertex_attribs);
+}
+
+void VertexArrayObject::UnbindBuffer(GLuint id) {
+ if (id == 0) {
+ return;
+ }
+ for (size_t ii = 0; ii < vertex_attribs_.size(); ++ii) {
+ VertexAttrib& attrib = vertex_attribs_[ii];
+ if (attrib.buffer_id() == id) {
+ attrib.set_buffer_id(0);
+ if (attrib.enabled()) {
+ ++num_client_side_pointers_enabled_;
+ }
+ }
+ }
+ if (bound_element_array_buffer_id_ == id) {
+ bound_element_array_buffer_id_ = 0;
+ }
+}
+
+bool VertexArrayObject::BindElementArray(GLuint id) {
+ if (id == bound_element_array_buffer_id_) {
+ return false;
+ }
+ bound_element_array_buffer_id_ = id;
+ return true;
+}
+
+bool VertexArrayObject::HaveEnabledClientSideBuffers() const {
+ return num_client_side_pointers_enabled_ > 0;
+}
+
+void VertexArrayObject::SetAttribEnable(GLuint index, bool enabled) {
+ if (index < vertex_attribs_.size()) {
+ VertexAttrib& attrib = vertex_attribs_[index];
+ if (attrib.enabled() != enabled) {
+ if (attrib.IsClientSide()) {
+ num_client_side_pointers_enabled_ += enabled ? 1 : -1;
+ DCHECK_GE(num_client_side_pointers_enabled_, 0);
+ }
+ attrib.set_enabled(enabled);
+ }
+ }
+}
+
+void VertexArrayObject::SetAttribPointer(
+ GLuint buffer_id,
+ GLuint index,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) {
+ if (index < vertex_attribs_.size()) {
+ VertexAttrib& attrib = vertex_attribs_[index];
+ if (attrib.IsClientSide() && attrib.enabled()) {
+ --num_client_side_pointers_enabled_;
+ DCHECK_GE(num_client_side_pointers_enabled_, 0);
+ }
+
+ attrib.SetInfo(buffer_id, size, type, normalized, stride, ptr);
+
+ if (attrib.IsClientSide() && attrib.enabled()) {
+ ++num_client_side_pointers_enabled_;
+ }
+ }
+}
+
+bool VertexArrayObject::GetVertexAttrib(
+ GLuint index, GLenum pname, uint32* param) const {
+ const VertexAttrib* attrib = GetAttrib(index);
+ if (!attrib) {
+ return false;
+ }
+
+ switch (pname) {
+ case GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING:
+ *param = attrib->buffer_id();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_ENABLED:
+ *param = attrib->enabled();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_SIZE:
+ *param = attrib->size();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_STRIDE:
+ *param = attrib->stride();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_TYPE:
+ *param = attrib->type();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_NORMALIZED:
+ *param = attrib->normalized();
+ break;
+ default:
+ return false; // pass through to service side.
+ break;
+ }
+ return true;
+}
+
+void VertexArrayObject::SetAttribDivisor(GLuint index, GLuint divisor) {
+ if (index < vertex_attribs_.size()) {
+ VertexAttrib& attrib = vertex_attribs_[index];
+ attrib.SetDivisor(divisor);
+ }
+}
+
+// Gets the Attrib pointer for an attrib but only if it's a client side
+// pointer. Returns true if it got the pointer.
+bool VertexArrayObject::GetAttribPointer(
+ GLuint index, GLenum pname, void** ptr) const {
+ const VertexAttrib* attrib = GetAttrib(index);
+ if (attrib && pname == GL_VERTEX_ATTRIB_ARRAY_POINTER) {
+ *ptr = const_cast<void*>(attrib->pointer());
+ return true;
+ }
+ return false;
+}
+
+// Gets an attrib if the index is in range.
+const VertexArrayObject::VertexAttrib* VertexArrayObject::GetAttrib(
+ GLuint index) const {
+ if (index < vertex_attribs_.size()) {
+ const VertexAttrib* attrib = &vertex_attribs_[index];
+ return attrib;
+ }
+ return NULL;
+}
+
+VertexArrayObjectManager::VertexArrayObjectManager(
+ GLuint max_vertex_attribs,
+ GLuint array_buffer_id,
+ GLuint element_array_buffer_id)
+ : max_vertex_attribs_(max_vertex_attribs),
+ array_buffer_id_(array_buffer_id),
+ array_buffer_size_(0),
+ array_buffer_offset_(0),
+ element_array_buffer_id_(element_array_buffer_id),
+ element_array_buffer_size_(0),
+ collection_buffer_size_(0),
+ default_vertex_array_object_(new VertexArrayObject(max_vertex_attribs)),
+ bound_vertex_array_object_(default_vertex_array_object_) {
+}
+
+VertexArrayObjectManager::~VertexArrayObjectManager() {
+ for (VertexArrayObjectMap::iterator it = vertex_array_objects_.begin();
+ it != vertex_array_objects_.end(); ++it) {
+ delete it->second;
+ }
+ delete default_vertex_array_object_;
+}
+
+bool VertexArrayObjectManager::IsReservedId(GLuint id) const {
+ return (id != 0 &&
+ (id == array_buffer_id_ || id == element_array_buffer_id_));
+}
+
+GLuint VertexArrayObjectManager::bound_element_array_buffer() const {
+ return bound_vertex_array_object_->bound_element_array_buffer();
+}
+
+void VertexArrayObjectManager::UnbindBuffer(GLuint id) {
+ bound_vertex_array_object_->UnbindBuffer(id);
+}
+
+bool VertexArrayObjectManager::BindElementArray(GLuint id) {
+ return bound_vertex_array_object_->BindElementArray(id);
+}
+
+void VertexArrayObjectManager::GenVertexArrays(
+ GLsizei n, const GLuint* arrays) {
+ DCHECK_GE(n, 0);
+ for (GLsizei i = 0; i < n; ++i) {
+ std::pair<VertexArrayObjectMap::iterator, bool> result =
+ vertex_array_objects_.insert(std::make_pair(
+ arrays[i], new VertexArrayObject(max_vertex_attribs_)));
+ DCHECK(result.second);
+ }
+}
+
+void VertexArrayObjectManager::DeleteVertexArrays(
+ GLsizei n, const GLuint* arrays) {
+ DCHECK_GE(n, 0);
+ for (GLsizei i = 0; i < n; ++i) {
+ GLuint id = arrays[i];
+ if (id) {
+ VertexArrayObjectMap::iterator it = vertex_array_objects_.find(id);
+ if (it != vertex_array_objects_.end()) {
+ if (bound_vertex_array_object_ == it->second) {
+ bound_vertex_array_object_ = default_vertex_array_object_;
+ }
+ delete it->second;
+ vertex_array_objects_.erase(it);
+ }
+ }
+ }
+}
+
+bool VertexArrayObjectManager::BindVertexArray(GLuint array, bool* changed) {
+ *changed = false;
+ VertexArrayObject* vertex_array_object = default_vertex_array_object_;
+ if (array != 0) {
+ VertexArrayObjectMap::iterator it = vertex_array_objects_.find(array);
+ if (it == vertex_array_objects_.end()) {
+ return false;
+ }
+ vertex_array_object = it->second;
+ }
+ *changed = vertex_array_object != bound_vertex_array_object_;
+ bound_vertex_array_object_ = vertex_array_object;
+ return true;
+}
+
+bool VertexArrayObjectManager::HaveEnabledClientSideBuffers() const {
+ return bound_vertex_array_object_->HaveEnabledClientSideBuffers();
+}
+
+void VertexArrayObjectManager::SetAttribEnable(GLuint index, bool enabled) {
+ bound_vertex_array_object_->SetAttribEnable(index, enabled);
+}
+
+bool VertexArrayObjectManager::GetVertexAttrib(
+ GLuint index, GLenum pname, uint32* param) {
+ return bound_vertex_array_object_->GetVertexAttrib(index, pname, param);
+}
+
+bool VertexArrayObjectManager::GetAttribPointer(
+ GLuint index, GLenum pname, void** ptr) const {
+ return bound_vertex_array_object_->GetAttribPointer(index, pname, ptr);
+}
+
+bool VertexArrayObjectManager::SetAttribPointer(
+ GLuint buffer_id,
+ GLuint index,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) {
+ // Client side arrays are not allowed in vaos.
+ if (buffer_id == 0 && !IsDefaultVAOBound()) {
+ return false;
+ }
+ bound_vertex_array_object_->SetAttribPointer(
+ buffer_id, index, size, type, normalized, stride, ptr);
+ return true;
+}
+
+void VertexArrayObjectManager::SetAttribDivisor(GLuint index, GLuint divisor) {
+ bound_vertex_array_object_->SetAttribDivisor(index, divisor);
+}
+
+// Collects the data into the collection buffer and returns the number of
+// bytes collected.
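+// For example (illustrative numbers): with real_stride == 20 and
+// bytes_per_element == 12, each 12-byte attribute is copied out of its
+// 20-byte-strided slot so that the destination ends up tightly packed.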
+GLsizei VertexArrayObjectManager::CollectData(
+ const void* data,
+ GLsizei bytes_per_element,
+ GLsizei real_stride,
+ GLsizei num_elements) {
+ GLsizei bytes_needed = bytes_per_element * num_elements;
+ if (collection_buffer_size_ < bytes_needed) {
+ collection_buffer_.reset(new int8[bytes_needed]);
+ collection_buffer_size_ = bytes_needed;
+ }
+ const int8* src = static_cast<const int8*>(data);
+ int8* dst = collection_buffer_.get();
+ int8* end = dst + bytes_per_element * num_elements;
+ for (; dst < end; src += real_stride, dst += bytes_per_element) {
+ memcpy(dst, src, bytes_per_element);
+ }
+ return bytes_needed;
+}
+
+bool VertexArrayObjectManager::IsDefaultVAOBound() const {
+ return bound_vertex_array_object_ == default_vertex_array_object_;
+}
+
+// Returns true if the buffers were set up.
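+// The work happens in two passes over the enabled client-side attribs: first
+// the total upload size is computed so a single GL_ARRAY_BUFFER allocation
+// can be reused, then each attrib's data is packed with CollectData, uploaded
+// with BufferSubDataHelper, and re-pointed at the service-side buffer.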
+bool VertexArrayObjectManager::SetupSimulatedClientSideBuffers(
+ const char* function_name,
+ GLES2Implementation* gl,
+ GLES2CmdHelper* gl_helper,
+ GLsizei num_elements,
+ GLsizei primcount,
+ bool* simulated) {
+ *simulated = false;
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ if (!bound_vertex_array_object_->HaveEnabledClientSideBuffers()) {
+ return true;
+ }
+ if (!IsDefaultVAOBound()) {
+ gl->SetGLError(
+ GL_INVALID_OPERATION, function_name,
+ "client side arrays not allowed with vertex array object");
+ return false;
+ }
+ *simulated = true;
+ GLsizei total_size = 0;
+ // Compute the size of the buffer we need.
+ const VertexArrayObject::VertexAttribs& vertex_attribs =
+ bound_vertex_array_object_->vertex_attribs();
+ for (GLuint ii = 0; ii < vertex_attribs.size(); ++ii) {
+ const VertexArrayObject::VertexAttrib& attrib = vertex_attribs[ii];
+ if (attrib.IsClientSide() && attrib.enabled()) {
+ size_t bytes_per_element =
+ GLES2Util::GetGLTypeSizeForTexturesAndBuffers(attrib.type()) *
+ attrib.size();
+ GLsizei elements = (primcount && attrib.divisor() > 0) ?
+ ((primcount - 1) / attrib.divisor() + 1) : num_elements;
+ total_size += RoundUpToMultipleOf4(bytes_per_element * elements);
+ }
+ }
+ gl_helper->BindBuffer(GL_ARRAY_BUFFER, array_buffer_id_);
+ array_buffer_offset_ = 0;
+ if (total_size > array_buffer_size_) {
+ gl->BufferDataHelper(GL_ARRAY_BUFFER, total_size, NULL, GL_DYNAMIC_DRAW);
+ array_buffer_size_ = total_size;
+ }
+ for (GLuint ii = 0; ii < vertex_attribs.size(); ++ii) {
+ const VertexArrayObject::VertexAttrib& attrib = vertex_attribs[ii];
+ if (attrib.IsClientSide() && attrib.enabled()) {
+ size_t bytes_per_element =
+ GLES2Util::GetGLTypeSizeForTexturesAndBuffers(attrib.type()) *
+ attrib.size();
+ GLsizei real_stride = attrib.stride() ?
+ attrib.stride() : static_cast<GLsizei>(bytes_per_element);
+ GLsizei elements = (primcount && attrib.divisor() > 0) ?
+ ((primcount - 1) / attrib.divisor() + 1) : num_elements;
+ GLsizei bytes_collected = CollectData(
+ attrib.pointer(), bytes_per_element, real_stride, elements);
+ gl->BufferSubDataHelper(
+ GL_ARRAY_BUFFER, array_buffer_offset_, bytes_collected,
+ collection_buffer_.get());
+ gl_helper->VertexAttribPointer(
+ ii, attrib.size(), attrib.type(), attrib.normalized(), 0,
+ array_buffer_offset_);
+ array_buffer_offset_ += RoundUpToMultipleOf4(bytes_collected);
+ DCHECK_LE(array_buffer_offset_, array_buffer_size_);
+ }
+ }
+#endif // defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ return true;
+}
+
+// Copies the indices to the service and computes the highest index accessed + 1.
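+// When the bound element array buffer is 0 (client-side indices), the indices
+// are uploaded into the reserved element array buffer and the highest index
+// is computed locally; otherwise the service is asked, via
+// GetMaxValueInBufferCHROMIUMHelper, how many vertices the indices reference.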
+bool VertexArrayObjectManager::SetupSimulatedIndexAndClientSideBuffers(
+ const char* function_name,
+ GLES2Implementation* gl,
+ GLES2CmdHelper* gl_helper,
+ GLsizei count,
+ GLenum type,
+ GLsizei primcount,
+ const void* indices,
+ GLuint* offset,
+ bool* simulated) {
+ *simulated = false;
+ *offset = ToGLuint(indices);
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ GLsizei num_elements = 0;
+ if (bound_vertex_array_object_->bound_element_array_buffer() == 0) {
+ *simulated = true;
+ *offset = 0;
+ GLsizei max_index = -1;
+ switch (type) {
+ case GL_UNSIGNED_BYTE: {
+ const uint8* src = static_cast<const uint8*>(indices);
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (src[ii] > max_index) {
+ max_index = src[ii];
+ }
+ }
+ break;
+ }
+ case GL_UNSIGNED_SHORT: {
+ const uint16* src = static_cast<const uint16*>(indices);
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (src[ii] > max_index) {
+ max_index = src[ii];
+ }
+ }
+ break;
+ }
+ case GL_UNSIGNED_INT: {
+ uint32 max_glsizei = static_cast<uint32>(
+ std::numeric_limits<GLsizei>::max());
+ const uint32* src = static_cast<const uint32*>(indices);
+ for (GLsizei ii = 0; ii < count; ++ii) {
+          // Other parts of the API use GLsizei (signed) to store limits.
+          // As such, if we encounter an index that cannot be represented as a
+          // GLsizei we need to flag it as an error here.
+          if (src[ii] > max_glsizei) {
+ gl->SetGLError(
+ GL_INVALID_OPERATION, function_name, "index too large.");
+ return false;
+ }
+ GLsizei signed_index = static_cast<GLsizei>(src[ii]);
+ if (signed_index > max_index) {
+ max_index = signed_index;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ gl_helper->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, element_array_buffer_id_);
+ GLsizei bytes_per_element =
+ GLES2Util::GetGLTypeSizeForTexturesAndBuffers(type);
+ GLsizei bytes_needed = bytes_per_element * count;
+ if (bytes_needed > element_array_buffer_size_) {
+ element_array_buffer_size_ = bytes_needed;
+ gl->BufferDataHelper(
+ GL_ELEMENT_ARRAY_BUFFER, bytes_needed, NULL, GL_DYNAMIC_DRAW);
+ }
+ gl->BufferSubDataHelper(
+ GL_ELEMENT_ARRAY_BUFFER, 0, bytes_needed, indices);
+
+ num_elements = max_index + 1;
+ } else if (bound_vertex_array_object_->HaveEnabledClientSideBuffers()) {
+    // The index buffer is a GL buffer. Ask the service for the highest vertex
+ // that will be accessed. Note: It doesn't matter if another context
+ // changes the contents of any of the buffers. The service will still
+ // validate the indices. We just need to know how much to copy across.
+ num_elements = gl->GetMaxValueInBufferCHROMIUMHelper(
+ bound_vertex_array_object_->bound_element_array_buffer(),
+ count, type, ToGLuint(indices)) + 1;
+ }
+
+ bool simulated_client_side_buffers = false;
+ SetupSimulatedClientSideBuffers(
+ function_name, gl, gl_helper, num_elements, primcount,
+ &simulated_client_side_buffers);
+ *simulated = *simulated || simulated_client_side_buffers;
+#endif // defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ return true;
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/vertex_array_object_manager.h b/gpu/command_buffer/client/vertex_array_object_manager.h
new file mode 100644
index 0000000..34f630d
--- /dev/null
+++ b/gpu/command_buffer/client/vertex_array_object_manager.h
@@ -0,0 +1,126 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_VERTEX_ARRAY_OBJECT_MANAGER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_VERTEX_ARRAY_OBJECT_MANAGER_H_
+
+#include <GLES2/gl2.h>
+
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "gles2_impl_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2Implementation;
+class GLES2CmdHelper;
+class VertexArrayObject;
+
+// VertexArrayObjectManager manages vertex array objects on the client side
+// of the command buffer.
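+//
+// A minimal usage sketch (the identifiers below are illustrative, not part of
+// this header):
+//
+//   VertexArrayObjectManager vaos(max_attribs, kReservedArrayBufferId,
+//                                 kReservedElementArrayBufferId);
+//   bool changed = false;
+//   if (vaos.BindVertexArray(vao_id, &changed) && changed) {
+//     // Only now forward the bind to the service (e.g. BindVertexArrayOES).
+//   }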
+class GLES2_IMPL_EXPORT VertexArrayObjectManager {
+ public:
+ VertexArrayObjectManager(
+ GLuint max_vertex_attribs,
+ GLuint array_buffer_id,
+ GLuint element_array_buffer_id);
+ ~VertexArrayObjectManager();
+
+ bool IsReservedId(GLuint id) const;
+
+ // Binds an element array.
+  // Returns true if the service should be called.
+ bool BindElementArray(GLuint id);
+
+  // Unbinds the buffer.
+ void UnbindBuffer(GLuint id);
+
+  // Generates array objects for the given ids.
+ void GenVertexArrays(GLsizei n, const GLuint* arrays);
+
+ // Deletes array objects for the given ids.
+ void DeleteVertexArrays(GLsizei n, const GLuint* arrays);
+
+ // Binds a vertex array.
+ // changed will be set to true if the service should be called.
+ // Returns false if array is an unknown id.
+ bool BindVertexArray(GLuint array, bool* changed);
+
+ // simulated will be set to true if buffers were simulated.
+  // Returns true if the service should be called.
+ bool SetupSimulatedClientSideBuffers(
+ const char* function_name,
+ GLES2Implementation* gl,
+ GLES2CmdHelper* gl_helper,
+ GLsizei num_elements,
+ GLsizei primcount,
+ bool* simulated);
+
+  // Returns true if the buffers were set up.
+ bool SetupSimulatedIndexAndClientSideBuffers(
+ const char* function_name,
+ GLES2Implementation* gl,
+ GLES2CmdHelper* gl_helper,
+ GLsizei count,
+ GLenum type,
+ GLsizei primcount,
+ const void* indices,
+ GLuint* offset,
+ bool* simulated);
+
+ bool HaveEnabledClientSideBuffers() const;
+
+ void SetAttribEnable(GLuint index, bool enabled);
+
+ bool GetVertexAttrib(GLuint index, GLenum pname, uint32* param);
+
+ bool GetAttribPointer(GLuint index, GLenum pname, void** ptr) const;
+
+ // Returns false if error.
+ bool SetAttribPointer(
+ GLuint buffer_id,
+ GLuint index,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr);
+
+ void SetAttribDivisor(GLuint index, GLuint divisor);
+
+ GLuint bound_element_array_buffer() const;
+
+ private:
+ typedef base::hash_map<GLuint, VertexArrayObject*> VertexArrayObjectMap;
+
+ bool IsDefaultVAOBound() const;
+
+ GLsizei CollectData(const void* data,
+ GLsizei bytes_per_element,
+ GLsizei real_stride,
+ GLsizei num_elements);
+
+ GLuint max_vertex_attribs_;
+ GLuint array_buffer_id_;
+ GLsizei array_buffer_size_;
+ GLsizei array_buffer_offset_;
+ GLuint element_array_buffer_id_;
+ GLsizei element_array_buffer_size_;
+ GLsizei collection_buffer_size_;
+ scoped_ptr<int8[]> collection_buffer_;
+
+ VertexArrayObject* default_vertex_array_object_;
+ VertexArrayObject* bound_vertex_array_object_;
+ VertexArrayObjectMap vertex_array_objects_;
+
+ DISALLOW_COPY_AND_ASSIGN(VertexArrayObjectManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_VERTEX_ARRAY_OBJECT_MANAGER_H_
+
diff --git a/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc b/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc
new file mode 100644
index 0000000..b3ac065
--- /dev/null
+++ b/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc
@@ -0,0 +1,262 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/vertex_array_object_manager.h"
+
+#include <GLES2/gl2ext.h>
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace gles2 {
+
+class VertexArrayObjectManagerTest : public testing::Test {
+ protected:
+ static const GLuint kMaxAttribs = 4;
+ static const GLuint kClientSideArrayBuffer = 0x1234;
+ static const GLuint kClientSideElementArrayBuffer = 0x1235;
+
+ virtual void SetUp() {
+ manager_.reset(new VertexArrayObjectManager(
+ kMaxAttribs,
+ kClientSideArrayBuffer,
+ kClientSideElementArrayBuffer));
+ }
+ virtual void TearDown() {
+ }
+
+ scoped_ptr<VertexArrayObjectManager> manager_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const GLuint VertexArrayObjectManagerTest::kMaxAttribs;
+const GLuint VertexArrayObjectManagerTest::kClientSideArrayBuffer;
+const GLuint VertexArrayObjectManagerTest::kClientSideElementArrayBuffer;
+#endif
+
+TEST_F(VertexArrayObjectManagerTest, Basic) {
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+ // Check out of bounds access.
+ uint32 param;
+ void* ptr;
+ EXPECT_FALSE(manager_->GetVertexAttrib(
+      kMaxAttribs, GL_VERTEX_ATTRIB_ARRAY_ENABLED, &param));
+ EXPECT_FALSE(manager_->GetAttribPointer(
+ kMaxAttribs, GL_VERTEX_ATTRIB_ARRAY_POINTER, &ptr));
+ // Check defaults.
+ for (GLuint ii = 0; ii < kMaxAttribs; ++ii) {
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+        ii, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &param));
+ EXPECT_EQ(0u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+        ii, GL_VERTEX_ATTRIB_ARRAY_ENABLED, &param));
+ EXPECT_EQ(0u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+        ii, GL_VERTEX_ATTRIB_ARRAY_SIZE, &param));
+ EXPECT_EQ(4u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+        ii, GL_VERTEX_ATTRIB_ARRAY_TYPE, &param));
+ EXPECT_EQ(static_cast<uint32>(GL_FLOAT), param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+        ii, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, &param));
+ EXPECT_EQ(0u, param);
+ EXPECT_TRUE(manager_->GetAttribPointer(
+ ii, GL_VERTEX_ATTRIB_ARRAY_POINTER, &ptr));
+ EXPECT_TRUE(NULL == ptr);
+ }
+}
+
+TEST_F(VertexArrayObjectManagerTest, UnbindBuffer) {
+ const GLuint kBufferToUnbind = 123;
+ const GLuint kBufferToRemain = 456;
+ const GLuint kElementArray = 789;
+ bool changed = false;
+ GLuint ids[2] = { 1, 3, };
+ manager_->GenVertexArrays(arraysize(ids), ids);
+ // Bind buffers to attribs on 2 vaos.
+ for (size_t ii = 0; ii < arraysize(ids); ++ii) {
+ EXPECT_TRUE(manager_->BindVertexArray(ids[ii], &changed));
+ EXPECT_TRUE(manager_->SetAttribPointer(
+ kBufferToUnbind, 0, 4, GL_FLOAT, false, 0, 0));
+ EXPECT_TRUE(manager_->SetAttribPointer(
+ kBufferToRemain, 1, 4, GL_FLOAT, false, 0, 0));
+ EXPECT_TRUE(manager_->SetAttribPointer(
+ kBufferToUnbind, 2, 4, GL_FLOAT, false, 0, 0));
+ EXPECT_TRUE(manager_->SetAttribPointer(
+ kBufferToRemain, 3, 4, GL_FLOAT, false, 0, 0));
+ for (size_t jj = 0; jj < 4u; ++jj) {
+ manager_->SetAttribEnable(jj, true);
+ }
+ manager_->BindElementArray(kElementArray);
+ }
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+ EXPECT_TRUE(manager_->BindVertexArray(ids[0], &changed));
+ // Unbind the buffer.
+ manager_->UnbindBuffer(kBufferToUnbind);
+ manager_->UnbindBuffer(kElementArray);
+ // The attribs are still enabled but their buffer is 0.
+ EXPECT_TRUE(manager_->HaveEnabledClientSideBuffers());
+ // Check the status of the bindings.
+ static const uint32 expected[][4] = {
+ { 0, kBufferToRemain, 0, kBufferToRemain, },
+ { kBufferToUnbind, kBufferToRemain, kBufferToUnbind, kBufferToRemain, },
+ };
+ static const GLuint expected_element_array[] = {
+ 0, kElementArray,
+ };
+ for (size_t ii = 0; ii < arraysize(ids); ++ii) {
+ EXPECT_TRUE(manager_->BindVertexArray(ids[ii], &changed));
+ for (size_t jj = 0; jj < 4; ++jj) {
+ uint32 param = 1;
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+          jj, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &param));
+ EXPECT_EQ(expected[ii][jj], param)
+ << "id: " << ids[ii] << ", attrib: " << jj;
+ }
+ EXPECT_EQ(expected_element_array[ii],
+ manager_->bound_element_array_buffer());
+ }
+
+  // The vao that was not bound still has all service-side buffers.
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+
+  // Make sure unbinding 0 does not affect the count incorrectly.
+ EXPECT_TRUE(manager_->BindVertexArray(0, &changed));
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+ manager_->SetAttribEnable(2, true);
+ manager_->UnbindBuffer(0);
+ manager_->SetAttribEnable(2, false);
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+}
+
+TEST_F(VertexArrayObjectManagerTest, GetSet) {
+ const char* dummy = "dummy";
+ const void* p = reinterpret_cast<const void*>(dummy);
+ manager_->SetAttribEnable(1, true);
+ manager_->SetAttribPointer(123, 1, 3, GL_BYTE, true, 3, p);
+ uint32 param;
+ void* ptr;
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+      1, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &param));
+ EXPECT_EQ(123u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+      1, GL_VERTEX_ATTRIB_ARRAY_ENABLED, &param));
+ EXPECT_NE(0u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+      1, GL_VERTEX_ATTRIB_ARRAY_SIZE, &param));
+ EXPECT_EQ(3u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+      1, GL_VERTEX_ATTRIB_ARRAY_TYPE, &param));
+ EXPECT_EQ(static_cast<uint32>(GL_BYTE), param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+      1, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, &param));
+ EXPECT_NE(0u, param);
+ EXPECT_TRUE(manager_->GetAttribPointer(
+ 1, GL_VERTEX_ATTRIB_ARRAY_POINTER, &ptr));
+ EXPECT_EQ(p, ptr);
+
+ // Check that getting the divisor is passed to the service.
+ // This is because the divisor is an optional feature which
+ // only the service can validate.
+ EXPECT_FALSE(manager_->GetVertexAttrib(
+      0, GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE, &param));
+}
+
+TEST_F(VertexArrayObjectManagerTest, HaveEnabledClientSideArrays) {
+ // Check turning on an array.
+ manager_->SetAttribEnable(1, true);
+ EXPECT_TRUE(manager_->HaveEnabledClientSideBuffers());
+ // Check turning off an array.
+ manager_->SetAttribEnable(1, false);
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+ // Check turning on an array and assigning a buffer.
+ manager_->SetAttribEnable(1, true);
+ manager_->SetAttribPointer(123, 1, 3, GL_BYTE, true, 3, NULL);
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+ // Check unassigning a buffer.
+ manager_->SetAttribPointer(0, 1, 3, GL_BYTE, true, 3, NULL);
+ EXPECT_TRUE(manager_->HaveEnabledClientSideBuffers());
+ // Check disabling the array.
+ manager_->SetAttribEnable(1, false);
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+}
+
+TEST_F(VertexArrayObjectManagerTest, BindElementArray) {
+ bool changed = false;
+ GLuint ids[2] = { 1, 3, };
+ manager_->GenVertexArrays(arraysize(ids), ids);
+
+ // Check the default element array is 0.
+ EXPECT_EQ(0u, manager_->bound_element_array_buffer());
+ // Check binding the same array does not need a service call.
+ EXPECT_FALSE(manager_->BindElementArray(0u));
+ // Check binding a new element array requires a service call.
+ EXPECT_TRUE(manager_->BindElementArray(55u));
+ // Check the element array was bound.
+ EXPECT_EQ(55u, manager_->bound_element_array_buffer());
+ // Check binding the same array does not need a service call.
+ EXPECT_FALSE(manager_->BindElementArray(55u));
+
+ // Check with a new vao.
+ EXPECT_TRUE(manager_->BindVertexArray(1, &changed));
+ // Check the default element array is 0.
+ EXPECT_EQ(0u, manager_->bound_element_array_buffer());
+ // Check binding a new element array requires a service call.
+ EXPECT_TRUE(manager_->BindElementArray(11u));
+ // Check the element array was bound.
+ EXPECT_EQ(11u, manager_->bound_element_array_buffer());
+ // Check binding the same array does not need a service call.
+ EXPECT_FALSE(manager_->BindElementArray(11u));
+
+  // Check that switching vao bindings returns the correct element array.
+ EXPECT_TRUE(manager_->BindVertexArray(3, &changed));
+ EXPECT_EQ(0u, manager_->bound_element_array_buffer());
+ EXPECT_TRUE(manager_->BindVertexArray(0, &changed));
+ EXPECT_EQ(55u, manager_->bound_element_array_buffer());
+ EXPECT_TRUE(manager_->BindVertexArray(1, &changed));
+ EXPECT_EQ(11u, manager_->bound_element_array_buffer());
+}
+
+TEST_F(VertexArrayObjectManagerTest, GenBindDelete) {
+ // Check unknown array fails.
+ bool changed = false;
+ EXPECT_FALSE(manager_->BindVertexArray(123, &changed));
+ EXPECT_FALSE(changed);
+
+ GLuint ids[2] = { 1, 3, };
+ manager_->GenVertexArrays(arraysize(ids), ids);
+ // Check Genned arrays succeed.
+ EXPECT_TRUE(manager_->BindVertexArray(1, &changed));
+ EXPECT_TRUE(changed);
+ EXPECT_TRUE(manager_->BindVertexArray(3, &changed));
+ EXPECT_TRUE(changed);
+
+ // Check binding the same array returns changed as false.
+ EXPECT_TRUE(manager_->BindVertexArray(3, &changed));
+ EXPECT_FALSE(changed);
+
+  // Check deleted arrays fail to bind.
+ manager_->DeleteVertexArrays(2, ids);
+ EXPECT_FALSE(manager_->BindVertexArray(1, &changed));
+ EXPECT_FALSE(changed);
+ EXPECT_FALSE(manager_->BindVertexArray(3, &changed));
+ EXPECT_FALSE(changed);
+
+ // Check binding 0 returns changed as false since it's
+ // already bound.
+ EXPECT_TRUE(manager_->BindVertexArray(0, &changed));
+ EXPECT_FALSE(changed);
+}
+
+TEST_F(VertexArrayObjectManagerTest, IsReservedId) {
+ EXPECT_TRUE(manager_->IsReservedId(kClientSideArrayBuffer));
+ EXPECT_TRUE(manager_->IsReservedId(kClientSideElementArrayBuffer));
+ EXPECT_FALSE(manager_->IsReservedId(0));
+ EXPECT_FALSE(manager_->IsReservedId(1));
+ EXPECT_FALSE(manager_->IsReservedId(2));
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/cmd_buffer_functions.txt b/gpu/command_buffer/cmd_buffer_functions.txt
new file mode 100644
index 0000000..5964cd8
--- /dev/null
+++ b/gpu/command_buffer/cmd_buffer_functions.txt
@@ -0,0 +1,228 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is read by build_gles2_cmd_buffer.py to generate commands.
+
+GL_APICALL void GL_APIENTRY glActiveTexture (GLenum texture);
+GL_APICALL void GL_APIENTRY glAttachShader (GLidProgram program, GLidShader shader);
+GL_APICALL void GL_APIENTRY glBindAttribLocation (GLidProgram program, GLuint index, const char* name);
+GL_APICALL void GL_APIENTRY glBindBuffer (GLenumBufferTarget target, GLidBindBuffer buffer);
+GL_APICALL void GL_APIENTRY glBindFramebuffer (GLenumFrameBufferTarget target, GLidBindFramebuffer framebuffer);
+GL_APICALL void GL_APIENTRY glBindRenderbuffer (GLenumRenderBufferTarget target, GLidBindRenderbuffer renderbuffer);
+GL_APICALL void GL_APIENTRY glBindTexture (GLenumTextureBindTarget target, GLidBindTexture texture);
+GL_APICALL void GL_APIENTRY glBlendColor (GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha);
+GL_APICALL void GL_APIENTRY glBlendEquation ( GLenumEquation mode );
+GL_APICALL void GL_APIENTRY glBlendEquationSeparate (GLenumEquation modeRGB, GLenumEquation modeAlpha);
+GL_APICALL void GL_APIENTRY glBlendFunc (GLenumSrcBlendFactor sfactor, GLenumDstBlendFactor dfactor);
+GL_APICALL void GL_APIENTRY glBlendFuncSeparate (GLenumSrcBlendFactor srcRGB, GLenumDstBlendFactor dstRGB, GLenumSrcBlendFactor srcAlpha, GLenumDstBlendFactor dstAlpha);
+GL_APICALL void GL_APIENTRY glBufferData (GLenumBufferTarget target, GLsizeiptr size, const void* data, GLenumBufferUsage usage);
+GL_APICALL void GL_APIENTRY glBufferSubData (GLenumBufferTarget target, GLintptrNotNegative offset, GLsizeiptr size, const void* data);
+GL_APICALL GLenum GL_APIENTRY glCheckFramebufferStatus (GLenumFrameBufferTarget target);
+GL_APICALL void GL_APIENTRY glClear (GLbitfield mask);
+GL_APICALL void GL_APIENTRY glClearColor (GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha);
+GL_APICALL void GL_APIENTRY glClearDepthf (GLclampf depth);
+GL_APICALL void GL_APIENTRY glClearStencil (GLint s);
+GL_APICALL void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
+GL_APICALL void GL_APIENTRY glCompileShader (GLidShader shader);
+GL_APICALL void GL_APIENTRY glCompressedTexImage2D (GLenumTextureTarget target, GLint level, GLenumCompressedTextureFormat internalformat, GLsizei width, GLsizei height, GLintTextureBorder border, GLsizei imageSize, const void* data);
+GL_APICALL void GL_APIENTRY glCompressedTexSubImage2D (GLenumTextureTarget target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenumCompressedTextureFormat format, GLsizei imageSize, const void* data);
+GL_APICALL void GL_APIENTRY glCopyTexImage2D (GLenumTextureTarget target, GLint level, GLenumTextureInternalFormat internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLintTextureBorder border);
+GL_APICALL void GL_APIENTRY glCopyTexSubImage2D (GLenumTextureTarget target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GL_APICALL GLuint GL_APIENTRY glCreateProgram (void);
+GL_APICALL GLuint GL_APIENTRY glCreateShader (GLenumShaderType type);
+GL_APICALL void GL_APIENTRY glCullFace (GLenumFaceType mode);
+GL_APICALL void GL_APIENTRY glDeleteBuffers (GLsizeiNotNegative n, const GLuint* buffers);
+GL_APICALL void GL_APIENTRY glDeleteFramebuffers (GLsizeiNotNegative n, const GLuint* framebuffers);
+GL_APICALL void GL_APIENTRY glDeleteProgram (GLidProgram program);
+GL_APICALL void GL_APIENTRY glDeleteRenderbuffers (GLsizeiNotNegative n, const GLuint* renderbuffers);
+GL_APICALL void GL_APIENTRY glDeleteShader (GLidShader shader);
+GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizeiNotNegative n, const GLuint* textures);
+GL_APICALL void GL_APIENTRY glDepthFunc (GLenumCmpFunction func);
+GL_APICALL void GL_APIENTRY glDepthMask (GLboolean flag);
+GL_APICALL void GL_APIENTRY glDepthRangef (GLclampf zNear, GLclampf zFar);
+GL_APICALL void GL_APIENTRY glDetachShader (GLidProgram program, GLidShader shader);
+GL_APICALL void GL_APIENTRY glDisable (GLenumCapability cap);
+GL_APICALL void GL_APIENTRY glDisableVertexAttribArray (GLuint index);
+GL_APICALL void GL_APIENTRY glDrawArrays (GLenumDrawMode mode, GLint first, GLsizei count);
+GL_APICALL void GL_APIENTRY glDrawElements (GLenumDrawMode mode, GLsizei count, GLenumIndexType type, const void* indices);
+GL_APICALL void GL_APIENTRY glEnable (GLenumCapability cap);
+GL_APICALL void GL_APIENTRY glEnableVertexAttribArray (GLuint index);
+GL_APICALL void GL_APIENTRY glFinish (void);
+GL_APICALL void GL_APIENTRY glFlush (void);
+GL_APICALL void GL_APIENTRY glFramebufferRenderbuffer (GLenumFrameBufferTarget target, GLenumAttachment attachment, GLenumRenderBufferTarget renderbuffertarget, GLidRenderbuffer renderbuffer);
+GL_APICALL void GL_APIENTRY glFramebufferTexture2D (GLenumFrameBufferTarget target, GLenumAttachment attachment, GLenumTextureTarget textarget, GLidTexture texture, GLintZeroOnly level);
+GL_APICALL void GL_APIENTRY glFrontFace (GLenumFaceMode mode);
+GL_APICALL void GL_APIENTRY glGenBuffers (GLsizeiNotNegative n, GLuint* buffers);
+GL_APICALL void GL_APIENTRY glGenerateMipmap (GLenumTextureBindTarget target);
+GL_APICALL void GL_APIENTRY glGenFramebuffers (GLsizeiNotNegative n, GLuint* framebuffers);
+GL_APICALL void GL_APIENTRY glGenRenderbuffers (GLsizeiNotNegative n, GLuint* renderbuffers);
+GL_APICALL void GL_APIENTRY glGenTextures (GLsizeiNotNegative n, GLuint* textures);
+GL_APICALL void GL_APIENTRY glGetActiveAttrib (GLidProgram program, GLuint index, GLsizeiNotNegative bufsize, GLsizeiOptional* length, GLint* size, GLenum* type, char* name);
+GL_APICALL void GL_APIENTRY glGetActiveUniform (GLidProgram program, GLuint index, GLsizeiNotNegative bufsize, GLsizeiOptional* length, GLint* size, GLenum* type, char* name);
+GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLidProgram program, GLsizeiNotNegative maxcount, GLsizeiOptional* count, GLuint* shaders);
+GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLidProgram program, const char* name);
+GL_APICALL void GL_APIENTRY glGetBooleanv (GLenumGLState pname, GLboolean* params);
+GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenumBufferTarget target, GLenumBufferParameter pname, GLint* params);
+GL_APICALL GLenum GL_APIENTRY glGetError (void);
+GL_APICALL void GL_APIENTRY glGetFloatv (GLenumGLState pname, GLfloat* params);
+GL_APICALL void GL_APIENTRY glGetFramebufferAttachmentParameteriv (GLenumFrameBufferTarget target, GLenumAttachment attachment, GLenumFrameBufferParameter pname, GLint* params);
+GL_APICALL void GL_APIENTRY glGetIntegerv (GLenumGLState pname, GLint* params);
+GL_APICALL void GL_APIENTRY glGetProgramiv (GLidProgram program, GLenumProgramParameter pname, GLint* params);
+GL_APICALL void GL_APIENTRY glGetProgramInfoLog (GLidProgram program, GLsizeiNotNegative bufsize, GLsizeiOptional* length, char* infolog);
+GL_APICALL void GL_APIENTRY glGetRenderbufferParameteriv (GLenumRenderBufferTarget target, GLenumRenderBufferParameter pname, GLint* params);
+GL_APICALL void GL_APIENTRY glGetShaderiv (GLidShader shader, GLenumShaderParameter pname, GLint* params);
+GL_APICALL void GL_APIENTRY glGetShaderInfoLog (GLidShader shader, GLsizeiNotNegative bufsize, GLsizeiOptional* length, char* infolog);
+GL_APICALL void GL_APIENTRY glGetShaderPrecisionFormat (GLenumShaderType shadertype, GLenumShaderPrecision precisiontype, GLint* range, GLint* precision);
+GL_APICALL void GL_APIENTRY glGetShaderSource (GLidShader shader, GLsizeiNotNegative bufsize, GLsizeiOptional* length, char* source);
+GL_APICALL const GLubyte* GL_APIENTRY glGetString (GLenumStringType name);
+GL_APICALL void GL_APIENTRY glGetTexParameterfv (GLenumGetTexParamTarget target, GLenumTextureParameter pname, GLfloat* params);
+GL_APICALL void GL_APIENTRY glGetTexParameteriv (GLenumGetTexParamTarget target, GLenumTextureParameter pname, GLint* params);
+GL_APICALL void GL_APIENTRY glGetUniformfv (GLidProgram program, GLintUniformLocation location, GLfloat* params);
+GL_APICALL void GL_APIENTRY glGetUniformiv (GLidProgram program, GLintUniformLocation location, GLint* params);
+GL_APICALL GLint GL_APIENTRY glGetUniformLocation (GLidProgram program, const char* name);
+GL_APICALL void GL_APIENTRY glGetVertexAttribfv (GLuint index, GLenumVertexAttribute pname, GLfloat* params);
+GL_APICALL void GL_APIENTRY glGetVertexAttribiv (GLuint index, GLenumVertexAttribute pname, GLint* params);
+GL_APICALL void GL_APIENTRY glGetVertexAttribPointerv (GLuint index, GLenumVertexPointer pname, void** pointer);
+GL_APICALL void GL_APIENTRY glHint (GLenumHintTarget target, GLenumHintMode mode);
+GL_APICALL GLboolean GL_APIENTRY glIsBuffer (GLidBuffer buffer);
+GL_APICALL GLboolean GL_APIENTRY glIsEnabled (GLenumCapability cap);
+GL_APICALL GLboolean GL_APIENTRY glIsFramebuffer (GLidFramebuffer framebuffer);
+GL_APICALL GLboolean GL_APIENTRY glIsProgram (GLidProgram program);
+GL_APICALL GLboolean GL_APIENTRY glIsRenderbuffer (GLidRenderbuffer renderbuffer);
+GL_APICALL GLboolean GL_APIENTRY glIsShader (GLidShader shader);
+GL_APICALL GLboolean GL_APIENTRY glIsTexture (GLidTexture texture);
+GL_APICALL void GL_APIENTRY glLineWidth (GLfloat width);
+GL_APICALL void GL_APIENTRY glLinkProgram (GLidProgram program);
+GL_APICALL void GL_APIENTRY glPixelStorei (GLenumPixelStore pname, GLintPixelStoreAlignment param);
+GL_APICALL void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units);
+GL_APICALL void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenumReadPixelFormat format, GLenumPixelType type, void* pixels);
+GL_APICALL void GL_APIENTRY glReleaseShaderCompiler (void);
+GL_APICALL void GL_APIENTRY glRenderbufferStorage (GLenumRenderBufferTarget target, GLenumRenderBufferFormat internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glSampleCoverage (GLclampf value, GLboolean invert);
+GL_APICALL void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glShaderBinary (GLsizeiNotNegative n, const GLuint* shaders, GLenumShaderBinaryFormat binaryformat, const void* binary, GLsizeiNotNegative length);
+GL_APICALL void GL_APIENTRY glShaderSource (GLidShader shader, GLsizeiNotNegative count, const GLchar* const* str, const GLint* length);
+GL_APICALL void GL_APIENTRY glShallowFinishCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glShallowFlushCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glStencilFunc (GLenumCmpFunction func, GLint ref, GLuint mask);
+GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenumFaceType face, GLenumCmpFunction func, GLint ref, GLuint mask);
+GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask);
+GL_APICALL void GL_APIENTRY glStencilMaskSeparate (GLenumFaceType face, GLuint mask);
+GL_APICALL void GL_APIENTRY glStencilOp (GLenumStencilOp fail, GLenumStencilOp zfail, GLenumStencilOp zpass);
+GL_APICALL void GL_APIENTRY glStencilOpSeparate (GLenumFaceType face, GLenumStencilOp fail, GLenumStencilOp zfail, GLenumStencilOp zpass);
+GL_APICALL void GL_APIENTRY glTexImage2D (GLenumTextureTarget target, GLint level, GLintTextureInternalFormat internalformat, GLsizei width, GLsizei height, GLintTextureBorder border, GLenumTextureFormat format, GLenumPixelType type, const void* pixels);
+GL_APICALL void GL_APIENTRY glTexParameterf (GLenumTextureBindTarget target, GLenumTextureParameter pname, GLfloat param);
+GL_APICALL void GL_APIENTRY glTexParameterfv (GLenumTextureBindTarget target, GLenumTextureParameter pname, const GLfloat* params);
+GL_APICALL void GL_APIENTRY glTexParameteri (GLenumTextureBindTarget target, GLenumTextureParameter pname, GLint param);
+GL_APICALL void GL_APIENTRY glTexParameteriv (GLenumTextureBindTarget target, GLenumTextureParameter pname, const GLint* params);
+GL_APICALL void GL_APIENTRY glTexSubImage2D (GLenumTextureTarget target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenumTextureFormat format, GLenumPixelType type, const void* pixels);
+GL_APICALL void GL_APIENTRY glUniform1f (GLintUniformLocation location, GLfloat x);
+GL_APICALL void GL_APIENTRY glUniform1fv (GLintUniformLocation location, GLsizeiNotNegative count, const GLfloat* v);
+GL_APICALL void GL_APIENTRY glUniform1i (GLintUniformLocation location, GLint x);
+GL_APICALL void GL_APIENTRY glUniform1iv (GLintUniformLocation location, GLsizeiNotNegative count, const GLint* v);
+GL_APICALL void GL_APIENTRY glUniform2f (GLintUniformLocation location, GLfloat x, GLfloat y);
+GL_APICALL void GL_APIENTRY glUniform2fv (GLintUniformLocation location, GLsizeiNotNegative count, const GLfloat* v);
+GL_APICALL void GL_APIENTRY glUniform2i (GLintUniformLocation location, GLint x, GLint y);
+GL_APICALL void GL_APIENTRY glUniform2iv (GLintUniformLocation location, GLsizeiNotNegative count, const GLint* v);
+GL_APICALL void GL_APIENTRY glUniform3f (GLintUniformLocation location, GLfloat x, GLfloat y, GLfloat z);
+GL_APICALL void GL_APIENTRY glUniform3fv (GLintUniformLocation location, GLsizeiNotNegative count, const GLfloat* v);
+GL_APICALL void GL_APIENTRY glUniform3i (GLintUniformLocation location, GLint x, GLint y, GLint z);
+GL_APICALL void GL_APIENTRY glUniform3iv (GLintUniformLocation location, GLsizeiNotNegative count, const GLint* v);
+GL_APICALL void GL_APIENTRY glUniform4f (GLintUniformLocation location, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GL_APICALL void GL_APIENTRY glUniform4fv (GLintUniformLocation location, GLsizeiNotNegative count, const GLfloat* v);
+GL_APICALL void GL_APIENTRY glUniform4i (GLintUniformLocation location, GLint x, GLint y, GLint z, GLint w);
+GL_APICALL void GL_APIENTRY glUniform4iv (GLintUniformLocation location, GLsizeiNotNegative count, const GLint* v);
+GL_APICALL void GL_APIENTRY glUniformMatrix2fv (GLintUniformLocation location, GLsizeiNotNegative count, GLbooleanFalseOnly transpose, const GLfloat* value);
+GL_APICALL void GL_APIENTRY glUniformMatrix3fv (GLintUniformLocation location, GLsizeiNotNegative count, GLbooleanFalseOnly transpose, const GLfloat* value);
+GL_APICALL void GL_APIENTRY glUniformMatrix4fv (GLintUniformLocation location, GLsizeiNotNegative count, GLbooleanFalseOnly transpose, const GLfloat* value);
+GL_APICALL void GL_APIENTRY glUseProgram (GLidZeroProgram program);
+GL_APICALL void GL_APIENTRY glValidateProgram (GLidProgram program);
+GL_APICALL void GL_APIENTRY glVertexAttrib1f (GLuint indx, GLfloat x);
+GL_APICALL void GL_APIENTRY glVertexAttrib1fv (GLuint indx, const GLfloat* values);
+GL_APICALL void GL_APIENTRY glVertexAttrib2f (GLuint indx, GLfloat x, GLfloat y);
+GL_APICALL void GL_APIENTRY glVertexAttrib2fv (GLuint indx, const GLfloat* values);
+GL_APICALL void GL_APIENTRY glVertexAttrib3f (GLuint indx, GLfloat x, GLfloat y, GLfloat z);
+GL_APICALL void GL_APIENTRY glVertexAttrib3fv (GLuint indx, const GLfloat* values);
+GL_APICALL void GL_APIENTRY glVertexAttrib4f (GLuint indx, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GL_APICALL void GL_APIENTRY glVertexAttrib4fv (GLuint indx, const GLfloat* values);
+GL_APICALL void GL_APIENTRY glVertexAttribPointer (GLuint indx, GLintVertexAttribSize size, GLenumVertexAttribType type, GLboolean normalized, GLsizei stride, const void* ptr);
+GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glBlitFramebufferCHROMIUM (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenumBlitFilter filter);
+GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleCHROMIUM (GLenumRenderBufferTarget target, GLsizei samples, GLenumRenderBufferFormat internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleEXT (GLenumRenderBufferTarget target, GLsizei samples, GLenumRenderBufferFormat internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glFramebufferTexture2DMultisampleEXT (GLenumFrameBufferTarget target, GLenumAttachment attachment, GLenumTextureTarget textarget, GLidTexture texture, GLintZeroOnly level, GLsizei samples);
+GL_APICALL void GL_APIENTRY glTexStorage2DEXT (GLenumTextureTarget target, GLsizei levels, GLenumTextureInternalFormatStorage internalFormat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glGenQueriesEXT (GLsizeiNotNegative n, GLuint* queries);
+GL_APICALL void GL_APIENTRY glDeleteQueriesEXT (GLsizeiNotNegative n, const GLuint* queries);
+GL_APICALL GLboolean GL_APIENTRY glIsQueryEXT (GLidQuery id);
+GL_APICALL void GL_APIENTRY glBeginQueryEXT (GLenumQueryTarget target, GLidQuery id);
+GL_APICALL void GL_APIENTRY glEndQueryEXT (GLenumQueryTarget target);
+GL_APICALL void GL_APIENTRY glGetQueryivEXT (GLenumQueryTarget target, GLenumQueryParameter pname, GLint* params);
+GL_APICALL void GL_APIENTRY glGetQueryObjectuivEXT (GLidQuery id, GLenumQueryObjectParameter pname, GLuint* params);
+GL_APICALL void GL_APIENTRY glInsertEventMarkerEXT (GLsizei length, const GLchar* marker);
+GL_APICALL void GL_APIENTRY glPushGroupMarkerEXT (GLsizei length, const GLchar* marker);
+GL_APICALL void GL_APIENTRY glPopGroupMarkerEXT (void);
+
+GL_APICALL void GL_APIENTRY glGenVertexArraysOES (GLsizeiNotNegative n, GLuint* arrays);
+GL_APICALL void GL_APIENTRY glDeleteVertexArraysOES (GLsizeiNotNegative n, const GLuint* arrays);
+GL_APICALL GLboolean GL_APIENTRY glIsVertexArrayOES (GLidVertexArray array);
+GL_APICALL void GL_APIENTRY glBindVertexArrayOES (GLidBindVertexArray array);
+
+// Non-GL commands.
+GL_APICALL void GL_APIENTRY glSwapBuffers (void);
+GL_APICALL GLuint GL_APIENTRY glGetMaxValueInBufferCHROMIUM (GLidBuffer buffer_id, GLsizei count, GLenumGetMaxIndexType type, GLuint offset);
+GL_APICALL GLboolean GL_APIENTRY glEnableFeatureCHROMIUM (const char* feature);
+GL_APICALL void* GL_APIENTRY glMapBufferCHROMIUM (GLuint target, GLenum access);
+GL_APICALL GLboolean GL_APIENTRY glUnmapBufferCHROMIUM (GLuint target);
+GL_APICALL void* GL_APIENTRY glMapImageCHROMIUM (GLuint image_id);
+GL_APICALL void GL_APIENTRY glUnmapImageCHROMIUM (GLuint image_id);
+
+GL_APICALL void* GL_APIENTRY glMapBufferSubDataCHROMIUM (GLuint target, GLintptrNotNegative offset, GLsizeiptr size, GLenum access);
+GL_APICALL void GL_APIENTRY glUnmapBufferSubDataCHROMIUM (const void* mem);
+GL_APICALL void* GL_APIENTRY glMapTexSubImage2DCHROMIUM (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, GLenum access);
+GL_APICALL void GL_APIENTRY glUnmapTexSubImage2DCHROMIUM (const void* mem);
+GL_APICALL void GL_APIENTRY glResizeCHROMIUM (GLuint width, GLuint height, GLfloat scale_factor);
+GL_APICALL const GLchar* GL_APIENTRY glGetRequestableExtensionsCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glRequestExtensionCHROMIUM (const char* extension);
+GL_APICALL void GL_APIENTRY glRateLimitOffscreenContextCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glGetMultipleIntegervCHROMIUM (const GLenum* pnames, GLuint count, GLint* results, GLsizeiptr size);
+GL_APICALL void GL_APIENTRY glGetProgramInfoCHROMIUM (GLidProgram program, GLsizeiNotNegative bufsize, GLsizei* size, void* info);
+GL_APICALL GLuint GL_APIENTRY glCreateStreamTextureCHROMIUM (GLuint texture);
+GL_APICALL GLuint GL_APIENTRY glCreateImageCHROMIUM (GLsizei width, GLsizei height, GLenum internalformat, GLenum usage);
+GL_APICALL void GL_APIENTRY glDestroyImageCHROMIUM (GLuint image_id);
+GL_APICALL void GL_APIENTRY glGetImageParameterivCHROMIUM (GLuint image_id, GLenum pname, GLint* params);
+GL_APICALL GLuint GL_APIENTRY glCreateGpuMemoryBufferImageCHROMIUM (GLsizei width, GLsizei height, GLenum internalformat, GLenum usage);
+GL_APICALL void GL_APIENTRY glGetTranslatedShaderSourceANGLE (GLidShader shader, GLsizeiNotNegative bufsize, GLsizeiOptional* length, char* source);
+GL_APICALL void GL_APIENTRY glPostSubBufferCHROMIUM (GLint x, GLint y, GLint width, GLint height);
+GL_APICALL void GL_APIENTRY glTexImageIOSurface2DCHROMIUM (GLenumTextureBindTarget target, GLsizei width, GLsizei height, GLuint ioSurfaceId, GLuint plane);
+GL_APICALL void GL_APIENTRY glCopyTextureCHROMIUM (GLenum target, GLenum source_id, GLenum dest_id, GLint level, GLintTextureInternalFormat internalformat, GLenumPixelType dest_type);
+GL_APICALL void GL_APIENTRY glDrawArraysInstancedANGLE (GLenumDrawMode mode, GLint first, GLsizei count, GLsizei primcount);
+GL_APICALL void GL_APIENTRY glDrawElementsInstancedANGLE (GLenumDrawMode mode, GLsizei count, GLenumIndexType type, const void* indices, GLsizei primcount);
+GL_APICALL void GL_APIENTRY glVertexAttribDivisorANGLE (GLuint index, GLuint divisor);
+GL_APICALL void GL_APIENTRY glGenMailboxCHROMIUM (GLbyte* mailbox);
+GL_APICALL void GL_APIENTRY glProduceTextureCHROMIUM (GLenumTextureBindTarget target, const GLbyte* mailbox);
+GL_APICALL void GL_APIENTRY glProduceTextureDirectCHROMIUM (GLidBindTexture texture, GLenumTextureBindTarget target, const GLbyte* mailbox);
+GL_APICALL void GL_APIENTRY glConsumeTextureCHROMIUM (GLenumTextureBindTarget target, const GLbyte* mailbox);
+GL_APICALL GLuint GL_APIENTRY glCreateAndConsumeTextureCHROMIUM (GLenumTextureBindTarget target, const GLbyte* mailbox);
+GL_APICALL void GL_APIENTRY glBindUniformLocationCHROMIUM (GLidProgram program, GLint location, const char* name);
+GL_APICALL void GL_APIENTRY glBindTexImage2DCHROMIUM (GLenumTextureBindTarget target, GLint imageId);
+GL_APICALL void GL_APIENTRY glReleaseTexImage2DCHROMIUM (GLenumTextureBindTarget target, GLint imageId);
+GL_APICALL void GL_APIENTRY glTraceBeginCHROMIUM (const char* name);
+GL_APICALL void GL_APIENTRY glTraceEndCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glAsyncTexSubImage2DCHROMIUM (GLenumTextureTarget target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenumTextureFormat format, GLenumPixelType type, const void* data);
+GL_APICALL void GL_APIENTRY glAsyncTexImage2DCHROMIUM (GLenumTextureTarget target, GLint level, GLenumTextureInternalFormat internalformat, GLsizei width, GLsizei height, GLintTextureBorder border, GLenumTextureFormat format, GLenumPixelType type, const void* pixels);
+GL_APICALL void GL_APIENTRY glWaitAsyncTexImage2DCHROMIUM (GLenumTextureTarget target);
+GL_APICALL void GL_APIENTRY glWaitAllAsyncTexImage2DCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glDiscardFramebufferEXT (GLenum target, GLsizei count, const GLenum* attachments);
+GL_APICALL void GL_APIENTRY glLoseContextCHROMIUM (GLenumResetStatus current, GLenumResetStatus other);
+GL_APICALL GLuint GL_APIENTRY glInsertSyncPointCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glWaitSyncPointCHROMIUM (GLuint sync_point);
+GL_APICALL void GL_APIENTRY glDrawBuffersEXT (GLsizei count, const GLenum* bufs);
+GL_APICALL void GL_APIENTRY glDiscardBackbufferCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glScheduleOverlayPlaneCHROMIUM (GLint plane_z_order, GLenum plane_transform, GLuint overlay_texture_id, GLint bounds_x, GLint bounds_y, GLint bounds_width, GLint bounds_height, GLfloat uv_x, GLfloat uv_y, GLfloat uv_width, GLfloat uv_height);
+
+// Extension CHROMIUM_path_rendering.
+GL_APICALL void GL_APIENTRY glMatrixLoadfCHROMIUM (GLenumMatrixMode matrixMode, const GLfloat* m);
+GL_APICALL void GL_APIENTRY glMatrixLoadIdentityCHROMIUM (GLenumMatrixMode matrixMode);
+
diff --git a/gpu/command_buffer/command_buffer.gyp b/gpu/command_buffer/command_buffer.gyp
new file mode 100644
index 0000000..8ffd15b
--- /dev/null
+++ b/gpu/command_buffer/command_buffer.gyp
@@ -0,0 +1,29 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'includes': [
+ 'command_buffer.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'gles2_utils',
+ 'type': '<(component)',
+ 'variables': {
+ 'gles2_utils_target': 1,
+ },
+ 'dependencies': [
+ '../../base/base.gyp:base',
+ '../../third_party/khronos/khronos.gyp:khronos_headers',
+ ],
+ 'export_dependent_settings': [
+ '../../base/base.gyp:base',
+ ],
+ },
+ ],
+}
+
diff --git a/gpu/command_buffer/command_buffer.gypi b/gpu/command_buffer/command_buffer.gypi
new file mode 100644
index 0000000..473da92
--- /dev/null
+++ b/gpu/command_buffer/command_buffer.gypi
@@ -0,0 +1,25 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'variables': {
+ 'gles2_utils_target': 0,
+ },
+ 'target_conditions': [
+ # This part is shared between the targets defined below.
+ ['gles2_utils_target==1', {
+ 'defines': [
+ 'GLES2_UTILS_IMPLEMENTATION',
+ ],
+ 'sources': [
+ 'common/gles2_cmd_format.h',
+ 'common/gles2_cmd_utils.cc',
+ 'common/gles2_cmd_utils.h',
+ 'common/gles2_utils_export.h',
+ ],
+ }],
+ ],
+ },
+}
diff --git a/gpu/command_buffer/command_buffer_nacl.gyp b/gpu/command_buffer/command_buffer_nacl.gyp
new file mode 100644
index 0000000..aabd1c8
--- /dev/null
+++ b/gpu/command_buffer/command_buffer_nacl.gyp
@@ -0,0 +1,36 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'includes': [
+ '../../build/common_untrusted.gypi',
+ 'command_buffer.gypi',
+ ],
+ 'conditions': [
+ ['disable_nacl==0 and disable_nacl_untrusted==0', {
+ 'targets': [
+ {
+ 'target_name': 'gles2_utils_nacl',
+ 'type': 'none',
+ 'variables': {
+ 'gles2_utils_target': 1,
+ 'nacl_untrusted_build': 1,
+ 'nlib_target': 'libgles2_utils_nacl.a',
+ 'build_glibc': 0,
+ 'build_newlib': 0,
+ 'build_irt': 1,
+ },
+ 'dependencies': [
+ '../../native_client/tools.gyp:prep_toolchain',
+ '../../base/base_nacl.gyp:base_nacl',
+ '../../third_party/khronos/khronos.gyp:khronos_headers',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/gpu/command_buffer/common/BUILD.gn b/gpu/command_buffer/common/BUILD.gn
new file mode 100644
index 0000000..ad6d320
--- /dev/null
+++ b/gpu/command_buffer/common/BUILD.gn
@@ -0,0 +1,55 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+source_set("common") {
+ sources = [
+ "bitfield_helpers.h",
+ "buffer.cc",
+ "buffer.h",
+ "capabilities.cc",
+ "capabilities.h",
+ "cmd_buffer_common.cc",
+ "cmd_buffer_common.h",
+ "command_buffer.h",
+ "constants.h",
+ "debug_marker_manager.cc",
+ "debug_marker_manager.h",
+ "gles2_cmd_format.cc",
+ "gles2_cmd_format.h",
+ "gles2_cmd_format_autogen.h",
+ "gles2_cmd_ids.h",
+ "gles2_cmd_ids_autogen.h",
+ "id_allocator.cc",
+ "id_allocator.h",
+ "mailbox.cc",
+ "mailbox.h",
+ "mailbox_holder.cc",
+ "mailbox_holder.h",
+ "thread_local.h",
+ "time.h",
+ ]
+
+ defines = [ "GPU_IMPLEMENTATION" ]
+
+ deps = [
+ ":gles2_utils",
+ "//base",
+ ]
+}
+
+component("gles2_utils") {
+ sources = [
+ "gles2_cmd_utils.cc",
+ "gles2_cmd_utils.h",
+ "gles2_utils_export.h",
+ ]
+
+ defines = [ "GLES2_UTILS_IMPLEMENTATION" ]
+
+ deps = [
+ "//base",
+ ]
+
+ all_dependent_configs = [ "//third_party/khronos:khronos_headers" ]
+}
diff --git a/gpu/command_buffer/common/bitfield_helpers.h b/gpu/command_buffer/common/bitfield_helpers.h
new file mode 100644
index 0000000..62841ee
--- /dev/null
+++ b/gpu/command_buffer/common/bitfield_helpers.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains a helper template class used to access bit fields in
+// unsigned ints.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_BITFIELD_HELPERS_H_
+#define GPU_COMMAND_BUFFER_COMMON_BITFIELD_HELPERS_H_
+
+namespace gpu {
+
+// Bitfield template class, used to access bit fields in unsigned ints.
+template<int shift, int length> class BitField {
+ public:
+ static const unsigned int kShift = shift;
+ static const unsigned int kLength = length;
+  // The following is really (1 << length) - 1, but also works for
+  // length == 32 without a compiler warning.
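+  // For example, for length == 8 this yields 1 + (0x7f * 2) == 0xff, and for
+  // length == 32 it yields 1 + (0x7fffffff * 2) == 0xffffffff without ever
+  // shifting by 32 bits.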
+ static const unsigned int kMask = 1U + ((1U << (length-1)) - 1U) * 2U;
+
+ // Gets the value contained in this field.
+ static unsigned int Get(unsigned int container) {
+ return (container >> kShift) & kMask;
+ }
+
+ // Makes a value that can be or-ed into this field.
+ static unsigned int MakeValue(unsigned int value) {
+ return (value & kMask) << kShift;
+ }
+
+ // Changes the value of this field.
+ static void Set(unsigned int *container, unsigned int field_value) {
+ *container = (*container & ~(kMask << kShift)) | MakeValue(field_value);
+ }
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_BITFIELD_HELPERS_H_
diff --git a/gpu/command_buffer/common/bitfield_helpers_test.cc b/gpu/command_buffer/common/bitfield_helpers_test.cc
new file mode 100644
index 0000000..705e933
--- /dev/null
+++ b/gpu/command_buffer/common/bitfield_helpers_test.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the bitfield helper class.
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "gpu/command_buffer/common/bitfield_helpers.h"
+
+namespace gpu {
+
+// Tests that BitField<>::Get returns the right bits.
+TEST(BitFieldTest, TestGet) {
+ unsigned int value = 0x12345678u;
+ EXPECT_EQ(0x8u, (BitField<0, 4>::Get(value)));
+ EXPECT_EQ(0x45u, (BitField<12, 8>::Get(value)));
+ EXPECT_EQ(0x12345678u, (BitField<0, 32>::Get(value)));
+}
+
+// Tests that BitField<>::MakeValue generates the right bits.
+TEST(BitFieldTest, TestMakeValue) {
+ EXPECT_EQ(0x00000003u, (BitField<0, 4>::MakeValue(0x3)));
+ EXPECT_EQ(0x00023000u, (BitField<12, 8>::MakeValue(0x123)));
+ EXPECT_EQ(0x87654321u, (BitField<0, 32>::MakeValue(0x87654321)));
+}
+
+// Tests that BitField<>::Set modifies the right bits.
+TEST(BitFieldTest, TestSet) {
+ unsigned int value = 0x12345678u;
+ BitField<0, 4>::Set(&value, 0x9);
+ EXPECT_EQ(0x12345679u, value);
+ BitField<12, 8>::Set(&value, 0x123);
+ EXPECT_EQ(0x12323679u, value);
+ BitField<0, 32>::Set(&value, 0x87654321);
+ EXPECT_EQ(0x87654321u, value);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/common/buffer.cc b/gpu/command_buffer/common/buffer.cc
new file mode 100644
index 0000000..3b3da43
--- /dev/null
+++ b/gpu/command_buffer/common/buffer.cc
@@ -0,0 +1,42 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/buffer.h"
+
+#include "base/logging.h"
+
+#include "base/numerics/safe_math.h"
+
+namespace gpu {
+SharedMemoryBufferBacking::SharedMemoryBufferBacking(
+ scoped_ptr<base::SharedMemory> shared_memory,
+ size_t size)
+ : shared_memory_(shared_memory.Pass()), size_(size) {}
+
+SharedMemoryBufferBacking::~SharedMemoryBufferBacking() {}
+
+void* SharedMemoryBufferBacking::GetMemory() const {
+ return shared_memory_->memory();
+}
+
+size_t SharedMemoryBufferBacking::GetSize() const { return size_; }
+
+Buffer::Buffer(scoped_ptr<BufferBacking> backing)
+ : backing_(backing.Pass()),
+ memory_(backing_->GetMemory()),
+ size_(backing_->GetSize()) {
+ DCHECK(memory_) << "The memory must be mapped to create a Buffer";
+}
+
+Buffer::~Buffer() {}
+
+void* Buffer::GetDataAddress(uint32 data_offset, uint32 data_size) const {
+ base::CheckedNumeric<uint32> end = data_offset;
+ end += data_size;
+ if (!end.IsValid() || end.ValueOrDie() > static_cast<uint32>(size_))
+ return NULL;
+ return static_cast<uint8*>(memory_) + data_offset;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/common/buffer.h b/gpu/command_buffer/common/buffer.h
new file mode 100644
index 0000000..d8a8356
--- /dev/null
+++ b/gpu/command_buffer/common/buffer.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_BUFFER_H_
+#define GPU_COMMAND_BUFFER_COMMON_BUFFER_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/shared_memory.h"
+#include "gpu/gpu_export.h"
+
+namespace base {
+ class SharedMemory;
+}
+
+namespace gpu {
+
+class GPU_EXPORT BufferBacking {
+ public:
+ virtual ~BufferBacking() {}
+ virtual void* GetMemory() const = 0;
+ virtual size_t GetSize() const = 0;
+};
+
+class GPU_EXPORT SharedMemoryBufferBacking : public BufferBacking {
+ public:
+ SharedMemoryBufferBacking(scoped_ptr<base::SharedMemory> shared_memory,
+ size_t size);
+ virtual ~SharedMemoryBufferBacking();
+ virtual void* GetMemory() const OVERRIDE;
+ virtual size_t GetSize() const OVERRIDE;
+ base::SharedMemory* shared_memory() { return shared_memory_.get(); }
+
+ private:
+ scoped_ptr<base::SharedMemory> shared_memory_;
+ size_t size_;
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryBufferBacking);
+};
+
+// Buffer owns a piece of shared-memory of a certain size.
+class GPU_EXPORT Buffer : public base::RefCountedThreadSafe<Buffer> {
+ public:
+ explicit Buffer(scoped_ptr<BufferBacking> backing);
+
+ BufferBacking* backing() const { return backing_.get(); }
+ void* memory() const { return memory_; }
+ size_t size() const { return size_; }
+
+ // Returns NULL if the address overflows the memory.
+ void* GetDataAddress(uint32 data_offset, uint32 data_size) const;
+
+ private:
+ friend class base::RefCountedThreadSafe<Buffer>;
+ ~Buffer();
+
+ scoped_ptr<BufferBacking> backing_;
+ void* memory_;
+ size_t size_;
+
+ DISALLOW_COPY_AND_ASSIGN(Buffer);
+};
+
+static inline scoped_ptr<BufferBacking> MakeBackingFromSharedMemory(
+ scoped_ptr<base::SharedMemory> shared_memory,
+ size_t size) {
+ return scoped_ptr<BufferBacking>(
+ new SharedMemoryBufferBacking(shared_memory.Pass(), size));
+}
+
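+// Illustrative use (the shared memory must already be mapped; the names below
+// are hypothetical):
+//
+//   scoped_refptr<Buffer> buffer =
+//       MakeBufferFromSharedMemory(shared_memory.Pass(), size_in_bytes);
+//   void* ptr = buffer->GetDataAddress(offset, bytes);  // NULL on overflow.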
+static inline scoped_refptr<Buffer> MakeBufferFromSharedMemory(
+ scoped_ptr<base::SharedMemory> shared_memory,
+ size_t size) {
+ return new Buffer(MakeBackingFromSharedMemory(shared_memory.Pass(), size));
+}
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_BUFFER_H_
diff --git a/gpu/command_buffer/common/capabilities.cc b/gpu/command_buffer/common/capabilities.cc
new file mode 100644
index 0000000..74b2423
--- /dev/null
+++ b/gpu/command_buffer/common/capabilities.cc
@@ -0,0 +1,25 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/capabilities.h"
+
+namespace gpu {
+
+Capabilities::Capabilities()
+ : post_sub_buffer(false),
+ egl_image_external(false),
+ texture_format_bgra8888(false),
+ texture_format_etc1(false),
+ texture_format_etc1_npot(false),
+ texture_rectangle(false),
+ iosurface(false),
+ texture_usage(false),
+ texture_storage(false),
+ discard_framebuffer(false),
+ sync_query(false),
+ image(false),
+ future_sync_points(false) {
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/common/capabilities.h b/gpu/command_buffer/common/capabilities.h
new file mode 100644
index 0000000..cb0246f
--- /dev/null
+++ b/gpu/command_buffer/common/capabilities.h
@@ -0,0 +1,32 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_CAPABILITIES_H_
+#define GPU_COMMAND_BUFFER_COMMON_CAPABILITIES_H_
+
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+struct GPU_EXPORT Capabilities {
+ bool post_sub_buffer;
+ bool egl_image_external;
+ bool texture_format_bgra8888;
+ bool texture_format_etc1;
+ bool texture_format_etc1_npot;
+ bool texture_rectangle;
+ bool iosurface;
+ bool texture_usage;
+ bool texture_storage;
+ bool discard_framebuffer;
+ bool sync_query;
+ bool image;
+ bool future_sync_points;
+
+ Capabilities();
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_CAPABILITIES_H_
diff --git a/gpu/command_buffer/common/cmd_buffer_common.cc b/gpu/command_buffer/common/cmd_buffer_common.cc
new file mode 100644
index 0000000..f337e86
--- /dev/null
+++ b/gpu/command_buffer/common/cmd_buffer_common.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the binary format definition of the command buffer and
+// command buffer commands.
+
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+
+#include "gpu/command_buffer/common/command_buffer.h"
+
+namespace gpu {
+#if !defined(_WIN32)
+// gcc needs this to link, but MSVC requires it not be present
+const int32 CommandHeader::kMaxSize;
+#endif
+namespace cmd {
+
+const char* GetCommandName(CommandId command_id) {
+ static const char* const names[] = {
+ #define COMMON_COMMAND_BUFFER_CMD_OP(name) # name,
+
+ COMMON_COMMAND_BUFFER_CMDS(COMMON_COMMAND_BUFFER_CMD_OP)
+
+ #undef COMMON_COMMAND_BUFFER_CMD_OP
+ };
+
+ int id = static_cast<int>(command_id);
+ return (id >= 0 && id < kNumCommands) ? names[id] : "*unknown-command*";
+}
+
+} // namespace cmd
+
+#if !defined(NACL_WIN64)
+// TODO(apatrick): this is a temporary optimization while skia is calling
+// RendererGLContext::MakeCurrent prior to every GL call. It saves returning 6
+// ints redundantly when only the error is needed for the CommandBufferProxy
+// implementation.
+error::Error CommandBuffer::GetLastError() {
+ return GetLastState().error;
+}
+#endif
+
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/common/cmd_buffer_common.h b/gpu/command_buffer/common/cmd_buffer_common.h
new file mode 100644
index 0000000..828731b
--- /dev/null
+++ b/gpu/command_buffer/common/cmd_buffer_common.h
@@ -0,0 +1,542 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the common parts of command buffer formats.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_CMD_BUFFER_COMMON_H_
+#define GPU_COMMAND_BUFFER_COMMON_CMD_BUFFER_COMMON_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "gpu/command_buffer/common/bitfield_helpers.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+namespace cmd {
+ enum ArgFlags {
+ kFixed = 0x0,
+ kAtLeastN = 0x1
+ };
+} // namespace cmd
+
+// Pack & unpack Command cmd_flags
+#define CMD_FLAG_SET_TRACE_LEVEL(level) ((level & 3) << 0)
+#define CMD_FLAG_GET_TRACE_LEVEL(cmd_flags) ((cmd_flags >> 0) & 3)
+
+// Computes the number of command buffer entries needed for a certain size. In
+// other words it rounds up to a multiple of entries.
+inline uint32_t ComputeNumEntries(size_t size_in_bytes) {
+ return static_cast<uint32_t>(
+ (size_in_bytes + sizeof(uint32_t) - 1) / sizeof(uint32_t)); // NOLINT
+}
+
+// Rounds up to a multiple of entries in bytes.
+inline size_t RoundSizeToMultipleOfEntries(size_t size_in_bytes) {
+ return ComputeNumEntries(size_in_bytes) * sizeof(uint32_t); // NOLINT
+}
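+
+// For example, a 5-byte payload needs ComputeNumEntries(5) == 2 entries, so
+// RoundSizeToMultipleOfEntries(5) == 8 bytes.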
+
+// Struct that defines the command header in the command buffer.
+struct CommandHeader {
+ uint32_t size:21;
+ uint32_t command:11;
+
+ GPU_EXPORT static const int32_t kMaxSize = (1 << 21) - 1;
+
+ void Init(uint32_t _command, int32_t _size) {
+ DCHECK_LE(_size, kMaxSize);
+ command = _command;
+ size = _size;
+ }
+
+ // Sets the header based on the passed in command. Can not be used for
+ // variable sized commands like immediate commands or Noop.
+ template <typename T>
+ void SetCmd() {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
+ Init(T::kCmdId, ComputeNumEntries(sizeof(T))); // NOLINT
+ }
+
+ // Sets the header by a size in bytes of the immediate data after the command.
+ template <typename T>
+ void SetCmdBySize(uint32_t size_of_data_in_bytes) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ Init(T::kCmdId,
+ ComputeNumEntries(sizeof(T) + size_of_data_in_bytes)); // NOLINT
+ }
+
+ // Sets the header by a size in bytes.
+ template <typename T>
+ void SetCmdByTotalSize(uint32_t size_in_bytes) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ DCHECK_GE(size_in_bytes, sizeof(T)); // NOLINT
+ Init(T::kCmdId, ComputeNumEntries(size_in_bytes));
+ }
+};
+
+COMPILE_ASSERT(sizeof(CommandHeader) == 4, Sizeof_CommandHeader_is_not_4);
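+
+// Illustrative example (not generated code; the values are arbitrary): a
+// fixed-size command initializes its header with SetCmd<T>(), while a
+// variable-size command passes the size of its trailing data:
+//   cmd::SetToken set_token;
+//   set_token.header.SetCmd<cmd::SetToken>();
+//   cmd::SetBucketDataImmediate imm;
+//   imm.header.SetCmdBySize<cmd::SetBucketDataImmediate>(12);  // 12 data bytes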
+
+// Union that defines possible command buffer entries.
+union CommandBufferEntry {
+ CommandHeader value_header;
+ uint32_t value_uint32;
+ int32_t value_int32;
+ float value_float;
+};
+
+#define GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT 4
+const size_t kCommandBufferEntrySize = GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT;
+
+COMPILE_ASSERT(sizeof(CommandBufferEntry) == kCommandBufferEntrySize,
+ Sizeof_CommandBufferEntry_is_not_4);
+
+// Command buffer is GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT byte aligned.
+#pragma pack(push, GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT)
+
+// Gets the address of memory just after a structure in a typesafe way. This is
+// used for IMMEDIATE commands to get the address of the place to put the data.
+// Immediate commands put their data directly in the command buffer.
+// Parameters:
+// cmd: Address of command.
+template <typename T>
+void* ImmediateDataAddress(T* cmd) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ return reinterpret_cast<char*>(cmd) + sizeof(*cmd);
+}
+
+// Gets the address of the place to put the next command in a typesafe way.
+// This can only be used for fixed sized commands.
+// Parameters:
+//   cmd: Address of command.
+template <typename T>
+void* NextCmdAddress(void* cmd) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
+ return reinterpret_cast<char*>(cmd) + sizeof(T);
+}
+
+// Gets the address of the place to put the next command in a typesafe way.
+// This can only be used for variable sized command like IMMEDIATE commands.
+// Parameters:
+// cmd: Address of command.
+// size_of_data_in_bytes: Size of the data for the command.
+template <typename T>
+void* NextImmediateCmdAddress(void* cmd, uint32_t size_of_data_in_bytes) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ return reinterpret_cast<char*>(cmd) + sizeof(T) + // NOLINT
+ RoundSizeToMultipleOfEntries(size_of_data_in_bytes);
+}
+
+// Gets the address of the place to put the next command in a typesafe way.
+// This can only be used for variable sized command like IMMEDIATE commands.
+// Parameters:
+// cmd: Address of command.
+//   total_size_in_bytes: Size of the cmd and data.
+template <typename T>
+void* NextImmediateCmdAddressTotalSize(void* cmd,
+ uint32_t total_size_in_bytes) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ DCHECK_GE(total_size_in_bytes, sizeof(T)); // NOLINT
+ return reinterpret_cast<char*>(cmd) +
+ RoundSizeToMultipleOfEntries(total_size_in_bytes);
+}
+
+namespace cmd {
+
+// This macro is used to safely and conveniently expand the list of command
+// buffer commands into various lists and never have them get out of sync. To
+// add a new command, add it to this list, create the corresponding structure
+// below and then add a function in gapi_decoder.cc called Handle_COMMAND_NAME
+// where COMMAND_NAME is the name of your command structure.
+//
+// NOTE: THE ORDER OF THESE MUST NOT CHANGE (their id is derived by order)
+#define COMMON_COMMAND_BUFFER_CMDS(OP) \
+ OP(Noop) /* 0 */ \
+ OP(SetToken) /* 1 */ \
+ OP(SetBucketSize) /* 2 */ \
+ OP(SetBucketData) /* 3 */ \
+ OP(SetBucketDataImmediate) /* 4 */ \
+ OP(GetBucketStart) /* 5 */ \
+ OP(GetBucketData) /* 6 */ \
+
+// Common commands.
+enum CommandId {
+ #define COMMON_COMMAND_BUFFER_CMD_OP(name) k ## name,
+
+ COMMON_COMMAND_BUFFER_CMDS(COMMON_COMMAND_BUFFER_CMD_OP)
+
+ #undef COMMON_COMMAND_BUFFER_CMD_OP
+
+ kNumCommands,
+ kLastCommonId = 255 // reserve 256 spaces for common commands.
+};
+
+COMPILE_ASSERT(kNumCommands - 1 <= kLastCommonId, Too_many_common_commands);
+
+const char* GetCommandName(CommandId id);
+
+// A Noop command.
+struct Noop {
+ typedef Noop ValueType;
+ static const CommandId kCmdId = kNoop;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ void SetHeader(uint32_t skip_count) {
+ DCHECK_GT(skip_count, 0u);
+ header.Init(kCmdId, skip_count);
+ }
+
+ void Init(uint32_t skip_count) {
+ SetHeader(skip_count);
+ }
+
+ static void* Set(void* cmd, uint32_t skip_count) {
+ static_cast<ValueType*>(cmd)->Init(skip_count);
+ return NextImmediateCmdAddress<ValueType>(
+ cmd, skip_count * sizeof(CommandBufferEntry)); // NOLINT
+ }
+
+ CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(Noop) == 4, Sizeof_Noop_is_not_4);
+COMPILE_ASSERT(offsetof(Noop, header) == 0, Offsetof_Noop_header_not_0);
+
+// The SetToken command puts a token in the command stream that you can
+// use to check if that token has been passed in the command stream.
+struct SetToken {
+ typedef SetToken ValueType;
+ static const CommandId kCmdId = kSetToken;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init(uint32_t _token) {
+ SetHeader();
+ token = _token;
+ }
+ static void* Set(void* cmd, uint32_t token) {
+ static_cast<ValueType*>(cmd)->Init(token);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ CommandHeader header;
+ uint32_t token;
+};
+
+COMPILE_ASSERT(sizeof(SetToken) == 8, Sizeof_SetToken_is_not_8);
+COMPILE_ASSERT(offsetof(SetToken, header) == 0,
+ Offsetof_SetToken_header_not_0);
+COMPILE_ASSERT(offsetof(SetToken, token) == 4,
+ Offsetof_SetToken_token_not_4);
+
+// Sets the size of a bucket for collecting data on the service side.
+// This is a utility for gathering data on the service side so it can be used
+// all at once when some service side API is called. It removes the need to add
+// special commands just to support a particular API. For example, any API
+// command that needs a string needs a way to send that string to the API over
+// the command buffers. While you can require that the command buffer or
+// transfer buffer be large enough to hold the largest string you can send,
+// using this command removes that restriction by letting you send smaller
+// pieces over and build up the data on the service side.
+//
+// You can clear a bucket on the service side and thereby free memory by sending
+// a size of 0.
+struct SetBucketSize {
+ typedef SetBucketSize ValueType;
+ static const CommandId kCmdId = kSetBucketSize;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init(uint32_t _bucket_id, uint32_t _size) {
+ SetHeader();
+ bucket_id = _bucket_id;
+ size = _size;
+ }
+ static void* Set(void* cmd, uint32_t _bucket_id, uint32_t _size) {
+ static_cast<ValueType*>(cmd)->Init(_bucket_id, _size);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ CommandHeader header;
+ uint32_t bucket_id;
+ uint32_t size;
+};
+
+COMPILE_ASSERT(sizeof(SetBucketSize) == 12, Sizeof_SetBucketSize_is_not_12);
+COMPILE_ASSERT(offsetof(SetBucketSize, header) == 0,
+               Offsetof_SetBucketSize_header_not_0);
+COMPILE_ASSERT(offsetof(SetBucketSize, bucket_id) == 4,
+               Offsetof_SetBucketSize_bucket_id_not_4);
+COMPILE_ASSERT(offsetof(SetBucketSize, size) == 8,
+               Offsetof_SetBucketSize_size_not_8);
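+
+// Illustrative client-side sketch (identifiers such as |space|, kBucketId and
+// kChunk are hypothetical): to send a large string to the service, size the
+// bucket once and then upload it in transfer-buffer-sized pieces:
+//   SetBucketSize::Set(space, kBucketId, str.size());
+//   for (size_t offset = 0; offset < str.size(); offset += kChunk) {
+//     // Copy the next piece into shared memory at shm_offset, then:
+//     SetBucketData::Set(space, kBucketId, offset,
+//                        std::min(kChunk, str.size() - offset),
+//                        shm_id, shm_offset);
+//   }
+// where each |space| is memory reserved in the command buffer for the command.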
+
+// Sets the contents of a portion of a bucket on the service side from data in
+// shared memory.
+// See SetBucketSize.
+struct SetBucketData {
+ typedef SetBucketData ValueType;
+ static const CommandId kCmdId = kSetBucketData;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init(uint32_t _bucket_id,
+ uint32_t _offset,
+ uint32_t _size,
+ uint32_t _shared_memory_id,
+ uint32_t _shared_memory_offset) {
+ SetHeader();
+ bucket_id = _bucket_id;
+ offset = _offset;
+ size = _size;
+ shared_memory_id = _shared_memory_id;
+ shared_memory_offset = _shared_memory_offset;
+ }
+ static void* Set(void* cmd,
+ uint32_t _bucket_id,
+ uint32_t _offset,
+ uint32_t _size,
+ uint32_t _shared_memory_id,
+ uint32_t _shared_memory_offset) {
+ static_cast<ValueType*>(cmd)->Init(
+ _bucket_id,
+ _offset,
+ _size,
+ _shared_memory_id,
+ _shared_memory_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ CommandHeader header;
+ uint32_t bucket_id;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t shared_memory_id;
+ uint32_t shared_memory_offset;
+};
+
+COMPILE_ASSERT(sizeof(SetBucketData) == 24, Sizeof_SetBucketData_is_not_24);
+COMPILE_ASSERT(offsetof(SetBucketData, header) == 0,
+ Offsetof_SetBucketData_header_not_0);
+COMPILE_ASSERT(offsetof(SetBucketData, bucket_id) == 4,
+ Offsetof_SetBucketData_bucket_id_not_4);
+COMPILE_ASSERT(offsetof(SetBucketData, offset) == 8,
+ Offsetof_SetBucketData_offset_not_8);
+COMPILE_ASSERT(offsetof(SetBucketData, size) == 12,
+ Offsetof_SetBucketData_size_not_12);
+COMPILE_ASSERT(offsetof(SetBucketData, shared_memory_id) == 16,
+ Offsetof_SetBucketData_shared_memory_id_not_16);
+COMPILE_ASSERT(offsetof(SetBucketData, shared_memory_offset) == 20,
+ Offsetof_SetBucketData_shared_memory_offset_not_20);
+
+// Sets the contents of a portion of a bucket on the service side from data in
+// the command buffer.
+// See SetBucketSize.
+struct SetBucketDataImmediate {
+ typedef SetBucketDataImmediate ValueType;
+ static const CommandId kCmdId = kSetBucketDataImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ void SetHeader(uint32_t size) {
+ header.SetCmdBySize<ValueType>(size);
+ }
+
+ void Init(uint32_t _bucket_id,
+ uint32_t _offset,
+ uint32_t _size) {
+ SetHeader(_size);
+ bucket_id = _bucket_id;
+ offset = _offset;
+ size = _size;
+ }
+ static void* Set(void* cmd,
+ uint32_t _bucket_id,
+ uint32_t _offset,
+ uint32_t _size) {
+ static_cast<ValueType*>(cmd)->Init(
+ _bucket_id,
+ _offset,
+ _size);
+ return NextImmediateCmdAddress<ValueType>(cmd, _size);
+ }
+
+ CommandHeader header;
+ uint32_t bucket_id;
+ uint32_t offset;
+ uint32_t size;
+};
+
+COMPILE_ASSERT(sizeof(SetBucketDataImmediate) == 16,
+               Sizeof_SetBucketDataImmediate_is_not_16);
+COMPILE_ASSERT(offsetof(SetBucketDataImmediate, header) == 0,
+ Offsetof_SetBucketDataImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(SetBucketDataImmediate, bucket_id) == 4,
+ Offsetof_SetBucketDataImmediate_bucket_id_not_4);
+COMPILE_ASSERT(offsetof(SetBucketDataImmediate, offset) == 8,
+ Offsetof_SetBucketDataImmediate_offset_not_8);
+COMPILE_ASSERT(offsetof(SetBucketDataImmediate, size) == 12,
+ Offsetof_SetBucketDataImmediate_size_not_12);
+
+// Gets the start of a bucket the service has available. It sends the size of
+// the result back to the client along with the portion of the result that
+// fits in the supplied shared memory. If the result is larger than the
+// supplied shared memory the rest of the bucket's contents can be retrieved
+// with GetBucketData.
+//
+// This is used for example for any API that returns a string. The problem is
+// the largest thing you can send back in 1 command is the size of your shared
+// memory. This command along with GetBucketData implements a way to get a
+// result a piece at a time to help solve that problem in a generic way.
+struct GetBucketStart {
+ typedef GetBucketStart ValueType;
+ static const CommandId kCmdId = kGetBucketStart;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init(uint32_t _bucket_id,
+ uint32_t _result_memory_id,
+ uint32_t _result_memory_offset,
+ uint32_t _data_memory_size,
+ uint32_t _data_memory_id,
+ uint32_t _data_memory_offset) {
+ SetHeader();
+ bucket_id = _bucket_id;
+ result_memory_id = _result_memory_id;
+ result_memory_offset = _result_memory_offset;
+ data_memory_size = _data_memory_size;
+ data_memory_id = _data_memory_id;
+ data_memory_offset = _data_memory_offset;
+ }
+ static void* Set(void* cmd,
+ uint32_t _bucket_id,
+ uint32_t _result_memory_id,
+ uint32_t _result_memory_offset,
+ uint32_t _data_memory_size,
+ uint32_t _data_memory_id,
+ uint32_t _data_memory_offset) {
+ static_cast<ValueType*>(cmd)->Init(
+ _bucket_id,
+ _result_memory_id,
+ _result_memory_offset,
+ _data_memory_size,
+ _data_memory_id,
+ _data_memory_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ CommandHeader header;
+ uint32_t bucket_id;
+ uint32_t result_memory_id;
+ uint32_t result_memory_offset;
+ uint32_t data_memory_size;
+ uint32_t data_memory_id;
+ uint32_t data_memory_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetBucketStart) == 28, Sizeof_GetBucketStart_is_not_28);
+COMPILE_ASSERT(offsetof(GetBucketStart, header) == 0,
+ Offsetof_GetBucketStart_header_not_0);
+COMPILE_ASSERT(offsetof(GetBucketStart, bucket_id) == 4,
+ Offsetof_GetBucketStart_bucket_id_not_4);
+COMPILE_ASSERT(offsetof(GetBucketStart, result_memory_id) == 8,
+ Offsetof_GetBucketStart_result_memory_id_not_8);
+COMPILE_ASSERT(offsetof(GetBucketStart, result_memory_offset) == 12,
+ Offsetof_GetBucketStart_result_memory_offset_not_12);
+COMPILE_ASSERT(offsetof(GetBucketStart, data_memory_size) == 16,
+ Offsetof_GetBucketStart_data_memory_size_not_16);
+COMPILE_ASSERT(offsetof(GetBucketStart, data_memory_id) == 20,
+ Offsetof_GetBucketStart_data_memory_id_not_20);
+COMPILE_ASSERT(offsetof(GetBucketStart, data_memory_offset) == 24,
+ Offsetof_GetBucketStart_data_memory_offset_not_24);
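+
+// Illustrative client-side sketch (identifiers are hypothetical): to read a
+// variable-size result, issue GetBucketStart with shared memory for the
+// uint32_t total size and for the first piece of data, wait for the service,
+// then fetch any remaining pieces with GetBucketData:
+//   GetBucketStart::Set(space, kBucketId, result_shm_id, result_shm_offset,
+//                       kChunk, data_shm_id, data_shm_offset);
+//   // ... wait, then read |total_size| from the result shared memory ...
+//   for (uint32_t offset = kChunk; offset < total_size; offset += kChunk) {
+//     GetBucketData::Set(space, kBucketId, offset,
+//                        std::min(kChunk, total_size - offset),
+//                        data_shm_id, data_shm_offset);
+//   }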
+
+// Gets a piece of a result the service has available.
+// See GetBucketStart.
+struct GetBucketData {
+ typedef GetBucketData ValueType;
+ static const CommandId kCmdId = kGetBucketData;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init(uint32_t _bucket_id,
+ uint32_t _offset,
+ uint32_t _size,
+ uint32_t _shared_memory_id,
+ uint32_t _shared_memory_offset) {
+ SetHeader();
+ bucket_id = _bucket_id;
+ offset = _offset;
+ size = _size;
+ shared_memory_id = _shared_memory_id;
+ shared_memory_offset = _shared_memory_offset;
+ }
+ static void* Set(void* cmd,
+ uint32_t _bucket_id,
+ uint32_t _offset,
+ uint32_t _size,
+ uint32_t _shared_memory_id,
+ uint32_t _shared_memory_offset) {
+ static_cast<ValueType*>(cmd)->Init(
+ _bucket_id,
+ _offset,
+ _size,
+ _shared_memory_id,
+ _shared_memory_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ CommandHeader header;
+ uint32_t bucket_id;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t shared_memory_id;
+ uint32_t shared_memory_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetBucketData) == 24, Sizeof_GetBucketData_is_not_24);
+COMPILE_ASSERT(offsetof(GetBucketData, header) == 0,
+ Offsetof_GetBucketData_header_not_0);
+COMPILE_ASSERT(offsetof(GetBucketData, bucket_id) == 4,
+ Offsetof_GetBucketData_bucket_id_not_4);
+COMPILE_ASSERT(offsetof(GetBucketData, offset) == 8,
+ Offsetof_GetBucketData_offset_not_8);
+COMPILE_ASSERT(offsetof(GetBucketData, size) == 12,
+ Offsetof_GetBucketData_size_not_12);
+COMPILE_ASSERT(offsetof(GetBucketData, shared_memory_id) == 16,
+ Offsetof_GetBucketData_shared_memory_id_not_16);
+COMPILE_ASSERT(offsetof(GetBucketData, shared_memory_offset) == 20,
+ Offsetof_GetBucketData_shared_memory_offset_not_20);
+
+} // namespace cmd
+
+#pragma pack(pop)
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_CMD_BUFFER_COMMON_H_
+
diff --git a/gpu/command_buffer/common/command_buffer.h b/gpu/command_buffer/common/command_buffer.h
new file mode 100644
index 0000000..61b9142
--- /dev/null
+++ b/gpu/command_buffer/common/command_buffer.h
@@ -0,0 +1,138 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_H_
+#define GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_H_
+
+#include "gpu/command_buffer/common/buffer.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/gpu_export.h"
+
+namespace base {
+class SharedMemory;
+}
+
+namespace gpu {
+
+// Common interface for CommandBuffer implementations.
+class GPU_EXPORT CommandBuffer {
+ public:
+ struct State {
+ State()
+ : num_entries(0),
+ get_offset(0),
+ put_offset(0),
+ token(-1),
+ error(error::kNoError),
+ context_lost_reason(error::kUnknown),
+ generation(0) {
+ }
+
+ // Size of the command buffer in command buffer entries.
+ int32 num_entries;
+
+ // The offset (in entries) from which the reader is reading.
+ int32 get_offset;
+
+ // The offset (in entries) at which the writer is writing.
+ int32 put_offset;
+
+ // The current token value. This is used by the writer to defer
+ // changes to shared memory objects until the reader has reached a certain
+ // point in the command buffer. The reader is responsible for updating the
+ // token value, for example in response to an asynchronous set token command
+    // embedded in the command buffer. The token is initialized to -1.
+ int32 token;
+
+ // Error status.
+ error::Error error;
+
+ // Lost context detail information.
+ error::ContextLostReason context_lost_reason;
+
+ // Generation index of this state. The generation index is incremented every
+ // time a new state is retrieved from the command processor, so that
+ // consistency can be kept even if IPC messages are processed out-of-order.
+ uint32 generation;
+ };
+
+ struct ConsoleMessage {
+    // A user-supplied id.
+ int32 id;
+ // The message.
+ std::string message;
+ };
+
+ CommandBuffer() {
+ }
+
+ virtual ~CommandBuffer() {
+ }
+
+ // Check if a value is between a start and end value, inclusive, allowing
+ // for wrapping if start > end.
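+  // For example, with wrapping, InRange(250, 5, 2) and InRange(250, 5, 252)
+  // are true, while InRange(250, 5, 100) is false.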
+ static bool InRange(int32 start, int32 end, int32 value) {
+ if (start <= end)
+ return start <= value && value <= end;
+ else
+ return start <= value || value <= end;
+ }
+
+ // Initialize the command buffer with the given size.
+ virtual bool Initialize() = 0;
+
+ // Returns the last state without synchronizing with the service.
+ virtual State GetLastState() = 0;
+
+  // Returns the last token without synchronizing with the service. Note that
+  // while you could just call GetLastState().token, GetLastState needs to be
+  // fast because it is called for every command, whereas GetLastToken is only
+  // called by code that needs to know the last token, so it can be slower but
+  // more up to date than GetLastState.
+ virtual int32 GetLastToken() = 0;
+
+ // The writer calls this to update its put offset. This ensures the reader
+ // sees the latest added commands, and will eventually process them. On the
+ // service side, commands are processed up to the given put_offset before
+ // subsequent Flushes on the same GpuChannel.
+ virtual void Flush(int32 put_offset) = 0;
+
+ // The writer calls this to wait until the current token is within a
+ // specific range, inclusive. Can return early if an error is generated.
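+  // A typical (illustrative) client pattern is to emit a cmd::SetToken after
+  // commands that reference a shared memory region, Flush(), and later wait
+  // on that token here before reusing the region.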
+ virtual void WaitForTokenInRange(int32 start, int32 end) = 0;
+
+ // The writer calls this to wait until the current get offset is within a
+ // specific range, inclusive. Can return early if an error is generated.
+ virtual void WaitForGetOffsetInRange(int32 start, int32 end) = 0;
+
+ // Sets the buffer commands are read from.
+ // Also resets the get and put offsets to 0.
+ virtual void SetGetBuffer(int32 transfer_buffer_id) = 0;
+
+ // Create a transfer buffer of the given size. Returns its ID or -1 on
+ // error.
+ virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ int32* id) = 0;
+
+ // Destroy a transfer buffer. The ID must be positive.
+ virtual void DestroyTransferBuffer(int32 id) = 0;
+
+// The NaCl Win64 build only really needs the struct definitions above; having
+// GetLastError declared would mean we'd have to also define it, and pull more
+// of gpu in to the NaCl Win64 build.
+#if !defined(NACL_WIN64)
+ // TODO(apatrick): this is a temporary optimization while skia is calling
+ // RendererGLContext::MakeCurrent prior to every GL call. It saves returning 6
+ // ints redundantly when only the error is needed for the CommandBufferProxy
+ // implementation.
+ virtual error::Error GetLastError();
+#endif
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CommandBuffer);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_H_
diff --git a/gpu/command_buffer/common/command_buffer_mock.cc b/gpu/command_buffer/common/command_buffer_mock.cc
new file mode 100644
index 0000000..a64dc45
--- /dev/null
+++ b/gpu/command_buffer/common/command_buffer_mock.cc
@@ -0,0 +1,16 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/command_buffer_mock.h"
+
+namespace gpu {
+
+MockCommandBuffer::MockCommandBuffer() {
+ ON_CALL(*this, GetTransferBuffer(testing::_))
+ .WillByDefault(testing::Return(scoped_refptr<gpu::Buffer>()));
+}
+
+MockCommandBuffer::~MockCommandBuffer() {}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/common/command_buffer_mock.h b/gpu/command_buffer/common/command_buffer_mock.h
new file mode 100644
index 0000000..1877470
--- /dev/null
+++ b/gpu/command_buffer/common/command_buffer_mock.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_MOCK_H_
+#define GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_MOCK_H_
+
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+class SharedMemory;
+}
+
+namespace gpu {
+
+// A mock command buffer implementation with a synchronous API to manage the
+// put and get pointers, for use in tests.
+class MockCommandBuffer : public CommandBufferServiceBase {
+ public:
+ MockCommandBuffer();
+ virtual ~MockCommandBuffer();
+
+ MOCK_METHOD0(Initialize, bool());
+ MOCK_METHOD0(GetLastState, State());
+ MOCK_METHOD0(GetLastToken, int32());
+ MOCK_METHOD1(Flush, void(int32 put_offset));
+ MOCK_METHOD2(WaitForTokenInRange, void(int32 start, int32 end));
+ MOCK_METHOD2(WaitForGetOffsetInRange, void(int32 start, int32 end));
+ MOCK_METHOD1(SetGetBuffer, void(int32 transfer_buffer_id));
+ MOCK_METHOD1(SetGetOffset, void(int32 get_offset));
+ MOCK_METHOD2(CreateTransferBuffer,
+ scoped_refptr<gpu::Buffer>(size_t size, int32* id));
+ MOCK_METHOD1(DestroyTransferBuffer, void(int32 id));
+ MOCK_METHOD1(GetTransferBuffer, scoped_refptr<gpu::Buffer>(int32 id));
+ MOCK_METHOD1(SetToken, void(int32 token));
+ MOCK_METHOD1(SetParseError, void(error::Error error));
+ MOCK_METHOD1(SetContextLostReason,
+ void(error::ContextLostReason context_lost_reason));
+ MOCK_METHOD0(InsertSyncPoint, uint32());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockCommandBuffer);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_MOCK_H_
diff --git a/gpu/command_buffer/common/command_buffer_shared.h b/gpu/command_buffer/common/command_buffer_shared.h
new file mode 100644
index 0000000..fafe767
--- /dev/null
+++ b/gpu/command_buffer/common/command_buffer_shared.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_SHARED_H_
+#define GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_SHARED_H_
+
+#include "command_buffer.h"
+#include "base/atomicops.h"
+
+namespace gpu {
+
+// This is a standard 4-slot asynchronous communication mechanism, used to
+// ensure that the reader gets a consistent copy of what the writer wrote.
+template<typename T>
+class SharedState {
+ T states_[2][2];
+ base::subtle::Atomic32 reading_;
+ base::subtle::Atomic32 latest_;
+ base::subtle::Atomic32 slots_[2];
+
+public:
+
+ void Initialize() {
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ states_[i][j] = T();
+ }
+ }
+ base::subtle::NoBarrier_Store(&reading_, 0);
+ base::subtle::NoBarrier_Store(&latest_, 0);
+ base::subtle::NoBarrier_Store(&slots_[0], 0);
+ base::subtle::Release_Store(&slots_[1], 0);
+ base::subtle::MemoryBarrier();
+ }
+
+ void Write(const T& state) {
+ int towrite = !base::subtle::Acquire_Load(&reading_);
+ int index = !base::subtle::Acquire_Load(&slots_[towrite]);
+ states_[towrite][index] = state;
+ base::subtle::Release_Store(&slots_[towrite], index);
+ base::subtle::Release_Store(&latest_, towrite);
+ base::subtle::MemoryBarrier();
+ }
+
+ // Attempt to update the state, updating only if the generation counter is
+ // newer.
+ void Read(T* state) {
+ base::subtle::MemoryBarrier();
+ int toread = !!base::subtle::Acquire_Load(&latest_);
+ base::subtle::Release_Store(&reading_, toread);
+ base::subtle::MemoryBarrier();
+ int index = !!base::subtle::Acquire_Load(&slots_[toread]);
+ if (states_[toread][index].generation - state->generation < 0x80000000U)
+ *state = states_[toread][index];
+ }
+};
+
+typedef SharedState<CommandBuffer::State> CommandBufferSharedState;
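+
+// Illustrative usage (identifiers are hypothetical): the service side calls
+// shared_state->Write(state) after it updates its State, while the client
+// calls shared_state->Read(&local_state) on the same shared-memory-mapped
+// instance; Read() only copies a slot whose generation is at least as new as
+// the one the caller already holds.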
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_SHARED_H_
diff --git a/gpu/command_buffer/common/command_buffer_shared_test.cc b/gpu/command_buffer/common/command_buffer_shared_test.cc
new file mode 100644
index 0000000..d2c599a
--- /dev/null
+++ b/gpu/command_buffer/common/command_buffer_shared_test.cc
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the tests for the CommandBufferSharedState class.
+
+#include "gpu/command_buffer/common/command_buffer_shared.h"
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class CommandBufferSharedTest : public testing::Test {
+ protected:
+
+ virtual void SetUp() {
+ shared_state_.reset(new CommandBufferSharedState());
+ shared_state_->Initialize();
+ }
+
+ scoped_ptr<CommandBufferSharedState> shared_state_;
+};
+
+TEST_F(CommandBufferSharedTest, TestBasic) {
+ CommandBuffer::State state;
+
+ shared_state_->Read(&state);
+
+ EXPECT_LT(state.generation, 0x80000000);
+ EXPECT_EQ(state.get_offset, 0);
+ EXPECT_EQ(state.put_offset, 0);
+ EXPECT_EQ(state.token, -1);
+ EXPECT_EQ(state.error, gpu::error::kNoError);
+ EXPECT_EQ(state.context_lost_reason, gpu::error::kUnknown);
+}
+
+static const int kSize = 100000;
+
+void WriteToState(int32 *buffer,
+ CommandBufferSharedState* shared_state) {
+ CommandBuffer::State state;
+ for (int i = 0; i < kSize; i++) {
+ state.token = i - 1;
+ state.get_offset = i + 1;
+ state.generation = i + 2;
+ state.error = static_cast<gpu::error::Error>(i + 3);
+    // Ensure that the consumer doesn't mark this entry until after the
+    // producer has written the corresponding state.
+ EXPECT_EQ(buffer[i], 0);
+
+ shared_state->Write(state);
+ }
+}
+
+TEST_F(CommandBufferSharedTest, TestConsistency) {
+ scoped_ptr<int32[]> buffer;
+ buffer.reset(new int32[kSize]);
+  base::Thread writer("Writer Thread");
+
+ memset(buffer.get(), 0, kSize * sizeof(int32));
+
+  writer.Start();
+  writer.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&WriteToState, buffer.get(),
+ shared_state_.get()));
+
+ CommandBuffer::State last_state;
+ while (1) {
+ CommandBuffer::State state = last_state;
+
+ shared_state_->Read(&state);
+
+ if (state.generation < last_state.generation)
+ continue;
+
+ if (state.get_offset >= 1) {
+ buffer[state.get_offset - 1] = 1;
+ // Check that the state is consistent
+ EXPECT_LE(last_state.token, state.token);
+ EXPECT_LE(last_state.generation, state.generation);
+ last_state = state;
+ EXPECT_EQ(state.token, state.get_offset - 2);
+ EXPECT_EQ(state.generation,
+ static_cast<unsigned int>(state.get_offset) + 1);
+ EXPECT_EQ(state.error, state.get_offset + 2);
+
+ if (state.get_offset == kSize)
+ break;
+ }
+ }
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/common/constants.h b/gpu/command_buffer/common/constants.h
new file mode 100644
index 0000000..054708f
--- /dev/null
+++ b/gpu/command_buffer/common/constants.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_CONSTANTS_H_
+#define GPU_COMMAND_BUFFER_COMMON_CONSTANTS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace gpu {
+
+typedef int32_t CommandBufferOffset;
+const CommandBufferOffset kInvalidCommandBufferOffset = -1;
+
+// This enum must stay in sync with NPDeviceContext3DError.
+namespace error {
+ enum Error {
+ kNoError,
+ kInvalidSize,
+ kOutOfBounds,
+ kUnknownCommand,
+ kInvalidArguments,
+ kLostContext,
+ kGenericError,
+ kDeferCommandUntilLater
+ };
+
+ // Return true if the given error code is an actual error.
+ inline bool IsError(Error error) {
+ return error != kNoError && error != kDeferCommandUntilLater;
+ }
+
+ // Provides finer grained information about why the context was lost.
+ enum ContextLostReason {
+ // This context definitely provoked the loss of context.
+ kGuilty,
+
+ // This context definitely did not provoke the loss of context.
+ kInnocent,
+
+ // It is unknown whether this context provoked the loss of context.
+ kUnknown,
+ kContextLostReasonLast = kUnknown
+ };
+}
+
+// Invalid shared memory Id, returned by RegisterSharedMemory in case of
+// failure.
+const int32_t kInvalidSharedMemoryId = -1;
+
+// Common Command Buffer shared memory transfer buffer ID.
+const int32_t kCommandBufferSharedMemoryId = 4;
+
+// The size to set for the program cache.
+const size_t kDefaultMaxProgramCacheMemoryBytes = 6 * 1024 * 1024;
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_CONSTANTS_H_
diff --git a/gpu/command_buffer/common/debug_marker_manager.cc b/gpu/command_buffer/common/debug_marker_manager.cc
new file mode 100644
index 0000000..5ac37d0
--- /dev/null
+++ b/gpu/command_buffer/common/debug_marker_manager.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/debug_marker_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+DebugMarkerManager::Group::Group(const std::string& name)
+ : name_(name),
+ marker_(name) {
+}
+
+DebugMarkerManager::Group::~Group() {
+}
+
+void DebugMarkerManager::Group::SetMarker(const std::string& marker) {
+ marker_ = name_ + "." + marker;
+}
+
+DebugMarkerManager::DebugMarkerManager() {
+ // Push root group.
+ group_stack_.push(Group(std::string()));
+}
+
+DebugMarkerManager::~DebugMarkerManager() {
+}
+
+void DebugMarkerManager::SetMarker(const std::string& marker) {
+ group_stack_.top().SetMarker(marker);
+}
+
+const std::string& DebugMarkerManager::GetMarker() const {
+ return group_stack_.top().marker();
+}
+
+void DebugMarkerManager::PushGroup(const std::string& name) {
+ group_stack_.push(Group(group_stack_.top().name() + "." + name));
+}
+
+void DebugMarkerManager::PopGroup(void) {
+ if (group_stack_.size() > 1) {
+ group_stack_.pop();
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/common/debug_marker_manager.h b/gpu/command_buffer/common/debug_marker_manager.h
new file mode 100644
index 0000000..65e0cd0
--- /dev/null
+++ b/gpu/command_buffer/common/debug_marker_manager.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_DEBUG_MARKER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_DEBUG_MARKER_MANAGER_H_
+
+#include <stack>
+#include <string>
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+// Tracks debug marker.
+class GPU_EXPORT DebugMarkerManager {
+ public:
+ DebugMarkerManager();
+ ~DebugMarkerManager();
+
+ // Gets the current marker on the top group.
+ const std::string& GetMarker() const;
+ // Sets the current marker on the top group.
+ void SetMarker(const std::string& marker);
+ // Pushes a new group.
+ void PushGroup(const std::string& name);
+  // Removes the top group. It is safe to call this more times than PushGroup;
+  // the root group is never removed.
+ void PopGroup(void);
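+
+  // Illustrative usage (assuming a DebugMarkerManager named |manager|):
+  //   manager.PushGroup("frame");
+  //   manager.SetMarker("draw");
+  //   manager.GetMarker();  // returns ".frame.draw"
+  //   manager.PopGroup();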
+
+ private:
+  // A group name and its current marker.
+ class Group {
+ public:
+ explicit Group(const std::string& name);
+ ~Group();
+
+ const std::string& name() const {
+ return name_;
+ }
+
+ void SetMarker(const std::string& marker);
+
+ const std::string& marker() const {
+ return marker_;
+ }
+
+ private:
+ std::string name_;
+ std::string marker_;
+ };
+
+ typedef std::stack<Group> GroupStack;
+
+ GroupStack group_stack_;
+ std::string empty_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_DEBUG_MARKER_MANAGER_H_
+
diff --git a/gpu/command_buffer/common/debug_marker_manager_unittest.cc b/gpu/command_buffer/common/debug_marker_manager_unittest.cc
new file mode 100644
index 0000000..fa1cfcc
--- /dev/null
+++ b/gpu/command_buffer/common/debug_marker_manager_unittest.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/debug_marker_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+namespace gpu {
+namespace gles2 {
+
+class DebugMarkerManagerTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ }
+
+ virtual void TearDown() {
+ }
+
+ DebugMarkerManager manager_;
+};
+
+TEST_F(DebugMarkerManagerTest, Basic) {
+ // Test we can get root
+ EXPECT_STREQ("", manager_.GetMarker().c_str());
+ // Test it's safe to pop an empty stack.
+ manager_.PopGroup();
+ // Test we can still get root.
+ EXPECT_STREQ("", manager_.GetMarker().c_str());
+ // Test setting a marker.
+ manager_.SetMarker("mark1");
+ EXPECT_STREQ(".mark1", manager_.GetMarker().c_str());
+ manager_.SetMarker("mark2");
+ EXPECT_STREQ(".mark2", manager_.GetMarker().c_str());
+ // Test pushing a group.
+ manager_.PushGroup("abc");
+ EXPECT_STREQ(".abc", manager_.GetMarker().c_str());
+ // Test setting a marker on the group
+ manager_.SetMarker("mark3");
+ EXPECT_STREQ(".abc.mark3", manager_.GetMarker().c_str());
+ manager_.SetMarker("mark4");
+ EXPECT_STREQ(".abc.mark4", manager_.GetMarker().c_str());
+ // Test pushing a 2nd group.
+ manager_.PushGroup("def");
+ EXPECT_STREQ(".abc.def", manager_.GetMarker().c_str());
+ // Test setting a marker on the group
+ manager_.SetMarker("mark5");
+ EXPECT_STREQ(".abc.def.mark5", manager_.GetMarker().c_str());
+ manager_.SetMarker("mark6");
+ EXPECT_STREQ(".abc.def.mark6", manager_.GetMarker().c_str());
+  // Test popping the 2nd group.
+ manager_.PopGroup();
+ EXPECT_STREQ(".abc.mark4", manager_.GetMarker().c_str());
+ manager_.PopGroup();
+ EXPECT_STREQ(".mark2", manager_.GetMarker().c_str());
+ manager_.PopGroup();
+ EXPECT_STREQ(".mark2", manager_.GetMarker().c_str());
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/common/gles2_cmd_format.cc b/gpu/command_buffer/common/gles2_cmd_format.cc
new file mode 100644
index 0000000..5b437a5
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_format.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the binary format definition of the command buffer and
+// command buffer commands.
+
+// We explicitly do NOT include gles2_cmd_format.h here because client side
+// and service side have different requirements.
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+
+namespace gpu {
+namespace gles2 {
+
+#include "gpu/command_buffer/common/gles2_cmd_ids_autogen.h"
+
+const char* GetCommandName(CommandId id) {
+ static const char* const names[] = {
+ #define GLES2_CMD_OP(name) "k" # name,
+
+ GLES2_COMMAND_LIST(GLES2_CMD_OP)
+
+ #undef GLES2_CMD_OP
+ };
+
+ size_t index = static_cast<size_t>(id) - kStartPoint - 1;
+ return (index < arraysize(names)) ? names[index] : "*unknown-command*";
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/common/gles2_cmd_format.h b/gpu/command_buffer/common/gles2_cmd_format.h
new file mode 100644
index 0000000..393303e
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_format.h
@@ -0,0 +1,276 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines the GLES2 command buffer commands.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_H_
+#define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_H_
+
+
+#include <KHR/khrplatform.h>
+
+#include <stdint.h>
+#include <string.h>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "gpu/command_buffer/common/bitfield_helpers.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/common/gles2_cmd_ids.h"
+
+// GL types are forward declared to avoid including the GL headers. The problem
+// is determining which GL headers to include from code that is common to the
+// client and service sides (GLES2 or one of several GL implementations).
+typedef unsigned int GLenum;
+typedef unsigned int GLbitfield;
+typedef unsigned int GLuint;
+typedef int GLint;
+typedef int GLsizei;
+typedef unsigned char GLboolean;
+typedef signed char GLbyte;
+typedef short GLshort;
+typedef unsigned char GLubyte;
+typedef unsigned short GLushort;
+typedef unsigned long GLulong;
+typedef float GLfloat;
+typedef float GLclampf;
+typedef double GLdouble;
+typedef double GLclampd;
+typedef void GLvoid;
+typedef khronos_intptr_t GLintptr;
+typedef khronos_ssize_t GLsizeiptr;
+
+namespace gpu {
+namespace gles2 {
+
+// Command buffer is GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT byte aligned.
+#pragma pack(push, GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT)
+
+namespace id_namespaces {
+
+// These are used when contexts share resources.
+enum IdNamespaces {
+ kBuffers,
+ kFramebuffers,
+ kProgramsAndShaders,
+ kRenderbuffers,
+ kTextures,
+ kQueries,
+ kVertexArrays,
+ kNumIdNamespaces
+};
+
+// These numbers must not change
+COMPILE_ASSERT(kBuffers == 0, kBuffers_is_not_0);
+COMPILE_ASSERT(kFramebuffers == 1, kFramebuffers_is_not_1);
+COMPILE_ASSERT(kProgramsAndShaders == 2, kProgramsAndShaders_is_not_2);
+COMPILE_ASSERT(kRenderbuffers == 3, kRenderbuffers_is_not_3);
+COMPILE_ASSERT(kTextures == 4, kTextures_is_not_4);
+
+} // namespace id_namespaces
+
+// Used for some glGetXXX commands that return a result through a pointer. We
+// need to know if the command succeeded or not and the size of the result. If
+// the command failed its result size will be 0.
+template <typename T>
+struct SizedResult {
+ typedef T Type;
+
+ T* GetData() {
+ return static_cast<T*>(static_cast<void*>(&data));
+ }
+
+ // Returns the total size in bytes of the SizedResult for a given number of
+ // results including the size field.
+ static size_t ComputeSize(size_t num_results) {
+ return sizeof(T) * num_results + sizeof(uint32_t); // NOLINT
+ }
+
+ // Returns the total size in bytes of the SizedResult for a given size of
+ // results.
+ static size_t ComputeSizeFromBytes(size_t size_of_result_in_bytes) {
+ return size_of_result_in_bytes + sizeof(uint32_t); // NOLINT
+ }
+
+ // Returns the maximum number of results for a given buffer size.
+ static uint32_t ComputeMaxResults(size_t size_of_buffer) {
+ return (size_of_buffer >= sizeof(uint32_t)) ?
+ ((size_of_buffer - sizeof(uint32_t)) / sizeof(T)) : 0; // NOLINT
+ }
+
+ // Set the size for a given number of results.
+ void SetNumResults(size_t num_results) {
+ size = sizeof(T) * num_results; // NOLINT
+ }
+
+ // Get the number of elements in the result
+ int32_t GetNumResults() const {
+ return size / sizeof(T); // NOLINT
+ }
+
+ // Copy the result.
+ void CopyResult(void* dst) const {
+ memcpy(dst, &data, size);
+ }
+
+ uint32_t size; // in bytes.
+ int32_t data; // this is just here to get an offset.
+};
+
+COMPILE_ASSERT(sizeof(SizedResult<int8_t>) == 8, SizedResult_size_not_8);
+COMPILE_ASSERT(offsetof(SizedResult<int8_t>, size) == 0,
+ OffsetOf_SizedResult_size_not_0);
+COMPILE_ASSERT(offsetof(SizedResult<int8_t>, data) == 4,
+ OffsetOf_SizedResult_data_not_4);
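+
+// Illustrative sketch (names are hypothetical): a client reserves
+// SizedResult<GLint>::ComputeSize(n) bytes of shared memory for a glGet-style
+// command and, once the service has filled it in, reads the values back:
+//   SizedResult<GLint>* result = ...;  // points into shared memory
+//   for (int32_t i = 0; i < result->GetNumResults(); ++i)
+//     values[i] = result->GetData()[i];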
+
+// The data for one attrib or uniform from GetProgramInfoCHROMIUM.
+struct ProgramInput {
+  uint32_t type;             // The type (GL_VEC3, GL_MAT3, GL_SAMPLER_2D, etc).
+  int32_t size;              // The size (how big the array is for uniforms).
+  uint32_t location_offset;  // Offset from ProgramInfoHeader to the locations
+                             // ('size' of them for uniforms, 1 for attribs).
+  uint32_t name_offset;      // Offset from ProgramInfoHeader to start of name.
+  uint32_t name_length;      // Length of the name.
+};
+
+// The format of the bucket filled out by GetProgramInfoCHROMIUM
+struct ProgramInfoHeader {
+ uint32_t link_status;
+ uint32_t num_attribs;
+ uint32_t num_uniforms;
+ // ProgramInput inputs[num_attribs + num_uniforms];
+};
+
+// The format of QuerySync used by EXT_occlusion_query_boolean
+struct QuerySync {
+ void Reset() {
+ process_count = 0;
+ result = 0;
+ }
+
+ base::subtle::Atomic32 process_count;
+ uint64_t result;
+};
+
+struct AsyncUploadSync {
+ void Reset() {
+ base::subtle::Release_Store(&async_upload_token, 0);
+ }
+
+ void SetAsyncUploadToken(uint32_t token) {
+ DCHECK_NE(token, 0u);
+ base::subtle::Release_Store(&async_upload_token, token);
+ }
+
+ bool HasAsyncUploadTokenPassed(uint32_t token) {
+ DCHECK_NE(token, 0u);
+ uint32_t current_token = base::subtle::Acquire_Load(&async_upload_token);
+ return (current_token - token < 0x80000000);
+ }
+
+ base::subtle::Atomic32 async_upload_token;
+};
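+
+// For example, if SetAsyncUploadToken was last called with 5 and the counter
+// has wrapped so that the queried token is 0xFFFFFFFEu, then
+// 5u - 0xFFFFFFFEu == 7 < 0x80000000, so HasAsyncUploadTokenPassed() returns
+// true.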
+
+COMPILE_ASSERT(sizeof(ProgramInput) == 20, ProgramInput_size_not_20);
+COMPILE_ASSERT(offsetof(ProgramInput, type) == 0,
+ OffsetOf_ProgramInput_type_not_0);
+COMPILE_ASSERT(offsetof(ProgramInput, size) == 4,
+ OffsetOf_ProgramInput_size_not_4);
+COMPILE_ASSERT(offsetof(ProgramInput, location_offset) == 8,
+ OffsetOf_ProgramInput_location_offset_not_8);
+COMPILE_ASSERT(offsetof(ProgramInput, name_offset) == 12,
+ OffsetOf_ProgramInput_name_offset_not_12);
+COMPILE_ASSERT(offsetof(ProgramInput, name_length) == 16,
+ OffsetOf_ProgramInput_name_length_not_16);
+
+COMPILE_ASSERT(sizeof(ProgramInfoHeader) == 12, ProgramInfoHeader_size_not_12);
+COMPILE_ASSERT(offsetof(ProgramInfoHeader, link_status) == 0,
+ OffsetOf_ProgramInfoHeader_link_status_not_0);
+COMPILE_ASSERT(offsetof(ProgramInfoHeader, num_attribs) == 4,
+ OffsetOf_ProgramInfoHeader_num_attribs_not_4);
+COMPILE_ASSERT(offsetof(ProgramInfoHeader, num_uniforms) == 8,
+ OffsetOf_ProgramInfoHeader_num_uniforms_not_8);
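+
+// Illustrative sketch of walking the bucket produced by GetProgramInfoCHROMIUM
+// (identifiers are hypothetical; only pointer arithmetic over the layout
+// above):
+//   const char* bucket = ...;  // start of the bucket contents
+//   const ProgramInfoHeader* header =
+//       reinterpret_cast<const ProgramInfoHeader*>(bucket);
+//   const ProgramInput* inputs =
+//       reinterpret_cast<const ProgramInput*>(header + 1);
+//   for (uint32_t i = 0; i < header->num_attribs + header->num_uniforms; ++i) {
+//     const char* name = bucket + inputs[i].name_offset;  // name_length bytes
+//   }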
+
+namespace cmds {
+
+#include "../common/gles2_cmd_format_autogen.h"
+
+// These are hand written commands.
+// TODO(gman): Attempt to make these auto-generated.
+
+struct GenMailboxCHROMIUM {
+ typedef GenMailboxCHROMIUM ValueType;
+ static const CommandId kCmdId = kGenMailboxCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+ CommandHeader header;
+};
+
+struct InsertSyncPointCHROMIUM {
+ typedef InsertSyncPointCHROMIUM ValueType;
+ static const CommandId kCmdId = kInsertSyncPointCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+ CommandHeader header;
+};
+
+struct CreateAndConsumeTextureCHROMIUMImmediate {
+ typedef CreateAndConsumeTextureCHROMIUMImmediate ValueType;
+ static const CommandId kCmdId = kCreateAndConsumeTextureCHROMIUMImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLbyte) * 64); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader(uint32_t size_in_bytes) {
+ header.SetCmdByTotalSize<ValueType>(size_in_bytes);
+ }
+
+ void Init(GLenum _target, uint32_t _client_id, const GLbyte* _mailbox) {
+ SetHeader(ComputeSize());
+ target = _target;
+ client_id = _client_id;
+ memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ uint32_t _client_id,
+ const GLbyte* _mailbox) {
+ static_cast<ValueType*>(cmd)->Init(_target, _client_id, _mailbox);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t client_id;
+};
+
+COMPILE_ASSERT(sizeof(CreateAndConsumeTextureCHROMIUMImmediate) == 12,
+ Sizeof_CreateAndConsumeTextureCHROMIUMImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(CreateAndConsumeTextureCHROMIUMImmediate, header) == 0,
+ OffsetOf_CreateAndConsumeTextureCHROMIUMImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(CreateAndConsumeTextureCHROMIUMImmediate, target) == 4,
+ OffsetOf_CreateAndConsumeTextureCHROMIUMImmediate_target_not_4);
+COMPILE_ASSERT(
+ offsetof(CreateAndConsumeTextureCHROMIUMImmediate, client_id) == 8,
+ OffsetOf_CreateAndConsumeTextureCHROMIUMImmediate_client_id_not_8);
+
+
+#pragma pack(pop)
+
+} // namespace cmds
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_H_
diff --git a/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_autogen.h
new file mode 100644
index 0000000..ccdc040
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -0,0 +1,9017 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_AUTOGEN_H_
+
+struct ActiveTexture {
+ typedef ActiveTexture ValueType;
+ static const CommandId kCmdId = kActiveTexture;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _texture) {
+ SetHeader();
+ texture = _texture;
+ }
+
+ void* Set(void* cmd, GLenum _texture) {
+ static_cast<ValueType*>(cmd)->Init(_texture);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture;
+};
+
+COMPILE_ASSERT(sizeof(ActiveTexture) == 8, Sizeof_ActiveTexture_is_not_8);
+COMPILE_ASSERT(offsetof(ActiveTexture, header) == 0,
+ OffsetOf_ActiveTexture_header_not_0);
+COMPILE_ASSERT(offsetof(ActiveTexture, texture) == 4,
+ OffsetOf_ActiveTexture_texture_not_4);
+
+struct AttachShader {
+ typedef AttachShader ValueType;
+ static const CommandId kCmdId = kAttachShader;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program, GLuint _shader) {
+ SetHeader();
+ program = _program;
+ shader = _shader;
+ }
+
+ void* Set(void* cmd, GLuint _program, GLuint _shader) {
+ static_cast<ValueType*>(cmd)->Init(_program, _shader);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t shader;
+};
+
+COMPILE_ASSERT(sizeof(AttachShader) == 12, Sizeof_AttachShader_is_not_12);
+COMPILE_ASSERT(offsetof(AttachShader, header) == 0,
+ OffsetOf_AttachShader_header_not_0);
+COMPILE_ASSERT(offsetof(AttachShader, program) == 4,
+ OffsetOf_AttachShader_program_not_4);
+COMPILE_ASSERT(offsetof(AttachShader, shader) == 8,
+ OffsetOf_AttachShader_shader_not_8);
+
+struct BindAttribLocationBucket {
+ typedef BindAttribLocationBucket ValueType;
+ static const CommandId kCmdId = kBindAttribLocationBucket;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program, GLuint _index, uint32_t _name_bucket_id) {
+ SetHeader();
+ program = _program;
+ index = _index;
+ name_bucket_id = _name_bucket_id;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLuint _index,
+ uint32_t _name_bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_program, _index, _name_bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t index;
+ uint32_t name_bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(BindAttribLocationBucket) == 16,
+ Sizeof_BindAttribLocationBucket_is_not_16);
+COMPILE_ASSERT(offsetof(BindAttribLocationBucket, header) == 0,
+ OffsetOf_BindAttribLocationBucket_header_not_0);
+COMPILE_ASSERT(offsetof(BindAttribLocationBucket, program) == 4,
+ OffsetOf_BindAttribLocationBucket_program_not_4);
+COMPILE_ASSERT(offsetof(BindAttribLocationBucket, index) == 8,
+ OffsetOf_BindAttribLocationBucket_index_not_8);
+COMPILE_ASSERT(offsetof(BindAttribLocationBucket, name_bucket_id) == 12,
+ OffsetOf_BindAttribLocationBucket_name_bucket_id_not_12);
+
+struct BindBuffer {
+ typedef BindBuffer ValueType;
+ static const CommandId kCmdId = kBindBuffer;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLuint _buffer) {
+ SetHeader();
+ target = _target;
+ buffer = _buffer;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLuint _buffer) {
+ static_cast<ValueType*>(cmd)->Init(_target, _buffer);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t buffer;
+};
+
+COMPILE_ASSERT(sizeof(BindBuffer) == 12, Sizeof_BindBuffer_is_not_12);
+COMPILE_ASSERT(offsetof(BindBuffer, header) == 0,
+ OffsetOf_BindBuffer_header_not_0);
+COMPILE_ASSERT(offsetof(BindBuffer, target) == 4,
+ OffsetOf_BindBuffer_target_not_4);
+COMPILE_ASSERT(offsetof(BindBuffer, buffer) == 8,
+ OffsetOf_BindBuffer_buffer_not_8);
+
+struct BindFramebuffer {
+ typedef BindFramebuffer ValueType;
+ static const CommandId kCmdId = kBindFramebuffer;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLuint _framebuffer) {
+ SetHeader();
+ target = _target;
+ framebuffer = _framebuffer;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLuint _framebuffer) {
+ static_cast<ValueType*>(cmd)->Init(_target, _framebuffer);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t framebuffer;
+};
+
+COMPILE_ASSERT(sizeof(BindFramebuffer) == 12, Sizeof_BindFramebuffer_is_not_12);
+COMPILE_ASSERT(offsetof(BindFramebuffer, header) == 0,
+ OffsetOf_BindFramebuffer_header_not_0);
+COMPILE_ASSERT(offsetof(BindFramebuffer, target) == 4,
+ OffsetOf_BindFramebuffer_target_not_4);
+COMPILE_ASSERT(offsetof(BindFramebuffer, framebuffer) == 8,
+ OffsetOf_BindFramebuffer_framebuffer_not_8);
+
+struct BindRenderbuffer {
+ typedef BindRenderbuffer ValueType;
+ static const CommandId kCmdId = kBindRenderbuffer;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLuint _renderbuffer) {
+ SetHeader();
+ target = _target;
+ renderbuffer = _renderbuffer;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLuint _renderbuffer) {
+ static_cast<ValueType*>(cmd)->Init(_target, _renderbuffer);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t renderbuffer;
+};
+
+COMPILE_ASSERT(sizeof(BindRenderbuffer) == 12,
+ Sizeof_BindRenderbuffer_is_not_12);
+COMPILE_ASSERT(offsetof(BindRenderbuffer, header) == 0,
+ OffsetOf_BindRenderbuffer_header_not_0);
+COMPILE_ASSERT(offsetof(BindRenderbuffer, target) == 4,
+ OffsetOf_BindRenderbuffer_target_not_4);
+COMPILE_ASSERT(offsetof(BindRenderbuffer, renderbuffer) == 8,
+ OffsetOf_BindRenderbuffer_renderbuffer_not_8);
+
+struct BindTexture {
+ typedef BindTexture ValueType;
+ static const CommandId kCmdId = kBindTexture;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLuint _texture) {
+ SetHeader();
+ target = _target;
+ texture = _texture;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLuint _texture) {
+ static_cast<ValueType*>(cmd)->Init(_target, _texture);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t texture;
+};
+
+COMPILE_ASSERT(sizeof(BindTexture) == 12, Sizeof_BindTexture_is_not_12);
+COMPILE_ASSERT(offsetof(BindTexture, header) == 0,
+ OffsetOf_BindTexture_header_not_0);
+COMPILE_ASSERT(offsetof(BindTexture, target) == 4,
+ OffsetOf_BindTexture_target_not_4);
+COMPILE_ASSERT(offsetof(BindTexture, texture) == 8,
+ OffsetOf_BindTexture_texture_not_8);
+
+struct BlendColor {
+ typedef BlendColor ValueType;
+ static const CommandId kCmdId = kBlendColor;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLclampf _red, GLclampf _green, GLclampf _blue, GLclampf _alpha) {
+ SetHeader();
+ red = _red;
+ green = _green;
+ blue = _blue;
+ alpha = _alpha;
+ }
+
+ void* Set(void* cmd,
+ GLclampf _red,
+ GLclampf _green,
+ GLclampf _blue,
+ GLclampf _alpha) {
+ static_cast<ValueType*>(cmd)->Init(_red, _green, _blue, _alpha);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ float red;
+ float green;
+ float blue;
+ float alpha;
+};
+
+COMPILE_ASSERT(sizeof(BlendColor) == 20, Sizeof_BlendColor_is_not_20);
+COMPILE_ASSERT(offsetof(BlendColor, header) == 0,
+ OffsetOf_BlendColor_header_not_0);
+COMPILE_ASSERT(offsetof(BlendColor, red) == 4, OffsetOf_BlendColor_red_not_4);
+COMPILE_ASSERT(offsetof(BlendColor, green) == 8,
+ OffsetOf_BlendColor_green_not_8);
+COMPILE_ASSERT(offsetof(BlendColor, blue) == 12,
+ OffsetOf_BlendColor_blue_not_12);
+COMPILE_ASSERT(offsetof(BlendColor, alpha) == 16,
+ OffsetOf_BlendColor_alpha_not_16);
+
+struct BlendEquation {
+ typedef BlendEquation ValueType;
+ static const CommandId kCmdId = kBlendEquation;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode) {
+ SetHeader();
+ mode = _mode;
+ }
+
+ void* Set(void* cmd, GLenum _mode) {
+ static_cast<ValueType*>(cmd)->Init(_mode);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+};
+
+COMPILE_ASSERT(sizeof(BlendEquation) == 8, Sizeof_BlendEquation_is_not_8);
+COMPILE_ASSERT(offsetof(BlendEquation, header) == 0,
+ OffsetOf_BlendEquation_header_not_0);
+COMPILE_ASSERT(offsetof(BlendEquation, mode) == 4,
+ OffsetOf_BlendEquation_mode_not_4);
+
+struct BlendEquationSeparate {
+ typedef BlendEquationSeparate ValueType;
+ static const CommandId kCmdId = kBlendEquationSeparate;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _modeRGB, GLenum _modeAlpha) {
+ SetHeader();
+ modeRGB = _modeRGB;
+ modeAlpha = _modeAlpha;
+ }
+
+ void* Set(void* cmd, GLenum _modeRGB, GLenum _modeAlpha) {
+ static_cast<ValueType*>(cmd)->Init(_modeRGB, _modeAlpha);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t modeRGB;
+ uint32_t modeAlpha;
+};
+
+COMPILE_ASSERT(sizeof(BlendEquationSeparate) == 12,
+ Sizeof_BlendEquationSeparate_is_not_12);
+COMPILE_ASSERT(offsetof(BlendEquationSeparate, header) == 0,
+ OffsetOf_BlendEquationSeparate_header_not_0);
+COMPILE_ASSERT(offsetof(BlendEquationSeparate, modeRGB) == 4,
+ OffsetOf_BlendEquationSeparate_modeRGB_not_4);
+COMPILE_ASSERT(offsetof(BlendEquationSeparate, modeAlpha) == 8,
+ OffsetOf_BlendEquationSeparate_modeAlpha_not_8);
+
+struct BlendFunc {
+ typedef BlendFunc ValueType;
+ static const CommandId kCmdId = kBlendFunc;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _sfactor, GLenum _dfactor) {
+ SetHeader();
+ sfactor = _sfactor;
+ dfactor = _dfactor;
+ }
+
+ void* Set(void* cmd, GLenum _sfactor, GLenum _dfactor) {
+ static_cast<ValueType*>(cmd)->Init(_sfactor, _dfactor);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t sfactor;
+ uint32_t dfactor;
+};
+
+COMPILE_ASSERT(sizeof(BlendFunc) == 12, Sizeof_BlendFunc_is_not_12);
+COMPILE_ASSERT(offsetof(BlendFunc, header) == 0,
+ OffsetOf_BlendFunc_header_not_0);
+COMPILE_ASSERT(offsetof(BlendFunc, sfactor) == 4,
+ OffsetOf_BlendFunc_sfactor_not_4);
+COMPILE_ASSERT(offsetof(BlendFunc, dfactor) == 8,
+ OffsetOf_BlendFunc_dfactor_not_8);
+
+struct BlendFuncSeparate {
+ typedef BlendFuncSeparate ValueType;
+ static const CommandId kCmdId = kBlendFuncSeparate;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _srcRGB,
+ GLenum _dstRGB,
+ GLenum _srcAlpha,
+ GLenum _dstAlpha) {
+ SetHeader();
+ srcRGB = _srcRGB;
+ dstRGB = _dstRGB;
+ srcAlpha = _srcAlpha;
+ dstAlpha = _dstAlpha;
+ }
+
+ void* Set(void* cmd,
+ GLenum _srcRGB,
+ GLenum _dstRGB,
+ GLenum _srcAlpha,
+ GLenum _dstAlpha) {
+ static_cast<ValueType*>(cmd)->Init(_srcRGB, _dstRGB, _srcAlpha, _dstAlpha);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t srcRGB;
+ uint32_t dstRGB;
+ uint32_t srcAlpha;
+ uint32_t dstAlpha;
+};
+
+COMPILE_ASSERT(sizeof(BlendFuncSeparate) == 20,
+ Sizeof_BlendFuncSeparate_is_not_20);
+COMPILE_ASSERT(offsetof(BlendFuncSeparate, header) == 0,
+ OffsetOf_BlendFuncSeparate_header_not_0);
+COMPILE_ASSERT(offsetof(BlendFuncSeparate, srcRGB) == 4,
+ OffsetOf_BlendFuncSeparate_srcRGB_not_4);
+COMPILE_ASSERT(offsetof(BlendFuncSeparate, dstRGB) == 8,
+ OffsetOf_BlendFuncSeparate_dstRGB_not_8);
+COMPILE_ASSERT(offsetof(BlendFuncSeparate, srcAlpha) == 12,
+ OffsetOf_BlendFuncSeparate_srcAlpha_not_12);
+COMPILE_ASSERT(offsetof(BlendFuncSeparate, dstAlpha) == 16,
+ OffsetOf_BlendFuncSeparate_dstAlpha_not_16);
+
+struct BufferData {
+ typedef BufferData ValueType;
+ static const CommandId kCmdId = kBufferData;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLsizeiptr _size,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset,
+ GLenum _usage) {
+ SetHeader();
+ target = _target;
+ size = _size;
+ data_shm_id = _data_shm_id;
+ data_shm_offset = _data_shm_offset;
+ usage = _usage;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLsizeiptr _size,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset,
+ GLenum _usage) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _size, _data_shm_id, _data_shm_offset, _usage);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t size;
+ uint32_t data_shm_id;
+ uint32_t data_shm_offset;
+ uint32_t usage;
+};
+
+COMPILE_ASSERT(sizeof(BufferData) == 24, Sizeof_BufferData_is_not_24);
+COMPILE_ASSERT(offsetof(BufferData, header) == 0,
+ OffsetOf_BufferData_header_not_0);
+COMPILE_ASSERT(offsetof(BufferData, target) == 4,
+ OffsetOf_BufferData_target_not_4);
+COMPILE_ASSERT(offsetof(BufferData, size) == 8, OffsetOf_BufferData_size_not_8);
+COMPILE_ASSERT(offsetof(BufferData, data_shm_id) == 12,
+ OffsetOf_BufferData_data_shm_id_not_12);
+COMPILE_ASSERT(offsetof(BufferData, data_shm_offset) == 16,
+ OffsetOf_BufferData_data_shm_offset_not_16);
+COMPILE_ASSERT(offsetof(BufferData, usage) == 20,
+ OffsetOf_BufferData_usage_not_20);
+
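+// Commands that carry bulk data, such as BufferData above, do not embed the
+// bytes in the command itself. The payload lives in shared memory and is
+// referenced by the (data_shm_id, data_shm_offset) pair; the service maps that
+// shared memory and hands the resulting pointer to the real GL call. Note that
+// the GLsizeiptr/GLintptr arguments are stored in 32-bit fields, so sizes and
+// offsets are limited to what fits in an int32_t on the wire.
+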
+struct BufferSubData {
+ typedef BufferSubData ValueType;
+ static const CommandId kCmdId = kBufferSubData;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLintptr _offset,
+ GLsizeiptr _size,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset) {
+ SetHeader();
+ target = _target;
+ offset = _offset;
+ size = _size;
+ data_shm_id = _data_shm_id;
+ data_shm_offset = _data_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLintptr _offset,
+ GLsizeiptr _size,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _offset, _size, _data_shm_id, _data_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t offset;
+ int32_t size;
+ uint32_t data_shm_id;
+ uint32_t data_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(BufferSubData) == 24, Sizeof_BufferSubData_is_not_24);
+COMPILE_ASSERT(offsetof(BufferSubData, header) == 0,
+ OffsetOf_BufferSubData_header_not_0);
+COMPILE_ASSERT(offsetof(BufferSubData, target) == 4,
+ OffsetOf_BufferSubData_target_not_4);
+COMPILE_ASSERT(offsetof(BufferSubData, offset) == 8,
+ OffsetOf_BufferSubData_offset_not_8);
+COMPILE_ASSERT(offsetof(BufferSubData, size) == 12,
+ OffsetOf_BufferSubData_size_not_12);
+COMPILE_ASSERT(offsetof(BufferSubData, data_shm_id) == 16,
+ OffsetOf_BufferSubData_data_shm_id_not_16);
+COMPILE_ASSERT(offsetof(BufferSubData, data_shm_offset) == 20,
+ OffsetOf_BufferSubData_data_shm_offset_not_20);
+
+struct CheckFramebufferStatus {
+ typedef CheckFramebufferStatus ValueType;
+ static const CommandId kCmdId = kCheckFramebufferStatus;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef GLenum Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ target = _target;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(CheckFramebufferStatus) == 16,
+ Sizeof_CheckFramebufferStatus_is_not_16);
+COMPILE_ASSERT(offsetof(CheckFramebufferStatus, header) == 0,
+ OffsetOf_CheckFramebufferStatus_header_not_0);
+COMPILE_ASSERT(offsetof(CheckFramebufferStatus, target) == 4,
+ OffsetOf_CheckFramebufferStatus_target_not_4);
+COMPILE_ASSERT(offsetof(CheckFramebufferStatus, result_shm_id) == 8,
+ OffsetOf_CheckFramebufferStatus_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(CheckFramebufferStatus, result_shm_offset) == 12,
+ OffsetOf_CheckFramebufferStatus_result_shm_offset_not_12);
+
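+// CheckFramebufferStatus is an example of a command with a return value: the
+// nested `typedef GLenum Result` names the type the service writes back into
+// shared memory at (result_shm_id, result_shm_offset). The client reserves a
+// Result slot, issues the command, and reads the slot back once the command
+// has been processed.
+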
+struct Clear {
+ typedef Clear ValueType;
+ static const CommandId kCmdId = kClear;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLbitfield _mask) {
+ SetHeader();
+ mask = _mask;
+ }
+
+ void* Set(void* cmd, GLbitfield _mask) {
+ static_cast<ValueType*>(cmd)->Init(_mask);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mask;
+};
+
+COMPILE_ASSERT(sizeof(Clear) == 8, Sizeof_Clear_is_not_8);
+COMPILE_ASSERT(offsetof(Clear, header) == 0, OffsetOf_Clear_header_not_0);
+COMPILE_ASSERT(offsetof(Clear, mask) == 4, OffsetOf_Clear_mask_not_4);
+
+struct ClearColor {
+ typedef ClearColor ValueType;
+ static const CommandId kCmdId = kClearColor;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLclampf _red, GLclampf _green, GLclampf _blue, GLclampf _alpha) {
+ SetHeader();
+ red = _red;
+ green = _green;
+ blue = _blue;
+ alpha = _alpha;
+ }
+
+ void* Set(void* cmd,
+ GLclampf _red,
+ GLclampf _green,
+ GLclampf _blue,
+ GLclampf _alpha) {
+ static_cast<ValueType*>(cmd)->Init(_red, _green, _blue, _alpha);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ float red;
+ float green;
+ float blue;
+ float alpha;
+};
+
+COMPILE_ASSERT(sizeof(ClearColor) == 20, Sizeof_ClearColor_is_not_20);
+COMPILE_ASSERT(offsetof(ClearColor, header) == 0,
+ OffsetOf_ClearColor_header_not_0);
+COMPILE_ASSERT(offsetof(ClearColor, red) == 4, OffsetOf_ClearColor_red_not_4);
+COMPILE_ASSERT(offsetof(ClearColor, green) == 8,
+ OffsetOf_ClearColor_green_not_8);
+COMPILE_ASSERT(offsetof(ClearColor, blue) == 12,
+ OffsetOf_ClearColor_blue_not_12);
+COMPILE_ASSERT(offsetof(ClearColor, alpha) == 16,
+ OffsetOf_ClearColor_alpha_not_16);
+
+struct ClearDepthf {
+ typedef ClearDepthf ValueType;
+ static const CommandId kCmdId = kClearDepthf;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLclampf _depth) {
+ SetHeader();
+ depth = _depth;
+ }
+
+ void* Set(void* cmd, GLclampf _depth) {
+ static_cast<ValueType*>(cmd)->Init(_depth);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ float depth;
+};
+
+COMPILE_ASSERT(sizeof(ClearDepthf) == 8, Sizeof_ClearDepthf_is_not_8);
+COMPILE_ASSERT(offsetof(ClearDepthf, header) == 0,
+ OffsetOf_ClearDepthf_header_not_0);
+COMPILE_ASSERT(offsetof(ClearDepthf, depth) == 4,
+ OffsetOf_ClearDepthf_depth_not_4);
+
+struct ClearStencil {
+ typedef ClearStencil ValueType;
+ static const CommandId kCmdId = kClearStencil;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _s) {
+ SetHeader();
+ s = _s;
+ }
+
+ void* Set(void* cmd, GLint _s) {
+ static_cast<ValueType*>(cmd)->Init(_s);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t s;
+};
+
+COMPILE_ASSERT(sizeof(ClearStencil) == 8, Sizeof_ClearStencil_is_not_8);
+COMPILE_ASSERT(offsetof(ClearStencil, header) == 0,
+ OffsetOf_ClearStencil_header_not_0);
+COMPILE_ASSERT(offsetof(ClearStencil, s) == 4, OffsetOf_ClearStencil_s_not_4);
+
+struct ColorMask {
+ typedef ColorMask ValueType;
+ static const CommandId kCmdId = kColorMask;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLboolean _red,
+ GLboolean _green,
+ GLboolean _blue,
+ GLboolean _alpha) {
+ SetHeader();
+ red = _red;
+ green = _green;
+ blue = _blue;
+ alpha = _alpha;
+ }
+
+ void* Set(void* cmd,
+ GLboolean _red,
+ GLboolean _green,
+ GLboolean _blue,
+ GLboolean _alpha) {
+ static_cast<ValueType*>(cmd)->Init(_red, _green, _blue, _alpha);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t red;
+ uint32_t green;
+ uint32_t blue;
+ uint32_t alpha;
+};
+
+COMPILE_ASSERT(sizeof(ColorMask) == 20, Sizeof_ColorMask_is_not_20);
+COMPILE_ASSERT(offsetof(ColorMask, header) == 0,
+ OffsetOf_ColorMask_header_not_0);
+COMPILE_ASSERT(offsetof(ColorMask, red) == 4, OffsetOf_ColorMask_red_not_4);
+COMPILE_ASSERT(offsetof(ColorMask, green) == 8, OffsetOf_ColorMask_green_not_8);
+COMPILE_ASSERT(offsetof(ColorMask, blue) == 12, OffsetOf_ColorMask_blue_not_12);
+COMPILE_ASSERT(offsetof(ColorMask, alpha) == 16,
+ OffsetOf_ColorMask_alpha_not_16);
+
+struct CompileShader {
+ typedef CompileShader ValueType;
+ static const CommandId kCmdId = kCompileShader;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _shader) {
+ SetHeader();
+ shader = _shader;
+ }
+
+ void* Set(void* cmd, GLuint _shader) {
+ static_cast<ValueType*>(cmd)->Init(_shader);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t shader;
+};
+
+COMPILE_ASSERT(sizeof(CompileShader) == 8, Sizeof_CompileShader_is_not_8);
+COMPILE_ASSERT(offsetof(CompileShader, header) == 0,
+ OffsetOf_CompileShader_header_not_0);
+COMPILE_ASSERT(offsetof(CompileShader, shader) == 4,
+ OffsetOf_CompileShader_shader_not_4);
+
+struct CompressedTexImage2DBucket {
+ typedef CompressedTexImage2DBucket ValueType;
+ static const CommandId kCmdId = kCompressedTexImage2DBucket;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLint _level,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height,
+ GLuint _bucket_id) {
+ SetHeader();
+ target = _target;
+ level = _level;
+ internalformat = _internalformat;
+ width = _width;
+ height = _height;
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLint _level,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height,
+ GLuint _bucket_id) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _level, _internalformat, _width, _height, _bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t level;
+ uint32_t internalformat;
+ int32_t width;
+ int32_t height;
+ uint32_t bucket_id;
+ static const int32_t border = 0;
+};
+
+COMPILE_ASSERT(sizeof(CompressedTexImage2DBucket) == 28,
+ Sizeof_CompressedTexImage2DBucket_is_not_28);
+COMPILE_ASSERT(offsetof(CompressedTexImage2DBucket, header) == 0,
+ OffsetOf_CompressedTexImage2DBucket_header_not_0);
+COMPILE_ASSERT(offsetof(CompressedTexImage2DBucket, target) == 4,
+ OffsetOf_CompressedTexImage2DBucket_target_not_4);
+COMPILE_ASSERT(offsetof(CompressedTexImage2DBucket, level) == 8,
+ OffsetOf_CompressedTexImage2DBucket_level_not_8);
+COMPILE_ASSERT(offsetof(CompressedTexImage2DBucket, internalformat) == 12,
+ OffsetOf_CompressedTexImage2DBucket_internalformat_not_12);
+COMPILE_ASSERT(offsetof(CompressedTexImage2DBucket, width) == 16,
+ OffsetOf_CompressedTexImage2DBucket_width_not_16);
+COMPILE_ASSERT(offsetof(CompressedTexImage2DBucket, height) == 20,
+ OffsetOf_CompressedTexImage2DBucket_height_not_20);
+COMPILE_ASSERT(offsetof(CompressedTexImage2DBucket, bucket_id) == 24,
+ OffsetOf_CompressedTexImage2DBucket_bucket_id_not_24);
+
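+// The *Bucket variants keep the command fixed-size by moving variable-length
+// data (here the compressed texel data, elsewhere name strings) into a bucket
+// identified by bucket_id; the bucket contents are uploaded separately with
+// the common bucket commands before this command is issued. Arguments that are
+// never transmitted are emitted as static constants instead, such as `border`
+// above, which GLES2 requires to be 0.
+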
+struct CompressedTexImage2D {
+ typedef CompressedTexImage2D ValueType;
+ static const CommandId kCmdId = kCompressedTexImage2D;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLint _level,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height,
+ GLsizei _imageSize,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset) {
+ SetHeader();
+ target = _target;
+ level = _level;
+ internalformat = _internalformat;
+ width = _width;
+ height = _height;
+ imageSize = _imageSize;
+ data_shm_id = _data_shm_id;
+ data_shm_offset = _data_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLint _level,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height,
+ GLsizei _imageSize,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_target,
+ _level,
+ _internalformat,
+ _width,
+ _height,
+ _imageSize,
+ _data_shm_id,
+ _data_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t level;
+ uint32_t internalformat;
+ int32_t width;
+ int32_t height;
+ int32_t imageSize;
+ uint32_t data_shm_id;
+ uint32_t data_shm_offset;
+ static const int32_t border = 0;
+};
+
+COMPILE_ASSERT(sizeof(CompressedTexImage2D) == 36,
+ Sizeof_CompressedTexImage2D_is_not_36);
+COMPILE_ASSERT(offsetof(CompressedTexImage2D, header) == 0,
+ OffsetOf_CompressedTexImage2D_header_not_0);
+COMPILE_ASSERT(offsetof(CompressedTexImage2D, target) == 4,
+ OffsetOf_CompressedTexImage2D_target_not_4);
+COMPILE_ASSERT(offsetof(CompressedTexImage2D, level) == 8,
+ OffsetOf_CompressedTexImage2D_level_not_8);
+COMPILE_ASSERT(offsetof(CompressedTexImage2D, internalformat) == 12,
+ OffsetOf_CompressedTexImage2D_internalformat_not_12);
+COMPILE_ASSERT(offsetof(CompressedTexImage2D, width) == 16,
+ OffsetOf_CompressedTexImage2D_width_not_16);
+COMPILE_ASSERT(offsetof(CompressedTexImage2D, height) == 20,
+ OffsetOf_CompressedTexImage2D_height_not_20);
+COMPILE_ASSERT(offsetof(CompressedTexImage2D, imageSize) == 24,
+ OffsetOf_CompressedTexImage2D_imageSize_not_24);
+COMPILE_ASSERT(offsetof(CompressedTexImage2D, data_shm_id) == 28,
+ OffsetOf_CompressedTexImage2D_data_shm_id_not_28);
+COMPILE_ASSERT(offsetof(CompressedTexImage2D, data_shm_offset) == 32,
+ OffsetOf_CompressedTexImage2D_data_shm_offset_not_32);
+
+struct CompressedTexSubImage2DBucket {
+ typedef CompressedTexSubImage2DBucket ValueType;
+ static const CommandId kCmdId = kCompressedTexSubImage2DBucket;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLuint _bucket_id) {
+ SetHeader();
+ target = _target;
+ level = _level;
+ xoffset = _xoffset;
+ yoffset = _yoffset;
+ width = _width;
+ height = _height;
+ format = _format;
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLuint _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_target,
+ _level,
+ _xoffset,
+ _yoffset,
+ _width,
+ _height,
+ _format,
+ _bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t level;
+ int32_t xoffset;
+ int32_t yoffset;
+ int32_t width;
+ int32_t height;
+ uint32_t format;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(CompressedTexSubImage2DBucket) == 36,
+ Sizeof_CompressedTexSubImage2DBucket_is_not_36);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2DBucket, header) == 0,
+ OffsetOf_CompressedTexSubImage2DBucket_header_not_0);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2DBucket, target) == 4,
+ OffsetOf_CompressedTexSubImage2DBucket_target_not_4);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2DBucket, level) == 8,
+ OffsetOf_CompressedTexSubImage2DBucket_level_not_8);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2DBucket, xoffset) == 12,
+ OffsetOf_CompressedTexSubImage2DBucket_xoffset_not_12);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2DBucket, yoffset) == 16,
+ OffsetOf_CompressedTexSubImage2DBucket_yoffset_not_16);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2DBucket, width) == 20,
+ OffsetOf_CompressedTexSubImage2DBucket_width_not_20);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2DBucket, height) == 24,
+ OffsetOf_CompressedTexSubImage2DBucket_height_not_24);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2DBucket, format) == 28,
+ OffsetOf_CompressedTexSubImage2DBucket_format_not_28);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2DBucket, bucket_id) == 32,
+ OffsetOf_CompressedTexSubImage2DBucket_bucket_id_not_32);
+
+struct CompressedTexSubImage2D {
+ typedef CompressedTexSubImage2D ValueType;
+ static const CommandId kCmdId = kCompressedTexSubImage2D;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLsizei _imageSize,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset) {
+ SetHeader();
+ target = _target;
+ level = _level;
+ xoffset = _xoffset;
+ yoffset = _yoffset;
+ width = _width;
+ height = _height;
+ format = _format;
+ imageSize = _imageSize;
+ data_shm_id = _data_shm_id;
+ data_shm_offset = _data_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLsizei _imageSize,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_target,
+ _level,
+ _xoffset,
+ _yoffset,
+ _width,
+ _height,
+ _format,
+ _imageSize,
+ _data_shm_id,
+ _data_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t level;
+ int32_t xoffset;
+ int32_t yoffset;
+ int32_t width;
+ int32_t height;
+ uint32_t format;
+ int32_t imageSize;
+ uint32_t data_shm_id;
+ uint32_t data_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(CompressedTexSubImage2D) == 44,
+ Sizeof_CompressedTexSubImage2D_is_not_44);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, header) == 0,
+ OffsetOf_CompressedTexSubImage2D_header_not_0);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, target) == 4,
+ OffsetOf_CompressedTexSubImage2D_target_not_4);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, level) == 8,
+ OffsetOf_CompressedTexSubImage2D_level_not_8);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, xoffset) == 12,
+ OffsetOf_CompressedTexSubImage2D_xoffset_not_12);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, yoffset) == 16,
+ OffsetOf_CompressedTexSubImage2D_yoffset_not_16);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, width) == 20,
+ OffsetOf_CompressedTexSubImage2D_width_not_20);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, height) == 24,
+ OffsetOf_CompressedTexSubImage2D_height_not_24);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, format) == 28,
+ OffsetOf_CompressedTexSubImage2D_format_not_28);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, imageSize) == 32,
+ OffsetOf_CompressedTexSubImage2D_imageSize_not_32);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, data_shm_id) == 36,
+ OffsetOf_CompressedTexSubImage2D_data_shm_id_not_36);
+COMPILE_ASSERT(offsetof(CompressedTexSubImage2D, data_shm_offset) == 40,
+ OffsetOf_CompressedTexSubImage2D_data_shm_offset_not_40);
+
+struct CopyTexImage2D {
+ typedef CopyTexImage2D ValueType;
+ static const CommandId kCmdId = kCopyTexImage2D;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLint _level,
+ GLenum _internalformat,
+ GLint _x,
+ GLint _y,
+ GLsizei _width,
+ GLsizei _height) {
+ SetHeader();
+ target = _target;
+ level = _level;
+ internalformat = _internalformat;
+ x = _x;
+ y = _y;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLint _level,
+ GLenum _internalformat,
+ GLint _x,
+ GLint _y,
+ GLsizei _width,
+ GLsizei _height) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _level, _internalformat, _x, _y, _width, _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t level;
+ uint32_t internalformat;
+ int32_t x;
+ int32_t y;
+ int32_t width;
+ int32_t height;
+ static const int32_t border = 0;
+};
+
+COMPILE_ASSERT(sizeof(CopyTexImage2D) == 32, Sizeof_CopyTexImage2D_is_not_32);
+COMPILE_ASSERT(offsetof(CopyTexImage2D, header) == 0,
+ OffsetOf_CopyTexImage2D_header_not_0);
+COMPILE_ASSERT(offsetof(CopyTexImage2D, target) == 4,
+ OffsetOf_CopyTexImage2D_target_not_4);
+COMPILE_ASSERT(offsetof(CopyTexImage2D, level) == 8,
+ OffsetOf_CopyTexImage2D_level_not_8);
+COMPILE_ASSERT(offsetof(CopyTexImage2D, internalformat) == 12,
+ OffsetOf_CopyTexImage2D_internalformat_not_12);
+COMPILE_ASSERT(offsetof(CopyTexImage2D, x) == 16,
+ OffsetOf_CopyTexImage2D_x_not_16);
+COMPILE_ASSERT(offsetof(CopyTexImage2D, y) == 20,
+ OffsetOf_CopyTexImage2D_y_not_20);
+COMPILE_ASSERT(offsetof(CopyTexImage2D, width) == 24,
+ OffsetOf_CopyTexImage2D_width_not_24);
+COMPILE_ASSERT(offsetof(CopyTexImage2D, height) == 28,
+ OffsetOf_CopyTexImage2D_height_not_28);
+
+struct CopyTexSubImage2D {
+ typedef CopyTexSubImage2D ValueType;
+ static const CommandId kCmdId = kCopyTexSubImage2D;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLint _x,
+ GLint _y,
+ GLsizei _width,
+ GLsizei _height) {
+ SetHeader();
+ target = _target;
+ level = _level;
+ xoffset = _xoffset;
+ yoffset = _yoffset;
+ x = _x;
+ y = _y;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLint _x,
+ GLint _y,
+ GLsizei _width,
+ GLsizei _height) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _level, _xoffset, _yoffset, _x, _y, _width, _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t level;
+ int32_t xoffset;
+ int32_t yoffset;
+ int32_t x;
+ int32_t y;
+ int32_t width;
+ int32_t height;
+};
+
+COMPILE_ASSERT(sizeof(CopyTexSubImage2D) == 36,
+ Sizeof_CopyTexSubImage2D_is_not_36);
+COMPILE_ASSERT(offsetof(CopyTexSubImage2D, header) == 0,
+ OffsetOf_CopyTexSubImage2D_header_not_0);
+COMPILE_ASSERT(offsetof(CopyTexSubImage2D, target) == 4,
+ OffsetOf_CopyTexSubImage2D_target_not_4);
+COMPILE_ASSERT(offsetof(CopyTexSubImage2D, level) == 8,
+ OffsetOf_CopyTexSubImage2D_level_not_8);
+COMPILE_ASSERT(offsetof(CopyTexSubImage2D, xoffset) == 12,
+ OffsetOf_CopyTexSubImage2D_xoffset_not_12);
+COMPILE_ASSERT(offsetof(CopyTexSubImage2D, yoffset) == 16,
+ OffsetOf_CopyTexSubImage2D_yoffset_not_16);
+COMPILE_ASSERT(offsetof(CopyTexSubImage2D, x) == 20,
+ OffsetOf_CopyTexSubImage2D_x_not_20);
+COMPILE_ASSERT(offsetof(CopyTexSubImage2D, y) == 24,
+ OffsetOf_CopyTexSubImage2D_y_not_24);
+COMPILE_ASSERT(offsetof(CopyTexSubImage2D, width) == 28,
+ OffsetOf_CopyTexSubImage2D_width_not_28);
+COMPILE_ASSERT(offsetof(CopyTexSubImage2D, height) == 32,
+ OffsetOf_CopyTexSubImage2D_height_not_32);
+
+struct CreateProgram {
+ typedef CreateProgram ValueType;
+ static const CommandId kCmdId = kCreateProgram;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(uint32_t _client_id) {
+ SetHeader();
+ client_id = _client_id;
+ }
+
+ void* Set(void* cmd, uint32_t _client_id) {
+ static_cast<ValueType*>(cmd)->Init(_client_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t client_id;
+};
+
+COMPILE_ASSERT(sizeof(CreateProgram) == 8, Sizeof_CreateProgram_is_not_8);
+COMPILE_ASSERT(offsetof(CreateProgram, header) == 0,
+ OffsetOf_CreateProgram_header_not_0);
+COMPILE_ASSERT(offsetof(CreateProgram, client_id) == 4,
+ OffsetOf_CreateProgram_client_id_not_4);
+
+struct CreateShader {
+ typedef CreateShader ValueType;
+ static const CommandId kCmdId = kCreateShader;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _type, uint32_t _client_id) {
+ SetHeader();
+ type = _type;
+ client_id = _client_id;
+ }
+
+ void* Set(void* cmd, GLenum _type, uint32_t _client_id) {
+ static_cast<ValueType*>(cmd)->Init(_type, _client_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t type;
+ uint32_t client_id;
+};
+
+COMPILE_ASSERT(sizeof(CreateShader) == 12, Sizeof_CreateShader_is_not_12);
+COMPILE_ASSERT(offsetof(CreateShader, header) == 0,
+ OffsetOf_CreateShader_header_not_0);
+COMPILE_ASSERT(offsetof(CreateShader, type) == 4,
+ OffsetOf_CreateShader_type_not_4);
+COMPILE_ASSERT(offsetof(CreateShader, client_id) == 8,
+ OffsetOf_CreateShader_client_id_not_8);
+
+struct CullFace {
+ typedef CullFace ValueType;
+ static const CommandId kCmdId = kCullFace;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode) {
+ SetHeader();
+ mode = _mode;
+ }
+
+ void* Set(void* cmd, GLenum _mode) {
+ static_cast<ValueType*>(cmd)->Init(_mode);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+};
+
+COMPILE_ASSERT(sizeof(CullFace) == 8, Sizeof_CullFace_is_not_8);
+COMPILE_ASSERT(offsetof(CullFace, header) == 0, OffsetOf_CullFace_header_not_0);
+COMPILE_ASSERT(offsetof(CullFace, mode) == 4, OffsetOf_CullFace_mode_not_4);
+
+struct DeleteBuffersImmediate {
+ typedef DeleteBuffersImmediate ValueType;
+ static const CommandId kCmdId = kDeleteBuffersImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, const GLuint* _buffers) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _buffers, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, const GLuint* _buffers) {
+ static_cast<ValueType*>(cmd)->Init(_n, _buffers);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(DeleteBuffersImmediate) == 8,
+ Sizeof_DeleteBuffersImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(DeleteBuffersImmediate, header) == 0,
+ OffsetOf_DeleteBuffersImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(DeleteBuffersImmediate, n) == 4,
+ OffsetOf_DeleteBuffersImmediate_n_not_4);
+
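+// The *Immediate variants use cmd::kAtLeastN: the array of ids is copied
+// directly after the struct (at ImmediateDataAddress) instead of going through
+// shared memory or a bucket, so the total command size depends on `n`.
+// SetHeader therefore records the full size via SetCmdByTotalSize, and Set
+// advances by ComputeSize(_n) rather than by sizeof(ValueType).
+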
+struct DeleteFramebuffersImmediate {
+ typedef DeleteFramebuffersImmediate ValueType;
+ static const CommandId kCmdId = kDeleteFramebuffersImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, const GLuint* _framebuffers) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _framebuffers, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, const GLuint* _framebuffers) {
+ static_cast<ValueType*>(cmd)->Init(_n, _framebuffers);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(DeleteFramebuffersImmediate) == 8,
+ Sizeof_DeleteFramebuffersImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(DeleteFramebuffersImmediate, header) == 0,
+ OffsetOf_DeleteFramebuffersImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(DeleteFramebuffersImmediate, n) == 4,
+ OffsetOf_DeleteFramebuffersImmediate_n_not_4);
+
+struct DeleteProgram {
+ typedef DeleteProgram ValueType;
+ static const CommandId kCmdId = kDeleteProgram;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program) {
+ SetHeader();
+ program = _program;
+ }
+
+ void* Set(void* cmd, GLuint _program) {
+ static_cast<ValueType*>(cmd)->Init(_program);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+};
+
+COMPILE_ASSERT(sizeof(DeleteProgram) == 8, Sizeof_DeleteProgram_is_not_8);
+COMPILE_ASSERT(offsetof(DeleteProgram, header) == 0,
+ OffsetOf_DeleteProgram_header_not_0);
+COMPILE_ASSERT(offsetof(DeleteProgram, program) == 4,
+ OffsetOf_DeleteProgram_program_not_4);
+
+struct DeleteRenderbuffersImmediate {
+ typedef DeleteRenderbuffersImmediate ValueType;
+ static const CommandId kCmdId = kDeleteRenderbuffersImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, const GLuint* _renderbuffers) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _renderbuffers, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, const GLuint* _renderbuffers) {
+ static_cast<ValueType*>(cmd)->Init(_n, _renderbuffers);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(DeleteRenderbuffersImmediate) == 8,
+ Sizeof_DeleteRenderbuffersImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(DeleteRenderbuffersImmediate, header) == 0,
+ OffsetOf_DeleteRenderbuffersImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(DeleteRenderbuffersImmediate, n) == 4,
+ OffsetOf_DeleteRenderbuffersImmediate_n_not_4);
+
+struct DeleteShader {
+ typedef DeleteShader ValueType;
+ static const CommandId kCmdId = kDeleteShader;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _shader) {
+ SetHeader();
+ shader = _shader;
+ }
+
+ void* Set(void* cmd, GLuint _shader) {
+ static_cast<ValueType*>(cmd)->Init(_shader);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t shader;
+};
+
+COMPILE_ASSERT(sizeof(DeleteShader) == 8, Sizeof_DeleteShader_is_not_8);
+COMPILE_ASSERT(offsetof(DeleteShader, header) == 0,
+ OffsetOf_DeleteShader_header_not_0);
+COMPILE_ASSERT(offsetof(DeleteShader, shader) == 4,
+ OffsetOf_DeleteShader_shader_not_4);
+
+struct DeleteTexturesImmediate {
+ typedef DeleteTexturesImmediate ValueType;
+ static const CommandId kCmdId = kDeleteTexturesImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, const GLuint* _textures) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _textures, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, const GLuint* _textures) {
+ static_cast<ValueType*>(cmd)->Init(_n, _textures);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(DeleteTexturesImmediate) == 8,
+ Sizeof_DeleteTexturesImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(DeleteTexturesImmediate, header) == 0,
+ OffsetOf_DeleteTexturesImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(DeleteTexturesImmediate, n) == 4,
+ OffsetOf_DeleteTexturesImmediate_n_not_4);
+
+struct DepthFunc {
+ typedef DepthFunc ValueType;
+ static const CommandId kCmdId = kDepthFunc;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _func) {
+ SetHeader();
+ func = _func;
+ }
+
+ void* Set(void* cmd, GLenum _func) {
+ static_cast<ValueType*>(cmd)->Init(_func);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t func;
+};
+
+COMPILE_ASSERT(sizeof(DepthFunc) == 8, Sizeof_DepthFunc_is_not_8);
+COMPILE_ASSERT(offsetof(DepthFunc, header) == 0,
+ OffsetOf_DepthFunc_header_not_0);
+COMPILE_ASSERT(offsetof(DepthFunc, func) == 4, OffsetOf_DepthFunc_func_not_4);
+
+struct DepthMask {
+ typedef DepthMask ValueType;
+ static const CommandId kCmdId = kDepthMask;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLboolean _flag) {
+ SetHeader();
+ flag = _flag;
+ }
+
+ void* Set(void* cmd, GLboolean _flag) {
+ static_cast<ValueType*>(cmd)->Init(_flag);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t flag;
+};
+
+COMPILE_ASSERT(sizeof(DepthMask) == 8, Sizeof_DepthMask_is_not_8);
+COMPILE_ASSERT(offsetof(DepthMask, header) == 0,
+ OffsetOf_DepthMask_header_not_0);
+COMPILE_ASSERT(offsetof(DepthMask, flag) == 4, OffsetOf_DepthMask_flag_not_4);
+
+struct DepthRangef {
+ typedef DepthRangef ValueType;
+ static const CommandId kCmdId = kDepthRangef;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLclampf _zNear, GLclampf _zFar) {
+ SetHeader();
+ zNear = _zNear;
+ zFar = _zFar;
+ }
+
+ void* Set(void* cmd, GLclampf _zNear, GLclampf _zFar) {
+ static_cast<ValueType*>(cmd)->Init(_zNear, _zFar);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ float zNear;
+ float zFar;
+};
+
+COMPILE_ASSERT(sizeof(DepthRangef) == 12, Sizeof_DepthRangef_is_not_12);
+COMPILE_ASSERT(offsetof(DepthRangef, header) == 0,
+ OffsetOf_DepthRangef_header_not_0);
+COMPILE_ASSERT(offsetof(DepthRangef, zNear) == 4,
+ OffsetOf_DepthRangef_zNear_not_4);
+COMPILE_ASSERT(offsetof(DepthRangef, zFar) == 8,
+ OffsetOf_DepthRangef_zFar_not_8);
+
+struct DetachShader {
+ typedef DetachShader ValueType;
+ static const CommandId kCmdId = kDetachShader;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program, GLuint _shader) {
+ SetHeader();
+ program = _program;
+ shader = _shader;
+ }
+
+ void* Set(void* cmd, GLuint _program, GLuint _shader) {
+ static_cast<ValueType*>(cmd)->Init(_program, _shader);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t shader;
+};
+
+COMPILE_ASSERT(sizeof(DetachShader) == 12, Sizeof_DetachShader_is_not_12);
+COMPILE_ASSERT(offsetof(DetachShader, header) == 0,
+ OffsetOf_DetachShader_header_not_0);
+COMPILE_ASSERT(offsetof(DetachShader, program) == 4,
+ OffsetOf_DetachShader_program_not_4);
+COMPILE_ASSERT(offsetof(DetachShader, shader) == 8,
+ OffsetOf_DetachShader_shader_not_8);
+
+struct Disable {
+ typedef Disable ValueType;
+ static const CommandId kCmdId = kDisable;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _cap) {
+ SetHeader();
+ cap = _cap;
+ }
+
+ void* Set(void* cmd, GLenum _cap) {
+ static_cast<ValueType*>(cmd)->Init(_cap);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t cap;
+};
+
+COMPILE_ASSERT(sizeof(Disable) == 8, Sizeof_Disable_is_not_8);
+COMPILE_ASSERT(offsetof(Disable, header) == 0, OffsetOf_Disable_header_not_0);
+COMPILE_ASSERT(offsetof(Disable, cap) == 4, OffsetOf_Disable_cap_not_4);
+
+struct DisableVertexAttribArray {
+ typedef DisableVertexAttribArray ValueType;
+ static const CommandId kCmdId = kDisableVertexAttribArray;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _index) {
+ SetHeader();
+ index = _index;
+ }
+
+ void* Set(void* cmd, GLuint _index) {
+ static_cast<ValueType*>(cmd)->Init(_index);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t index;
+};
+
+COMPILE_ASSERT(sizeof(DisableVertexAttribArray) == 8,
+ Sizeof_DisableVertexAttribArray_is_not_8);
+COMPILE_ASSERT(offsetof(DisableVertexAttribArray, header) == 0,
+ OffsetOf_DisableVertexAttribArray_header_not_0);
+COMPILE_ASSERT(offsetof(DisableVertexAttribArray, index) == 4,
+ OffsetOf_DisableVertexAttribArray_index_not_4);
+
+struct DrawArrays {
+ typedef DrawArrays ValueType;
+ static const CommandId kCmdId = kDrawArrays;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode, GLint _first, GLsizei _count) {
+ SetHeader();
+ mode = _mode;
+ first = _first;
+ count = _count;
+ }
+
+ void* Set(void* cmd, GLenum _mode, GLint _first, GLsizei _count) {
+ static_cast<ValueType*>(cmd)->Init(_mode, _first, _count);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+ int32_t first;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(DrawArrays) == 16, Sizeof_DrawArrays_is_not_16);
+COMPILE_ASSERT(offsetof(DrawArrays, header) == 0,
+ OffsetOf_DrawArrays_header_not_0);
+COMPILE_ASSERT(offsetof(DrawArrays, mode) == 4, OffsetOf_DrawArrays_mode_not_4);
+COMPILE_ASSERT(offsetof(DrawArrays, first) == 8,
+ OffsetOf_DrawArrays_first_not_8);
+COMPILE_ASSERT(offsetof(DrawArrays, count) == 12,
+ OffsetOf_DrawArrays_count_not_12);
+
+struct DrawElements {
+ typedef DrawElements ValueType;
+ static const CommandId kCmdId = kDrawElements;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode, GLsizei _count, GLenum _type, GLuint _index_offset) {
+ SetHeader();
+ mode = _mode;
+ count = _count;
+ type = _type;
+ index_offset = _index_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _mode,
+ GLsizei _count,
+ GLenum _type,
+ GLuint _index_offset) {
+ static_cast<ValueType*>(cmd)->Init(_mode, _count, _type, _index_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+ int32_t count;
+ uint32_t type;
+ uint32_t index_offset;
+};
+
+COMPILE_ASSERT(sizeof(DrawElements) == 20, Sizeof_DrawElements_is_not_20);
+COMPILE_ASSERT(offsetof(DrawElements, header) == 0,
+ OffsetOf_DrawElements_header_not_0);
+COMPILE_ASSERT(offsetof(DrawElements, mode) == 4,
+ OffsetOf_DrawElements_mode_not_4);
+COMPILE_ASSERT(offsetof(DrawElements, count) == 8,
+ OffsetOf_DrawElements_count_not_8);
+COMPILE_ASSERT(offsetof(DrawElements, type) == 12,
+ OffsetOf_DrawElements_type_not_12);
+COMPILE_ASSERT(offsetof(DrawElements, index_offset) == 16,
+ OffsetOf_DrawElements_index_offset_not_16);
+
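+// Judging from the values in this file, the trace level in cmd_flags groups
+// commands for GPU tracing: draw calls (DrawArrays, DrawElements) use level 2,
+// a few heavyweight commands such as Clear, BindFramebuffer, BindTexture and
+// FramebufferTexture2D use level 1, and most plain state setters use level 3.
+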
+struct Enable {
+ typedef Enable ValueType;
+ static const CommandId kCmdId = kEnable;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _cap) {
+ SetHeader();
+ cap = _cap;
+ }
+
+ void* Set(void* cmd, GLenum _cap) {
+ static_cast<ValueType*>(cmd)->Init(_cap);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t cap;
+};
+
+COMPILE_ASSERT(sizeof(Enable) == 8, Sizeof_Enable_is_not_8);
+COMPILE_ASSERT(offsetof(Enable, header) == 0, OffsetOf_Enable_header_not_0);
+COMPILE_ASSERT(offsetof(Enable, cap) == 4, OffsetOf_Enable_cap_not_4);
+
+struct EnableVertexAttribArray {
+ typedef EnableVertexAttribArray ValueType;
+ static const CommandId kCmdId = kEnableVertexAttribArray;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _index) {
+ SetHeader();
+ index = _index;
+ }
+
+ void* Set(void* cmd, GLuint _index) {
+ static_cast<ValueType*>(cmd)->Init(_index);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t index;
+};
+
+COMPILE_ASSERT(sizeof(EnableVertexAttribArray) == 8,
+ Sizeof_EnableVertexAttribArray_is_not_8);
+COMPILE_ASSERT(offsetof(EnableVertexAttribArray, header) == 0,
+ OffsetOf_EnableVertexAttribArray_header_not_0);
+COMPILE_ASSERT(offsetof(EnableVertexAttribArray, index) == 4,
+ OffsetOf_EnableVertexAttribArray_index_not_4);
+
+struct Finish {
+ typedef Finish ValueType;
+ static const CommandId kCmdId = kFinish;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(Finish) == 4, Sizeof_Finish_is_not_4);
+COMPILE_ASSERT(offsetof(Finish, header) == 0, OffsetOf_Finish_header_not_0);
+
+struct Flush {
+ typedef Flush ValueType;
+ static const CommandId kCmdId = kFlush;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(Flush) == 4, Sizeof_Flush_is_not_4);
+COMPILE_ASSERT(offsetof(Flush, header) == 0, OffsetOf_Flush_header_not_0);
+
+struct FramebufferRenderbuffer {
+ typedef FramebufferRenderbuffer ValueType;
+ static const CommandId kCmdId = kFramebufferRenderbuffer;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLenum _attachment,
+ GLenum _renderbuffertarget,
+ GLuint _renderbuffer) {
+ SetHeader();
+ target = _target;
+ attachment = _attachment;
+ renderbuffertarget = _renderbuffertarget;
+ renderbuffer = _renderbuffer;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLenum _attachment,
+ GLenum _renderbuffertarget,
+ GLuint _renderbuffer) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _attachment, _renderbuffertarget, _renderbuffer);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t attachment;
+ uint32_t renderbuffertarget;
+ uint32_t renderbuffer;
+};
+
+COMPILE_ASSERT(sizeof(FramebufferRenderbuffer) == 20,
+ Sizeof_FramebufferRenderbuffer_is_not_20);
+COMPILE_ASSERT(offsetof(FramebufferRenderbuffer, header) == 0,
+ OffsetOf_FramebufferRenderbuffer_header_not_0);
+COMPILE_ASSERT(offsetof(FramebufferRenderbuffer, target) == 4,
+ OffsetOf_FramebufferRenderbuffer_target_not_4);
+COMPILE_ASSERT(offsetof(FramebufferRenderbuffer, attachment) == 8,
+ OffsetOf_FramebufferRenderbuffer_attachment_not_8);
+COMPILE_ASSERT(offsetof(FramebufferRenderbuffer, renderbuffertarget) == 12,
+ OffsetOf_FramebufferRenderbuffer_renderbuffertarget_not_12);
+COMPILE_ASSERT(offsetof(FramebufferRenderbuffer, renderbuffer) == 16,
+ OffsetOf_FramebufferRenderbuffer_renderbuffer_not_16);
+
+struct FramebufferTexture2D {
+ typedef FramebufferTexture2D ValueType;
+ static const CommandId kCmdId = kFramebufferTexture2D;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLenum _attachment,
+ GLenum _textarget,
+ GLuint _texture) {
+ SetHeader();
+ target = _target;
+ attachment = _attachment;
+ textarget = _textarget;
+ texture = _texture;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLenum _attachment,
+ GLenum _textarget,
+ GLuint _texture) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _attachment, _textarget, _texture);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t attachment;
+ uint32_t textarget;
+ uint32_t texture;
+ static const int32_t level = 0;
+};
+
+COMPILE_ASSERT(sizeof(FramebufferTexture2D) == 20,
+ Sizeof_FramebufferTexture2D_is_not_20);
+COMPILE_ASSERT(offsetof(FramebufferTexture2D, header) == 0,
+ OffsetOf_FramebufferTexture2D_header_not_0);
+COMPILE_ASSERT(offsetof(FramebufferTexture2D, target) == 4,
+ OffsetOf_FramebufferTexture2D_target_not_4);
+COMPILE_ASSERT(offsetof(FramebufferTexture2D, attachment) == 8,
+ OffsetOf_FramebufferTexture2D_attachment_not_8);
+COMPILE_ASSERT(offsetof(FramebufferTexture2D, textarget) == 12,
+ OffsetOf_FramebufferTexture2D_textarget_not_12);
+COMPILE_ASSERT(offsetof(FramebufferTexture2D, texture) == 16,
+ OffsetOf_FramebufferTexture2D_texture_not_16);
+
+struct FrontFace {
+ typedef FrontFace ValueType;
+ static const CommandId kCmdId = kFrontFace;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode) {
+ SetHeader();
+ mode = _mode;
+ }
+
+ void* Set(void* cmd, GLenum _mode) {
+ static_cast<ValueType*>(cmd)->Init(_mode);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+};
+
+COMPILE_ASSERT(sizeof(FrontFace) == 8, Sizeof_FrontFace_is_not_8);
+COMPILE_ASSERT(offsetof(FrontFace, header) == 0,
+ OffsetOf_FrontFace_header_not_0);
+COMPILE_ASSERT(offsetof(FrontFace, mode) == 4, OffsetOf_FrontFace_mode_not_4);
+
+struct GenBuffersImmediate {
+ typedef GenBuffersImmediate ValueType;
+ static const CommandId kCmdId = kGenBuffersImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, GLuint* _buffers) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _buffers, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, GLuint* _buffers) {
+ static_cast<ValueType*>(cmd)->Init(_n, _buffers);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(GenBuffersImmediate) == 8,
+ Sizeof_GenBuffersImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(GenBuffersImmediate, header) == 0,
+ OffsetOf_GenBuffersImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(GenBuffersImmediate, n) == 4,
+ OffsetOf_GenBuffersImmediate_n_not_4);
+
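+// The *Immediate commands (GenBuffersImmediate, GenFramebuffersImmediate,
+// GenRenderbuffersImmediate, GenTexturesImmediate) use cmd::kAtLeastN: the
+// fixed part of the struct carries only the count |n|, and the n GLuint ids
+// are memcpy'd inline right after it (ImmediateDataAddress(this)). Because the
+// total size varies with n, SetHeader(n) uses
+// SetCmdByTotalSize<ValueType>(ComputeSize(n)) instead of the fixed-size
+// SetCmd<ValueType>(). A sketch, with |space| standing in for ComputeSize(n)
+// bytes of command-buffer memory (illustrative names only):
+//
+//   GLuint ids[2] = {1, 2};
+//   void* space = /* GenBuffersImmediate::ComputeSize(2) bytes */;
+//   GenBuffersImmediate tmp;
+//   void* next = tmp.Set(space, 2, ids);  // struct + 2 * sizeof(GLuint) consumed
+//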
+struct GenerateMipmap {
+ typedef GenerateMipmap ValueType;
+ static const CommandId kCmdId = kGenerateMipmap;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target) {
+ SetHeader();
+ target = _target;
+ }
+
+ void* Set(void* cmd, GLenum _target) {
+ static_cast<ValueType*>(cmd)->Init(_target);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+};
+
+COMPILE_ASSERT(sizeof(GenerateMipmap) == 8, Sizeof_GenerateMipmap_is_not_8);
+COMPILE_ASSERT(offsetof(GenerateMipmap, header) == 0,
+ OffsetOf_GenerateMipmap_header_not_0);
+COMPILE_ASSERT(offsetof(GenerateMipmap, target) == 4,
+ OffsetOf_GenerateMipmap_target_not_4);
+
+struct GenFramebuffersImmediate {
+ typedef GenFramebuffersImmediate ValueType;
+ static const CommandId kCmdId = kGenFramebuffersImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, GLuint* _framebuffers) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _framebuffers, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, GLuint* _framebuffers) {
+ static_cast<ValueType*>(cmd)->Init(_n, _framebuffers);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(GenFramebuffersImmediate) == 8,
+ Sizeof_GenFramebuffersImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(GenFramebuffersImmediate, header) == 0,
+ OffsetOf_GenFramebuffersImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(GenFramebuffersImmediate, n) == 4,
+ OffsetOf_GenFramebuffersImmediate_n_not_4);
+
+struct GenRenderbuffersImmediate {
+ typedef GenRenderbuffersImmediate ValueType;
+ static const CommandId kCmdId = kGenRenderbuffersImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, GLuint* _renderbuffers) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _renderbuffers, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, GLuint* _renderbuffers) {
+ static_cast<ValueType*>(cmd)->Init(_n, _renderbuffers);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(GenRenderbuffersImmediate) == 8,
+ Sizeof_GenRenderbuffersImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(GenRenderbuffersImmediate, header) == 0,
+ OffsetOf_GenRenderbuffersImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(GenRenderbuffersImmediate, n) == 4,
+ OffsetOf_GenRenderbuffersImmediate_n_not_4);
+
+struct GenTexturesImmediate {
+ typedef GenTexturesImmediate ValueType;
+ static const CommandId kCmdId = kGenTexturesImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, GLuint* _textures) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _textures, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, GLuint* _textures) {
+ static_cast<ValueType*>(cmd)->Init(_n, _textures);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(GenTexturesImmediate) == 8,
+ Sizeof_GenTexturesImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(GenTexturesImmediate, header) == 0,
+ OffsetOf_GenTexturesImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(GenTexturesImmediate, n) == 4,
+ OffsetOf_GenTexturesImmediate_n_not_4);
+
+struct GetActiveAttrib {
+ typedef GetActiveAttrib ValueType;
+ static const CommandId kCmdId = kGetActiveAttrib;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ struct Result {
+ int32_t success;
+ int32_t size;
+ uint32_t type;
+ };
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ GLuint _index,
+ uint32_t _name_bucket_id,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ program = _program;
+ index = _index;
+ name_bucket_id = _name_bucket_id;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLuint _index,
+ uint32_t _name_bucket_id,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(
+ _program, _index, _name_bucket_id, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t index;
+ uint32_t name_bucket_id;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetActiveAttrib) == 24, Sizeof_GetActiveAttrib_is_not_24);
+COMPILE_ASSERT(offsetof(GetActiveAttrib, header) == 0,
+ OffsetOf_GetActiveAttrib_header_not_0);
+COMPILE_ASSERT(offsetof(GetActiveAttrib, program) == 4,
+ OffsetOf_GetActiveAttrib_program_not_4);
+COMPILE_ASSERT(offsetof(GetActiveAttrib, index) == 8,
+ OffsetOf_GetActiveAttrib_index_not_8);
+COMPILE_ASSERT(offsetof(GetActiveAttrib, name_bucket_id) == 12,
+ OffsetOf_GetActiveAttrib_name_bucket_id_not_12);
+COMPILE_ASSERT(offsetof(GetActiveAttrib, result_shm_id) == 16,
+ OffsetOf_GetActiveAttrib_result_shm_id_not_16);
+COMPILE_ASSERT(offsetof(GetActiveAttrib, result_shm_offset) == 20,
+ OffsetOf_GetActiveAttrib_result_shm_offset_not_20);
+COMPILE_ASSERT(offsetof(GetActiveAttrib::Result, success) == 0,
+ OffsetOf_GetActiveAttrib_Result_success_not_0);
+COMPILE_ASSERT(offsetof(GetActiveAttrib::Result, size) == 4,
+ OffsetOf_GetActiveAttrib_Result_size_not_4);
+COMPILE_ASSERT(offsetof(GetActiveAttrib::Result, type) == 8,
+ OffsetOf_GetActiveAttrib_Result_type_not_8);
+
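+// GetActiveAttrib is representative of the query commands that return data to
+// the client: |result_shm_id| / |result_shm_offset| name a location in shared
+// (transfer) memory where the service side writes the Result struct declared
+// above (success / size / type here), while the attribute name itself comes
+// back through the bucket identified by |name_bucket_id|. The Get*v commands
+// below use the same shared-memory mechanism with SizedResult<T> payloads.
+// The Result offsets are pinned by COMPILE_ASSERTs so that client and service
+// agree on the layout on both sides of the command buffer.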
+struct GetActiveUniform {
+ typedef GetActiveUniform ValueType;
+ static const CommandId kCmdId = kGetActiveUniform;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ struct Result {
+ int32_t success;
+ int32_t size;
+ uint32_t type;
+ };
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ GLuint _index,
+ uint32_t _name_bucket_id,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ program = _program;
+ index = _index;
+ name_bucket_id = _name_bucket_id;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLuint _index,
+ uint32_t _name_bucket_id,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(
+ _program, _index, _name_bucket_id, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t index;
+ uint32_t name_bucket_id;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetActiveUniform) == 24,
+ Sizeof_GetActiveUniform_is_not_24);
+COMPILE_ASSERT(offsetof(GetActiveUniform, header) == 0,
+ OffsetOf_GetActiveUniform_header_not_0);
+COMPILE_ASSERT(offsetof(GetActiveUniform, program) == 4,
+ OffsetOf_GetActiveUniform_program_not_4);
+COMPILE_ASSERT(offsetof(GetActiveUniform, index) == 8,
+ OffsetOf_GetActiveUniform_index_not_8);
+COMPILE_ASSERT(offsetof(GetActiveUniform, name_bucket_id) == 12,
+ OffsetOf_GetActiveUniform_name_bucket_id_not_12);
+COMPILE_ASSERT(offsetof(GetActiveUniform, result_shm_id) == 16,
+ OffsetOf_GetActiveUniform_result_shm_id_not_16);
+COMPILE_ASSERT(offsetof(GetActiveUniform, result_shm_offset) == 20,
+ OffsetOf_GetActiveUniform_result_shm_offset_not_20);
+COMPILE_ASSERT(offsetof(GetActiveUniform::Result, success) == 0,
+ OffsetOf_GetActiveUniform_Result_success_not_0);
+COMPILE_ASSERT(offsetof(GetActiveUniform::Result, size) == 4,
+ OffsetOf_GetActiveUniform_Result_size_not_4);
+COMPILE_ASSERT(offsetof(GetActiveUniform::Result, type) == 8,
+ OffsetOf_GetActiveUniform_Result_type_not_8);
+
+struct GetAttachedShaders {
+ typedef GetAttachedShaders ValueType;
+ static const CommandId kCmdId = kGetAttachedShaders;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLuint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset,
+ uint32_t _result_size) {
+ SetHeader();
+ program = _program;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ result_size = _result_size;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset,
+ uint32_t _result_size) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_program, _result_shm_id, _result_shm_offset, _result_size);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+ uint32_t result_size;
+};
+
+COMPILE_ASSERT(sizeof(GetAttachedShaders) == 20,
+ Sizeof_GetAttachedShaders_is_not_20);
+COMPILE_ASSERT(offsetof(GetAttachedShaders, header) == 0,
+ OffsetOf_GetAttachedShaders_header_not_0);
+COMPILE_ASSERT(offsetof(GetAttachedShaders, program) == 4,
+ OffsetOf_GetAttachedShaders_program_not_4);
+COMPILE_ASSERT(offsetof(GetAttachedShaders, result_shm_id) == 8,
+ OffsetOf_GetAttachedShaders_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(GetAttachedShaders, result_shm_offset) == 12,
+ OffsetOf_GetAttachedShaders_result_shm_offset_not_12);
+COMPILE_ASSERT(offsetof(GetAttachedShaders, result_size) == 16,
+ OffsetOf_GetAttachedShaders_result_size_not_16);
+
+struct GetAttribLocation {
+ typedef GetAttribLocation ValueType;
+ static const CommandId kCmdId = kGetAttribLocation;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef GLint Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ uint32_t _name_bucket_id,
+ uint32_t _location_shm_id,
+ uint32_t _location_shm_offset) {
+ SetHeader();
+ program = _program;
+ name_bucket_id = _name_bucket_id;
+ location_shm_id = _location_shm_id;
+ location_shm_offset = _location_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ uint32_t _name_bucket_id,
+ uint32_t _location_shm_id,
+ uint32_t _location_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(
+ _program, _name_bucket_id, _location_shm_id, _location_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t name_bucket_id;
+ uint32_t location_shm_id;
+ uint32_t location_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetAttribLocation) == 20,
+ Sizeof_GetAttribLocation_is_not_20);
+COMPILE_ASSERT(offsetof(GetAttribLocation, header) == 0,
+ OffsetOf_GetAttribLocation_header_not_0);
+COMPILE_ASSERT(offsetof(GetAttribLocation, program) == 4,
+ OffsetOf_GetAttribLocation_program_not_4);
+COMPILE_ASSERT(offsetof(GetAttribLocation, name_bucket_id) == 8,
+ OffsetOf_GetAttribLocation_name_bucket_id_not_8);
+COMPILE_ASSERT(offsetof(GetAttribLocation, location_shm_id) == 12,
+ OffsetOf_GetAttribLocation_location_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetAttribLocation, location_shm_offset) == 16,
+ OffsetOf_GetAttribLocation_location_shm_offset_not_16);
+
+struct GetBooleanv {
+ typedef GetBooleanv ValueType;
+ static const CommandId kCmdId = kGetBooleanv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLboolean> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetBooleanv) == 16, Sizeof_GetBooleanv_is_not_16);
+COMPILE_ASSERT(offsetof(GetBooleanv, header) == 0,
+ OffsetOf_GetBooleanv_header_not_0);
+COMPILE_ASSERT(offsetof(GetBooleanv, pname) == 4,
+ OffsetOf_GetBooleanv_pname_not_4);
+COMPILE_ASSERT(offsetof(GetBooleanv, params_shm_id) == 8,
+ OffsetOf_GetBooleanv_params_shm_id_not_8);
+COMPILE_ASSERT(offsetof(GetBooleanv, params_shm_offset) == 12,
+ OffsetOf_GetBooleanv_params_shm_offset_not_12);
+
+struct GetBufferParameteriv {
+ typedef GetBufferParameteriv ValueType;
+ static const CommandId kCmdId = kGetBufferParameteriv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ target = _target;
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetBufferParameteriv) == 20,
+ Sizeof_GetBufferParameteriv_is_not_20);
+COMPILE_ASSERT(offsetof(GetBufferParameteriv, header) == 0,
+ OffsetOf_GetBufferParameteriv_header_not_0);
+COMPILE_ASSERT(offsetof(GetBufferParameteriv, target) == 4,
+ OffsetOf_GetBufferParameteriv_target_not_4);
+COMPILE_ASSERT(offsetof(GetBufferParameteriv, pname) == 8,
+ OffsetOf_GetBufferParameteriv_pname_not_8);
+COMPILE_ASSERT(offsetof(GetBufferParameteriv, params_shm_id) == 12,
+ OffsetOf_GetBufferParameteriv_params_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetBufferParameteriv, params_shm_offset) == 16,
+ OffsetOf_GetBufferParameteriv_params_shm_offset_not_16);
+
+struct GetError {
+ typedef GetError ValueType;
+ static const CommandId kCmdId = kGetError;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef GLenum Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(uint32_t _result_shm_id, uint32_t _result_shm_offset) {
+ SetHeader();
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd, uint32_t _result_shm_id, uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetError) == 12, Sizeof_GetError_is_not_12);
+COMPILE_ASSERT(offsetof(GetError, header) == 0, OffsetOf_GetError_header_not_0);
+COMPILE_ASSERT(offsetof(GetError, result_shm_id) == 4,
+ OffsetOf_GetError_result_shm_id_not_4);
+COMPILE_ASSERT(offsetof(GetError, result_shm_offset) == 8,
+ OffsetOf_GetError_result_shm_offset_not_8);
+
+struct GetFloatv {
+ typedef GetFloatv ValueType;
+ static const CommandId kCmdId = kGetFloatv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLfloat> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetFloatv) == 16, Sizeof_GetFloatv_is_not_16);
+COMPILE_ASSERT(offsetof(GetFloatv, header) == 0,
+ OffsetOf_GetFloatv_header_not_0);
+COMPILE_ASSERT(offsetof(GetFloatv, pname) == 4, OffsetOf_GetFloatv_pname_not_4);
+COMPILE_ASSERT(offsetof(GetFloatv, params_shm_id) == 8,
+ OffsetOf_GetFloatv_params_shm_id_not_8);
+COMPILE_ASSERT(offsetof(GetFloatv, params_shm_offset) == 12,
+ OffsetOf_GetFloatv_params_shm_offset_not_12);
+
+struct GetFramebufferAttachmentParameteriv {
+ typedef GetFramebufferAttachmentParameteriv ValueType;
+ static const CommandId kCmdId = kGetFramebufferAttachmentParameteriv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLenum _attachment,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ target = _target;
+ attachment = _attachment;
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLenum _attachment,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(
+ _target, _attachment, _pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t attachment;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetFramebufferAttachmentParameteriv) == 24,
+ Sizeof_GetFramebufferAttachmentParameteriv_is_not_24);
+COMPILE_ASSERT(offsetof(GetFramebufferAttachmentParameteriv, header) == 0,
+ OffsetOf_GetFramebufferAttachmentParameteriv_header_not_0);
+COMPILE_ASSERT(offsetof(GetFramebufferAttachmentParameteriv, target) == 4,
+ OffsetOf_GetFramebufferAttachmentParameteriv_target_not_4);
+COMPILE_ASSERT(offsetof(GetFramebufferAttachmentParameteriv, attachment) == 8,
+ OffsetOf_GetFramebufferAttachmentParameteriv_attachment_not_8);
+COMPILE_ASSERT(offsetof(GetFramebufferAttachmentParameteriv, pname) == 12,
+ OffsetOf_GetFramebufferAttachmentParameteriv_pname_not_12);
+COMPILE_ASSERT(
+ offsetof(GetFramebufferAttachmentParameteriv, params_shm_id) == 16,
+ OffsetOf_GetFramebufferAttachmentParameteriv_params_shm_id_not_16);
+COMPILE_ASSERT(
+ offsetof(GetFramebufferAttachmentParameteriv, params_shm_offset) == 20,
+ OffsetOf_GetFramebufferAttachmentParameteriv_params_shm_offset_not_20);
+
+struct GetIntegerv {
+ typedef GetIntegerv ValueType;
+ static const CommandId kCmdId = kGetIntegerv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetIntegerv) == 16, Sizeof_GetIntegerv_is_not_16);
+COMPILE_ASSERT(offsetof(GetIntegerv, header) == 0,
+ OffsetOf_GetIntegerv_header_not_0);
+COMPILE_ASSERT(offsetof(GetIntegerv, pname) == 4,
+ OffsetOf_GetIntegerv_pname_not_4);
+COMPILE_ASSERT(offsetof(GetIntegerv, params_shm_id) == 8,
+ OffsetOf_GetIntegerv_params_shm_id_not_8);
+COMPILE_ASSERT(offsetof(GetIntegerv, params_shm_offset) == 12,
+ OffsetOf_GetIntegerv_params_shm_offset_not_12);
+
+struct GetProgramiv {
+ typedef GetProgramiv ValueType;
+ static const CommandId kCmdId = kGetProgramiv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ program = _program;
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_program, _pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetProgramiv) == 20, Sizeof_GetProgramiv_is_not_20);
+COMPILE_ASSERT(offsetof(GetProgramiv, header) == 0,
+ OffsetOf_GetProgramiv_header_not_0);
+COMPILE_ASSERT(offsetof(GetProgramiv, program) == 4,
+ OffsetOf_GetProgramiv_program_not_4);
+COMPILE_ASSERT(offsetof(GetProgramiv, pname) == 8,
+ OffsetOf_GetProgramiv_pname_not_8);
+COMPILE_ASSERT(offsetof(GetProgramiv, params_shm_id) == 12,
+ OffsetOf_GetProgramiv_params_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetProgramiv, params_shm_offset) == 16,
+ OffsetOf_GetProgramiv_params_shm_offset_not_16);
+
+struct GetProgramInfoLog {
+ typedef GetProgramInfoLog ValueType;
+ static const CommandId kCmdId = kGetProgramInfoLog;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program, uint32_t _bucket_id) {
+ SetHeader();
+ program = _program;
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, GLuint _program, uint32_t _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_program, _bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(GetProgramInfoLog) == 12,
+ Sizeof_GetProgramInfoLog_is_not_12);
+COMPILE_ASSERT(offsetof(GetProgramInfoLog, header) == 0,
+ OffsetOf_GetProgramInfoLog_header_not_0);
+COMPILE_ASSERT(offsetof(GetProgramInfoLog, program) == 4,
+ OffsetOf_GetProgramInfoLog_program_not_4);
+COMPILE_ASSERT(offsetof(GetProgramInfoLog, bucket_id) == 8,
+ OffsetOf_GetProgramInfoLog_bucket_id_not_8);
+
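+// GetProgramInfoLog (and GetShaderInfoLog / GetShaderSource / GetString below)
+// returns variable-length string data through a bucket rather than through a
+// fixed shared-memory Result: |bucket_id| names a bucket that the service
+// fills with the text and that the client reads back separately with the
+// generic bucket-transfer commands. Only the object handle and the bucket id
+// cross the command buffer, so the command itself stays a fixed 12 bytes.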
+struct GetRenderbufferParameteriv {
+ typedef GetRenderbufferParameteriv ValueType;
+ static const CommandId kCmdId = kGetRenderbufferParameteriv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ target = _target;
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetRenderbufferParameteriv) == 20,
+ Sizeof_GetRenderbufferParameteriv_is_not_20);
+COMPILE_ASSERT(offsetof(GetRenderbufferParameteriv, header) == 0,
+ OffsetOf_GetRenderbufferParameteriv_header_not_0);
+COMPILE_ASSERT(offsetof(GetRenderbufferParameteriv, target) == 4,
+ OffsetOf_GetRenderbufferParameteriv_target_not_4);
+COMPILE_ASSERT(offsetof(GetRenderbufferParameteriv, pname) == 8,
+ OffsetOf_GetRenderbufferParameteriv_pname_not_8);
+COMPILE_ASSERT(offsetof(GetRenderbufferParameteriv, params_shm_id) == 12,
+ OffsetOf_GetRenderbufferParameteriv_params_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetRenderbufferParameteriv, params_shm_offset) == 16,
+ OffsetOf_GetRenderbufferParameteriv_params_shm_offset_not_16);
+
+struct GetShaderiv {
+ typedef GetShaderiv ValueType;
+ static const CommandId kCmdId = kGetShaderiv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _shader,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ shader = _shader;
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _shader,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_shader, _pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t shader;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetShaderiv) == 20, Sizeof_GetShaderiv_is_not_20);
+COMPILE_ASSERT(offsetof(GetShaderiv, header) == 0,
+ OffsetOf_GetShaderiv_header_not_0);
+COMPILE_ASSERT(offsetof(GetShaderiv, shader) == 4,
+ OffsetOf_GetShaderiv_shader_not_4);
+COMPILE_ASSERT(offsetof(GetShaderiv, pname) == 8,
+ OffsetOf_GetShaderiv_pname_not_8);
+COMPILE_ASSERT(offsetof(GetShaderiv, params_shm_id) == 12,
+ OffsetOf_GetShaderiv_params_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetShaderiv, params_shm_offset) == 16,
+ OffsetOf_GetShaderiv_params_shm_offset_not_16);
+
+struct GetShaderInfoLog {
+ typedef GetShaderInfoLog ValueType;
+ static const CommandId kCmdId = kGetShaderInfoLog;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _shader, uint32_t _bucket_id) {
+ SetHeader();
+ shader = _shader;
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, GLuint _shader, uint32_t _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_shader, _bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t shader;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(GetShaderInfoLog) == 12,
+ Sizeof_GetShaderInfoLog_is_not_12);
+COMPILE_ASSERT(offsetof(GetShaderInfoLog, header) == 0,
+ OffsetOf_GetShaderInfoLog_header_not_0);
+COMPILE_ASSERT(offsetof(GetShaderInfoLog, shader) == 4,
+ OffsetOf_GetShaderInfoLog_shader_not_4);
+COMPILE_ASSERT(offsetof(GetShaderInfoLog, bucket_id) == 8,
+ OffsetOf_GetShaderInfoLog_bucket_id_not_8);
+
+struct GetShaderPrecisionFormat {
+ typedef GetShaderPrecisionFormat ValueType;
+ static const CommandId kCmdId = kGetShaderPrecisionFormat;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ struct Result {
+ int32_t success;
+ int32_t min_range;
+ int32_t max_range;
+ int32_t precision;
+ };
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _shadertype,
+ GLenum _precisiontype,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ shadertype = _shadertype;
+ precisiontype = _precisiontype;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _shadertype,
+ GLenum _precisiontype,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_shadertype, _precisiontype, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t shadertype;
+ uint32_t precisiontype;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetShaderPrecisionFormat) == 20,
+ Sizeof_GetShaderPrecisionFormat_is_not_20);
+COMPILE_ASSERT(offsetof(GetShaderPrecisionFormat, header) == 0,
+ OffsetOf_GetShaderPrecisionFormat_header_not_0);
+COMPILE_ASSERT(offsetof(GetShaderPrecisionFormat, shadertype) == 4,
+ OffsetOf_GetShaderPrecisionFormat_shadertype_not_4);
+COMPILE_ASSERT(offsetof(GetShaderPrecisionFormat, precisiontype) == 8,
+ OffsetOf_GetShaderPrecisionFormat_precisiontype_not_8);
+COMPILE_ASSERT(offsetof(GetShaderPrecisionFormat, result_shm_id) == 12,
+ OffsetOf_GetShaderPrecisionFormat_result_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetShaderPrecisionFormat, result_shm_offset) == 16,
+ OffsetOf_GetShaderPrecisionFormat_result_shm_offset_not_16);
+COMPILE_ASSERT(offsetof(GetShaderPrecisionFormat::Result, success) == 0,
+ OffsetOf_GetShaderPrecisionFormat_Result_success_not_0);
+COMPILE_ASSERT(offsetof(GetShaderPrecisionFormat::Result, min_range) == 4,
+ OffsetOf_GetShaderPrecisionFormat_Result_min_range_not_4);
+COMPILE_ASSERT(offsetof(GetShaderPrecisionFormat::Result, max_range) == 8,
+ OffsetOf_GetShaderPrecisionFormat_Result_max_range_not_8);
+COMPILE_ASSERT(offsetof(GetShaderPrecisionFormat::Result, precision) == 12,
+ OffsetOf_GetShaderPrecisionFormat_Result_precision_not_12);
+
+struct GetShaderSource {
+ typedef GetShaderSource ValueType;
+ static const CommandId kCmdId = kGetShaderSource;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _shader, uint32_t _bucket_id) {
+ SetHeader();
+ shader = _shader;
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, GLuint _shader, uint32_t _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_shader, _bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t shader;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(GetShaderSource) == 12, Sizeof_GetShaderSource_is_not_12);
+COMPILE_ASSERT(offsetof(GetShaderSource, header) == 0,
+ OffsetOf_GetShaderSource_header_not_0);
+COMPILE_ASSERT(offsetof(GetShaderSource, shader) == 4,
+ OffsetOf_GetShaderSource_shader_not_4);
+COMPILE_ASSERT(offsetof(GetShaderSource, bucket_id) == 8,
+ OffsetOf_GetShaderSource_bucket_id_not_8);
+
+struct GetString {
+ typedef GetString ValueType;
+ static const CommandId kCmdId = kGetString;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _name, uint32_t _bucket_id) {
+ SetHeader();
+ name = _name;
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, GLenum _name, uint32_t _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_name, _bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t name;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(GetString) == 12, Sizeof_GetString_is_not_12);
+COMPILE_ASSERT(offsetof(GetString, header) == 0,
+ OffsetOf_GetString_header_not_0);
+COMPILE_ASSERT(offsetof(GetString, name) == 4, OffsetOf_GetString_name_not_4);
+COMPILE_ASSERT(offsetof(GetString, bucket_id) == 8,
+ OffsetOf_GetString_bucket_id_not_8);
+
+struct GetTexParameterfv {
+ typedef GetTexParameterfv ValueType;
+ static const CommandId kCmdId = kGetTexParameterfv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLfloat> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ target = _target;
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetTexParameterfv) == 20,
+ Sizeof_GetTexParameterfv_is_not_20);
+COMPILE_ASSERT(offsetof(GetTexParameterfv, header) == 0,
+ OffsetOf_GetTexParameterfv_header_not_0);
+COMPILE_ASSERT(offsetof(GetTexParameterfv, target) == 4,
+ OffsetOf_GetTexParameterfv_target_not_4);
+COMPILE_ASSERT(offsetof(GetTexParameterfv, pname) == 8,
+ OffsetOf_GetTexParameterfv_pname_not_8);
+COMPILE_ASSERT(offsetof(GetTexParameterfv, params_shm_id) == 12,
+ OffsetOf_GetTexParameterfv_params_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetTexParameterfv, params_shm_offset) == 16,
+ OffsetOf_GetTexParameterfv_params_shm_offset_not_16);
+
+struct GetTexParameteriv {
+ typedef GetTexParameteriv ValueType;
+ static const CommandId kCmdId = kGetTexParameteriv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ target = _target;
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetTexParameteriv) == 20,
+ Sizeof_GetTexParameteriv_is_not_20);
+COMPILE_ASSERT(offsetof(GetTexParameteriv, header) == 0,
+ OffsetOf_GetTexParameteriv_header_not_0);
+COMPILE_ASSERT(offsetof(GetTexParameteriv, target) == 4,
+ OffsetOf_GetTexParameteriv_target_not_4);
+COMPILE_ASSERT(offsetof(GetTexParameteriv, pname) == 8,
+ OffsetOf_GetTexParameteriv_pname_not_8);
+COMPILE_ASSERT(offsetof(GetTexParameteriv, params_shm_id) == 12,
+ OffsetOf_GetTexParameteriv_params_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetTexParameteriv, params_shm_offset) == 16,
+ OffsetOf_GetTexParameteriv_params_shm_offset_not_16);
+
+struct GetUniformfv {
+ typedef GetUniformfv ValueType;
+ static const CommandId kCmdId = kGetUniformfv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLfloat> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ GLint _location,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ program = _program;
+ location = _location;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLint _location,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_program, _location, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ int32_t location;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetUniformfv) == 20, Sizeof_GetUniformfv_is_not_20);
+COMPILE_ASSERT(offsetof(GetUniformfv, header) == 0,
+ OffsetOf_GetUniformfv_header_not_0);
+COMPILE_ASSERT(offsetof(GetUniformfv, program) == 4,
+ OffsetOf_GetUniformfv_program_not_4);
+COMPILE_ASSERT(offsetof(GetUniformfv, location) == 8,
+ OffsetOf_GetUniformfv_location_not_8);
+COMPILE_ASSERT(offsetof(GetUniformfv, params_shm_id) == 12,
+ OffsetOf_GetUniformfv_params_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetUniformfv, params_shm_offset) == 16,
+ OffsetOf_GetUniformfv_params_shm_offset_not_16);
+
+struct GetUniformiv {
+ typedef GetUniformiv ValueType;
+ static const CommandId kCmdId = kGetUniformiv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ GLint _location,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ program = _program;
+ location = _location;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLint _location,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_program, _location, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ int32_t location;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetUniformiv) == 20, Sizeof_GetUniformiv_is_not_20);
+COMPILE_ASSERT(offsetof(GetUniformiv, header) == 0,
+ OffsetOf_GetUniformiv_header_not_0);
+COMPILE_ASSERT(offsetof(GetUniformiv, program) == 4,
+ OffsetOf_GetUniformiv_program_not_4);
+COMPILE_ASSERT(offsetof(GetUniformiv, location) == 8,
+ OffsetOf_GetUniformiv_location_not_8);
+COMPILE_ASSERT(offsetof(GetUniformiv, params_shm_id) == 12,
+ OffsetOf_GetUniformiv_params_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetUniformiv, params_shm_offset) == 16,
+ OffsetOf_GetUniformiv_params_shm_offset_not_16);
+
+struct GetUniformLocation {
+ typedef GetUniformLocation ValueType;
+ static const CommandId kCmdId = kGetUniformLocation;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef GLint Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ uint32_t _name_bucket_id,
+ uint32_t _location_shm_id,
+ uint32_t _location_shm_offset) {
+ SetHeader();
+ program = _program;
+ name_bucket_id = _name_bucket_id;
+ location_shm_id = _location_shm_id;
+ location_shm_offset = _location_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ uint32_t _name_bucket_id,
+ uint32_t _location_shm_id,
+ uint32_t _location_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(
+ _program, _name_bucket_id, _location_shm_id, _location_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t name_bucket_id;
+ uint32_t location_shm_id;
+ uint32_t location_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetUniformLocation) == 20,
+ Sizeof_GetUniformLocation_is_not_20);
+COMPILE_ASSERT(offsetof(GetUniformLocation, header) == 0,
+ OffsetOf_GetUniformLocation_header_not_0);
+COMPILE_ASSERT(offsetof(GetUniformLocation, program) == 4,
+ OffsetOf_GetUniformLocation_program_not_4);
+COMPILE_ASSERT(offsetof(GetUniformLocation, name_bucket_id) == 8,
+ OffsetOf_GetUniformLocation_name_bucket_id_not_8);
+COMPILE_ASSERT(offsetof(GetUniformLocation, location_shm_id) == 12,
+ OffsetOf_GetUniformLocation_location_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetUniformLocation, location_shm_offset) == 16,
+ OffsetOf_GetUniformLocation_location_shm_offset_not_16);
+
+struct GetVertexAttribfv {
+ typedef GetVertexAttribfv ValueType;
+ static const CommandId kCmdId = kGetVertexAttribfv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLfloat> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _index,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ index = _index;
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _index,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_index, _pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t index;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetVertexAttribfv) == 20,
+ Sizeof_GetVertexAttribfv_is_not_20);
+COMPILE_ASSERT(offsetof(GetVertexAttribfv, header) == 0,
+ OffsetOf_GetVertexAttribfv_header_not_0);
+COMPILE_ASSERT(offsetof(GetVertexAttribfv, index) == 4,
+ OffsetOf_GetVertexAttribfv_index_not_4);
+COMPILE_ASSERT(offsetof(GetVertexAttribfv, pname) == 8,
+ OffsetOf_GetVertexAttribfv_pname_not_8);
+COMPILE_ASSERT(offsetof(GetVertexAttribfv, params_shm_id) == 12,
+ OffsetOf_GetVertexAttribfv_params_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetVertexAttribfv, params_shm_offset) == 16,
+ OffsetOf_GetVertexAttribfv_params_shm_offset_not_16);
+
+struct GetVertexAttribiv {
+ typedef GetVertexAttribiv ValueType;
+ static const CommandId kCmdId = kGetVertexAttribiv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _index,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ index = _index;
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _index,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_index, _pname, _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t index;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetVertexAttribiv) == 20,
+ Sizeof_GetVertexAttribiv_is_not_20);
+COMPILE_ASSERT(offsetof(GetVertexAttribiv, header) == 0,
+ OffsetOf_GetVertexAttribiv_header_not_0);
+COMPILE_ASSERT(offsetof(GetVertexAttribiv, index) == 4,
+ OffsetOf_GetVertexAttribiv_index_not_4);
+COMPILE_ASSERT(offsetof(GetVertexAttribiv, pname) == 8,
+ OffsetOf_GetVertexAttribiv_pname_not_8);
+COMPILE_ASSERT(offsetof(GetVertexAttribiv, params_shm_id) == 12,
+ OffsetOf_GetVertexAttribiv_params_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetVertexAttribiv, params_shm_offset) == 16,
+ OffsetOf_GetVertexAttribiv_params_shm_offset_not_16);
+
+struct GetVertexAttribPointerv {
+ typedef GetVertexAttribPointerv ValueType;
+ static const CommandId kCmdId = kGetVertexAttribPointerv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLuint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _index,
+ GLenum _pname,
+ uint32_t _pointer_shm_id,
+ uint32_t _pointer_shm_offset) {
+ SetHeader();
+ index = _index;
+ pname = _pname;
+ pointer_shm_id = _pointer_shm_id;
+ pointer_shm_offset = _pointer_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _index,
+ GLenum _pname,
+ uint32_t _pointer_shm_id,
+ uint32_t _pointer_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_index, _pname, _pointer_shm_id, _pointer_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t index;
+ uint32_t pname;
+ uint32_t pointer_shm_id;
+ uint32_t pointer_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetVertexAttribPointerv) == 20,
+ Sizeof_GetVertexAttribPointerv_is_not_20);
+COMPILE_ASSERT(offsetof(GetVertexAttribPointerv, header) == 0,
+ OffsetOf_GetVertexAttribPointerv_header_not_0);
+COMPILE_ASSERT(offsetof(GetVertexAttribPointerv, index) == 4,
+ OffsetOf_GetVertexAttribPointerv_index_not_4);
+COMPILE_ASSERT(offsetof(GetVertexAttribPointerv, pname) == 8,
+ OffsetOf_GetVertexAttribPointerv_pname_not_8);
+COMPILE_ASSERT(offsetof(GetVertexAttribPointerv, pointer_shm_id) == 12,
+ OffsetOf_GetVertexAttribPointerv_pointer_shm_id_not_12);
+COMPILE_ASSERT(offsetof(GetVertexAttribPointerv, pointer_shm_offset) == 16,
+ OffsetOf_GetVertexAttribPointerv_pointer_shm_offset_not_16);
+
+struct Hint {
+ typedef Hint ValueType;
+ static const CommandId kCmdId = kHint;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLenum _mode) {
+ SetHeader();
+ target = _target;
+ mode = _mode;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLenum _mode) {
+ static_cast<ValueType*>(cmd)->Init(_target, _mode);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t mode;
+};
+
+COMPILE_ASSERT(sizeof(Hint) == 12, Sizeof_Hint_is_not_12);
+COMPILE_ASSERT(offsetof(Hint, header) == 0, OffsetOf_Hint_header_not_0);
+COMPILE_ASSERT(offsetof(Hint, target) == 4, OffsetOf_Hint_target_not_4);
+COMPILE_ASSERT(offsetof(Hint, mode) == 8, OffsetOf_Hint_mode_not_8);
+
+struct IsBuffer {
+ typedef IsBuffer ValueType;
+ static const CommandId kCmdId = kIsBuffer;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _buffer,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ buffer = _buffer;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _buffer,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_buffer, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t buffer;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(IsBuffer) == 16, Sizeof_IsBuffer_is_not_16);
+COMPILE_ASSERT(offsetof(IsBuffer, header) == 0, OffsetOf_IsBuffer_header_not_0);
+COMPILE_ASSERT(offsetof(IsBuffer, buffer) == 4, OffsetOf_IsBuffer_buffer_not_4);
+COMPILE_ASSERT(offsetof(IsBuffer, result_shm_id) == 8,
+ OffsetOf_IsBuffer_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(IsBuffer, result_shm_offset) == 12,
+ OffsetOf_IsBuffer_result_shm_offset_not_12);
+
+struct IsEnabled {
+ typedef IsEnabled ValueType;
+ static const CommandId kCmdId = kIsEnabled;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _cap, uint32_t _result_shm_id, uint32_t _result_shm_offset) {
+ SetHeader();
+ cap = _cap;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _cap,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_cap, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t cap;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(IsEnabled) == 16, Sizeof_IsEnabled_is_not_16);
+COMPILE_ASSERT(offsetof(IsEnabled, header) == 0,
+ OffsetOf_IsEnabled_header_not_0);
+COMPILE_ASSERT(offsetof(IsEnabled, cap) == 4, OffsetOf_IsEnabled_cap_not_4);
+COMPILE_ASSERT(offsetof(IsEnabled, result_shm_id) == 8,
+ OffsetOf_IsEnabled_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(IsEnabled, result_shm_offset) == 12,
+ OffsetOf_IsEnabled_result_shm_offset_not_12);
+
+struct IsFramebuffer {
+ typedef IsFramebuffer ValueType;
+ static const CommandId kCmdId = kIsFramebuffer;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _framebuffer,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ framebuffer = _framebuffer;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _framebuffer,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_framebuffer, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t framebuffer;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(IsFramebuffer) == 16, Sizeof_IsFramebuffer_is_not_16);
+COMPILE_ASSERT(offsetof(IsFramebuffer, header) == 0,
+ OffsetOf_IsFramebuffer_header_not_0);
+COMPILE_ASSERT(offsetof(IsFramebuffer, framebuffer) == 4,
+ OffsetOf_IsFramebuffer_framebuffer_not_4);
+COMPILE_ASSERT(offsetof(IsFramebuffer, result_shm_id) == 8,
+ OffsetOf_IsFramebuffer_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(IsFramebuffer, result_shm_offset) == 12,
+ OffsetOf_IsFramebuffer_result_shm_offset_not_12);
+
+struct IsProgram {
+ typedef IsProgram ValueType;
+ static const CommandId kCmdId = kIsProgram;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ program = _program;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_program, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(IsProgram) == 16, Sizeof_IsProgram_is_not_16);
+COMPILE_ASSERT(offsetof(IsProgram, header) == 0,
+ OffsetOf_IsProgram_header_not_0);
+COMPILE_ASSERT(offsetof(IsProgram, program) == 4,
+ OffsetOf_IsProgram_program_not_4);
+COMPILE_ASSERT(offsetof(IsProgram, result_shm_id) == 8,
+ OffsetOf_IsProgram_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(IsProgram, result_shm_offset) == 12,
+ OffsetOf_IsProgram_result_shm_offset_not_12);
+
+struct IsRenderbuffer {
+ typedef IsRenderbuffer ValueType;
+ static const CommandId kCmdId = kIsRenderbuffer;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _renderbuffer,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ renderbuffer = _renderbuffer;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _renderbuffer,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_renderbuffer, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t renderbuffer;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(IsRenderbuffer) == 16, Sizeof_IsRenderbuffer_is_not_16);
+COMPILE_ASSERT(offsetof(IsRenderbuffer, header) == 0,
+ OffsetOf_IsRenderbuffer_header_not_0);
+COMPILE_ASSERT(offsetof(IsRenderbuffer, renderbuffer) == 4,
+ OffsetOf_IsRenderbuffer_renderbuffer_not_4);
+COMPILE_ASSERT(offsetof(IsRenderbuffer, result_shm_id) == 8,
+ OffsetOf_IsRenderbuffer_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(IsRenderbuffer, result_shm_offset) == 12,
+ OffsetOf_IsRenderbuffer_result_shm_offset_not_12);
+
+struct IsShader {
+ typedef IsShader ValueType;
+ static const CommandId kCmdId = kIsShader;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _shader,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ shader = _shader;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _shader,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_shader, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t shader;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(IsShader) == 16, Sizeof_IsShader_is_not_16);
+COMPILE_ASSERT(offsetof(IsShader, header) == 0, OffsetOf_IsShader_header_not_0);
+COMPILE_ASSERT(offsetof(IsShader, shader) == 4, OffsetOf_IsShader_shader_not_4);
+COMPILE_ASSERT(offsetof(IsShader, result_shm_id) == 8,
+ OffsetOf_IsShader_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(IsShader, result_shm_offset) == 12,
+ OffsetOf_IsShader_result_shm_offset_not_12);
+
+struct IsTexture {
+ typedef IsTexture ValueType;
+ static const CommandId kCmdId = kIsTexture;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _texture,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ texture = _texture;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _texture,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_texture, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(IsTexture) == 16, Sizeof_IsTexture_is_not_16);
+COMPILE_ASSERT(offsetof(IsTexture, header) == 0,
+ OffsetOf_IsTexture_header_not_0);
+COMPILE_ASSERT(offsetof(IsTexture, texture) == 4,
+ OffsetOf_IsTexture_texture_not_4);
+COMPILE_ASSERT(offsetof(IsTexture, result_shm_id) == 8,
+ OffsetOf_IsTexture_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(IsTexture, result_shm_offset) == 12,
+ OffsetOf_IsTexture_result_shm_offset_not_12);
+
+struct LineWidth {
+ typedef LineWidth ValueType;
+ static const CommandId kCmdId = kLineWidth;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLfloat _width) {
+ SetHeader();
+ width = _width;
+ }
+
+ void* Set(void* cmd, GLfloat _width) {
+ static_cast<ValueType*>(cmd)->Init(_width);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ float width;
+};
+
+COMPILE_ASSERT(sizeof(LineWidth) == 8, Sizeof_LineWidth_is_not_8);
+COMPILE_ASSERT(offsetof(LineWidth, header) == 0,
+ OffsetOf_LineWidth_header_not_0);
+COMPILE_ASSERT(offsetof(LineWidth, width) == 4, OffsetOf_LineWidth_width_not_4);
+
+struct LinkProgram {
+ typedef LinkProgram ValueType;
+ static const CommandId kCmdId = kLinkProgram;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program) {
+ SetHeader();
+ program = _program;
+ }
+
+ void* Set(void* cmd, GLuint _program) {
+ static_cast<ValueType*>(cmd)->Init(_program);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+};
+
+COMPILE_ASSERT(sizeof(LinkProgram) == 8, Sizeof_LinkProgram_is_not_8);
+COMPILE_ASSERT(offsetof(LinkProgram, header) == 0,
+ OffsetOf_LinkProgram_header_not_0);
+COMPILE_ASSERT(offsetof(LinkProgram, program) == 4,
+ OffsetOf_LinkProgram_program_not_4);
+
+struct PixelStorei {
+ typedef PixelStorei ValueType;
+ static const CommandId kCmdId = kPixelStorei;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _pname, GLint _param) {
+ SetHeader();
+ pname = _pname;
+ param = _param;
+ }
+
+ void* Set(void* cmd, GLenum _pname, GLint _param) {
+ static_cast<ValueType*>(cmd)->Init(_pname, _param);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t pname;
+ int32_t param;
+};
+
+COMPILE_ASSERT(sizeof(PixelStorei) == 12, Sizeof_PixelStorei_is_not_12);
+COMPILE_ASSERT(offsetof(PixelStorei, header) == 0,
+ OffsetOf_PixelStorei_header_not_0);
+COMPILE_ASSERT(offsetof(PixelStorei, pname) == 4,
+ OffsetOf_PixelStorei_pname_not_4);
+COMPILE_ASSERT(offsetof(PixelStorei, param) == 8,
+ OffsetOf_PixelStorei_param_not_8);
+
+struct PolygonOffset {
+ typedef PolygonOffset ValueType;
+ static const CommandId kCmdId = kPolygonOffset;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLfloat _factor, GLfloat _units) {
+ SetHeader();
+ factor = _factor;
+ units = _units;
+ }
+
+ void* Set(void* cmd, GLfloat _factor, GLfloat _units) {
+ static_cast<ValueType*>(cmd)->Init(_factor, _units);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ float factor;
+ float units;
+};
+
+COMPILE_ASSERT(sizeof(PolygonOffset) == 12, Sizeof_PolygonOffset_is_not_12);
+COMPILE_ASSERT(offsetof(PolygonOffset, header) == 0,
+ OffsetOf_PolygonOffset_header_not_0);
+COMPILE_ASSERT(offsetof(PolygonOffset, factor) == 4,
+ OffsetOf_PolygonOffset_factor_not_4);
+COMPILE_ASSERT(offsetof(PolygonOffset, units) == 8,
+ OffsetOf_PolygonOffset_units_not_8);
+
+// ReadPixels keeps the result separate from the pixel buffer so that the
+// pixel data can be directed to a destination that exactly fits the
+// requested rectangle, independent of where the result value is written.
+struct ReadPixels {
+ typedef ReadPixels ValueType;
+ static const CommandId kCmdId = kReadPixels;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _x,
+ GLint _y,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type,
+ uint32_t _pixels_shm_id,
+ uint32_t _pixels_shm_offset,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset,
+ GLboolean _async) {
+ SetHeader();
+ x = _x;
+ y = _y;
+ width = _width;
+ height = _height;
+ format = _format;
+ type = _type;
+ pixels_shm_id = _pixels_shm_id;
+ pixels_shm_offset = _pixels_shm_offset;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ async = _async;
+ }
+
+ void* Set(void* cmd,
+ GLint _x,
+ GLint _y,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type,
+ uint32_t _pixels_shm_id,
+ uint32_t _pixels_shm_offset,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset,
+ GLboolean _async) {
+ static_cast<ValueType*>(cmd)->Init(_x,
+ _y,
+ _width,
+ _height,
+ _format,
+ _type,
+ _pixels_shm_id,
+ _pixels_shm_offset,
+ _result_shm_id,
+ _result_shm_offset,
+ _async);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t x;
+ int32_t y;
+ int32_t width;
+ int32_t height;
+ uint32_t format;
+ uint32_t type;
+ uint32_t pixels_shm_id;
+ uint32_t pixels_shm_offset;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+ uint32_t async;
+};
+
+COMPILE_ASSERT(sizeof(ReadPixels) == 48, Sizeof_ReadPixels_is_not_48);
+COMPILE_ASSERT(offsetof(ReadPixels, header) == 0,
+ OffsetOf_ReadPixels_header_not_0);
+COMPILE_ASSERT(offsetof(ReadPixels, x) == 4, OffsetOf_ReadPixels_x_not_4);
+COMPILE_ASSERT(offsetof(ReadPixels, y) == 8, OffsetOf_ReadPixels_y_not_8);
+COMPILE_ASSERT(offsetof(ReadPixels, width) == 12,
+ OffsetOf_ReadPixels_width_not_12);
+COMPILE_ASSERT(offsetof(ReadPixels, height) == 16,
+ OffsetOf_ReadPixels_height_not_16);
+COMPILE_ASSERT(offsetof(ReadPixels, format) == 20,
+ OffsetOf_ReadPixels_format_not_20);
+COMPILE_ASSERT(offsetof(ReadPixels, type) == 24,
+ OffsetOf_ReadPixels_type_not_24);
+COMPILE_ASSERT(offsetof(ReadPixels, pixels_shm_id) == 28,
+ OffsetOf_ReadPixels_pixels_shm_id_not_28);
+COMPILE_ASSERT(offsetof(ReadPixels, pixels_shm_offset) == 32,
+ OffsetOf_ReadPixels_pixels_shm_offset_not_32);
+COMPILE_ASSERT(offsetof(ReadPixels, result_shm_id) == 36,
+ OffsetOf_ReadPixels_result_shm_id_not_36);
+COMPILE_ASSERT(offsetof(ReadPixels, result_shm_offset) == 40,
+ OffsetOf_ReadPixels_result_shm_offset_not_40);
+COMPILE_ASSERT(offsetof(ReadPixels, async) == 44,
+ OffsetOf_ReadPixels_async_not_44);
+
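+// Note (illustrative sketch, not generated code): commands such as
+// ReadPixels are normally packed into the command buffer by client-side
+// helper code, roughly along these lines. The names "helper", the shm ids,
+// and the surrounding variables are hypothetical placeholders for this
+// example only; they are not defined by this file.
+//
+//   ReadPixels* c = helper->GetCmdSpace<ReadPixels>();
+//   if (c) {
+//     c->Init(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE,
+//             pixels_shm_id, pixels_shm_offset,
+//             result_shm_id, result_shm_offset, false /* async */);
+//   }
+//
+// Init() fills the fixed-size command in place; the Set() variant
+// additionally returns the address immediately after the command so the
+// caller can continue writing subsequent commands.
+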
+struct ReleaseShaderCompiler {
+ typedef ReleaseShaderCompiler ValueType;
+ static const CommandId kCmdId = kReleaseShaderCompiler;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(ReleaseShaderCompiler) == 4,
+ Sizeof_ReleaseShaderCompiler_is_not_4);
+COMPILE_ASSERT(offsetof(ReleaseShaderCompiler, header) == 0,
+ OffsetOf_ReleaseShaderCompiler_header_not_0);
+
+struct RenderbufferStorage {
+ typedef RenderbufferStorage ValueType;
+ static const CommandId kCmdId = kRenderbufferStorage;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height) {
+ SetHeader();
+ target = _target;
+ internalformat = _internalformat;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _internalformat, _width, _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t internalformat;
+ int32_t width;
+ int32_t height;
+};
+
+COMPILE_ASSERT(sizeof(RenderbufferStorage) == 20,
+ Sizeof_RenderbufferStorage_is_not_20);
+COMPILE_ASSERT(offsetof(RenderbufferStorage, header) == 0,
+ OffsetOf_RenderbufferStorage_header_not_0);
+COMPILE_ASSERT(offsetof(RenderbufferStorage, target) == 4,
+ OffsetOf_RenderbufferStorage_target_not_4);
+COMPILE_ASSERT(offsetof(RenderbufferStorage, internalformat) == 8,
+ OffsetOf_RenderbufferStorage_internalformat_not_8);
+COMPILE_ASSERT(offsetof(RenderbufferStorage, width) == 12,
+ OffsetOf_RenderbufferStorage_width_not_12);
+COMPILE_ASSERT(offsetof(RenderbufferStorage, height) == 16,
+ OffsetOf_RenderbufferStorage_height_not_16);
+
+struct SampleCoverage {
+ typedef SampleCoverage ValueType;
+ static const CommandId kCmdId = kSampleCoverage;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLclampf _value, GLboolean _invert) {
+ SetHeader();
+ value = _value;
+ invert = _invert;
+ }
+
+ void* Set(void* cmd, GLclampf _value, GLboolean _invert) {
+ static_cast<ValueType*>(cmd)->Init(_value, _invert);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ float value;
+ uint32_t invert;
+};
+
+COMPILE_ASSERT(sizeof(SampleCoverage) == 12, Sizeof_SampleCoverage_is_not_12);
+COMPILE_ASSERT(offsetof(SampleCoverage, header) == 0,
+ OffsetOf_SampleCoverage_header_not_0);
+COMPILE_ASSERT(offsetof(SampleCoverage, value) == 4,
+ OffsetOf_SampleCoverage_value_not_4);
+COMPILE_ASSERT(offsetof(SampleCoverage, invert) == 8,
+ OffsetOf_SampleCoverage_invert_not_8);
+
+struct Scissor {
+ typedef Scissor ValueType;
+ static const CommandId kCmdId = kScissor;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _x, GLint _y, GLsizei _width, GLsizei _height) {
+ SetHeader();
+ x = _x;
+ y = _y;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd, GLint _x, GLint _y, GLsizei _width, GLsizei _height) {
+ static_cast<ValueType*>(cmd)->Init(_x, _y, _width, _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t x;
+ int32_t y;
+ int32_t width;
+ int32_t height;
+};
+
+COMPILE_ASSERT(sizeof(Scissor) == 20, Sizeof_Scissor_is_not_20);
+COMPILE_ASSERT(offsetof(Scissor, header) == 0, OffsetOf_Scissor_header_not_0);
+COMPILE_ASSERT(offsetof(Scissor, x) == 4, OffsetOf_Scissor_x_not_4);
+COMPILE_ASSERT(offsetof(Scissor, y) == 8, OffsetOf_Scissor_y_not_8);
+COMPILE_ASSERT(offsetof(Scissor, width) == 12, OffsetOf_Scissor_width_not_12);
+COMPILE_ASSERT(offsetof(Scissor, height) == 16, OffsetOf_Scissor_height_not_16);
+
+struct ShaderBinary {
+ typedef ShaderBinary ValueType;
+ static const CommandId kCmdId = kShaderBinary;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLsizei _n,
+ uint32_t _shaders_shm_id,
+ uint32_t _shaders_shm_offset,
+ GLenum _binaryformat,
+ uint32_t _binary_shm_id,
+ uint32_t _binary_shm_offset,
+ GLsizei _length) {
+ SetHeader();
+ n = _n;
+ shaders_shm_id = _shaders_shm_id;
+ shaders_shm_offset = _shaders_shm_offset;
+ binaryformat = _binaryformat;
+ binary_shm_id = _binary_shm_id;
+ binary_shm_offset = _binary_shm_offset;
+ length = _length;
+ }
+
+ void* Set(void* cmd,
+ GLsizei _n,
+ uint32_t _shaders_shm_id,
+ uint32_t _shaders_shm_offset,
+ GLenum _binaryformat,
+ uint32_t _binary_shm_id,
+ uint32_t _binary_shm_offset,
+ GLsizei _length) {
+ static_cast<ValueType*>(cmd)->Init(_n,
+ _shaders_shm_id,
+ _shaders_shm_offset,
+ _binaryformat,
+ _binary_shm_id,
+ _binary_shm_offset,
+ _length);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+ uint32_t shaders_shm_id;
+ uint32_t shaders_shm_offset;
+ uint32_t binaryformat;
+ uint32_t binary_shm_id;
+ uint32_t binary_shm_offset;
+ int32_t length;
+};
+
+COMPILE_ASSERT(sizeof(ShaderBinary) == 32, Sizeof_ShaderBinary_is_not_32);
+COMPILE_ASSERT(offsetof(ShaderBinary, header) == 0,
+ OffsetOf_ShaderBinary_header_not_0);
+COMPILE_ASSERT(offsetof(ShaderBinary, n) == 4, OffsetOf_ShaderBinary_n_not_4);
+COMPILE_ASSERT(offsetof(ShaderBinary, shaders_shm_id) == 8,
+ OffsetOf_ShaderBinary_shaders_shm_id_not_8);
+COMPILE_ASSERT(offsetof(ShaderBinary, shaders_shm_offset) == 12,
+ OffsetOf_ShaderBinary_shaders_shm_offset_not_12);
+COMPILE_ASSERT(offsetof(ShaderBinary, binaryformat) == 16,
+ OffsetOf_ShaderBinary_binaryformat_not_16);
+COMPILE_ASSERT(offsetof(ShaderBinary, binary_shm_id) == 20,
+ OffsetOf_ShaderBinary_binary_shm_id_not_20);
+COMPILE_ASSERT(offsetof(ShaderBinary, binary_shm_offset) == 24,
+ OffsetOf_ShaderBinary_binary_shm_offset_not_24);
+COMPILE_ASSERT(offsetof(ShaderBinary, length) == 28,
+ OffsetOf_ShaderBinary_length_not_28);
+
+struct ShaderSourceBucket {
+ typedef ShaderSourceBucket ValueType;
+ static const CommandId kCmdId = kShaderSourceBucket;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _shader, uint32_t _data_bucket_id) {
+ SetHeader();
+ shader = _shader;
+ data_bucket_id = _data_bucket_id;
+ }
+
+ void* Set(void* cmd, GLuint _shader, uint32_t _data_bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_shader, _data_bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t shader;
+ uint32_t data_bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(ShaderSourceBucket) == 12,
+ Sizeof_ShaderSourceBucket_is_not_12);
+COMPILE_ASSERT(offsetof(ShaderSourceBucket, header) == 0,
+ OffsetOf_ShaderSourceBucket_header_not_0);
+COMPILE_ASSERT(offsetof(ShaderSourceBucket, shader) == 4,
+ OffsetOf_ShaderSourceBucket_shader_not_4);
+COMPILE_ASSERT(offsetof(ShaderSourceBucket, data_bucket_id) == 8,
+ OffsetOf_ShaderSourceBucket_data_bucket_id_not_8);
+
+struct StencilFunc {
+ typedef StencilFunc ValueType;
+ static const CommandId kCmdId = kStencilFunc;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _func, GLint _ref, GLuint _mask) {
+ SetHeader();
+ func = _func;
+ ref = _ref;
+ mask = _mask;
+ }
+
+ void* Set(void* cmd, GLenum _func, GLint _ref, GLuint _mask) {
+ static_cast<ValueType*>(cmd)->Init(_func, _ref, _mask);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t func;
+ int32_t ref;
+ uint32_t mask;
+};
+
+COMPILE_ASSERT(sizeof(StencilFunc) == 16, Sizeof_StencilFunc_is_not_16);
+COMPILE_ASSERT(offsetof(StencilFunc, header) == 0,
+ OffsetOf_StencilFunc_header_not_0);
+COMPILE_ASSERT(offsetof(StencilFunc, func) == 4,
+ OffsetOf_StencilFunc_func_not_4);
+COMPILE_ASSERT(offsetof(StencilFunc, ref) == 8, OffsetOf_StencilFunc_ref_not_8);
+COMPILE_ASSERT(offsetof(StencilFunc, mask) == 12,
+ OffsetOf_StencilFunc_mask_not_12);
+
+struct StencilFuncSeparate {
+ typedef StencilFuncSeparate ValueType;
+ static const CommandId kCmdId = kStencilFuncSeparate;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _face, GLenum _func, GLint _ref, GLuint _mask) {
+ SetHeader();
+ face = _face;
+ func = _func;
+ ref = _ref;
+ mask = _mask;
+ }
+
+ void* Set(void* cmd, GLenum _face, GLenum _func, GLint _ref, GLuint _mask) {
+ static_cast<ValueType*>(cmd)->Init(_face, _func, _ref, _mask);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t face;
+ uint32_t func;
+ int32_t ref;
+ uint32_t mask;
+};
+
+COMPILE_ASSERT(sizeof(StencilFuncSeparate) == 20,
+ Sizeof_StencilFuncSeparate_is_not_20);
+COMPILE_ASSERT(offsetof(StencilFuncSeparate, header) == 0,
+ OffsetOf_StencilFuncSeparate_header_not_0);
+COMPILE_ASSERT(offsetof(StencilFuncSeparate, face) == 4,
+ OffsetOf_StencilFuncSeparate_face_not_4);
+COMPILE_ASSERT(offsetof(StencilFuncSeparate, func) == 8,
+ OffsetOf_StencilFuncSeparate_func_not_8);
+COMPILE_ASSERT(offsetof(StencilFuncSeparate, ref) == 12,
+ OffsetOf_StencilFuncSeparate_ref_not_12);
+COMPILE_ASSERT(offsetof(StencilFuncSeparate, mask) == 16,
+ OffsetOf_StencilFuncSeparate_mask_not_16);
+
+struct StencilMask {
+ typedef StencilMask ValueType;
+ static const CommandId kCmdId = kStencilMask;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _mask) {
+ SetHeader();
+ mask = _mask;
+ }
+
+ void* Set(void* cmd, GLuint _mask) {
+ static_cast<ValueType*>(cmd)->Init(_mask);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mask;
+};
+
+COMPILE_ASSERT(sizeof(StencilMask) == 8, Sizeof_StencilMask_is_not_8);
+COMPILE_ASSERT(offsetof(StencilMask, header) == 0,
+ OffsetOf_StencilMask_header_not_0);
+COMPILE_ASSERT(offsetof(StencilMask, mask) == 4,
+ OffsetOf_StencilMask_mask_not_4);
+
+struct StencilMaskSeparate {
+ typedef StencilMaskSeparate ValueType;
+ static const CommandId kCmdId = kStencilMaskSeparate;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _face, GLuint _mask) {
+ SetHeader();
+ face = _face;
+ mask = _mask;
+ }
+
+ void* Set(void* cmd, GLenum _face, GLuint _mask) {
+ static_cast<ValueType*>(cmd)->Init(_face, _mask);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t face;
+ uint32_t mask;
+};
+
+COMPILE_ASSERT(sizeof(StencilMaskSeparate) == 12,
+ Sizeof_StencilMaskSeparate_is_not_12);
+COMPILE_ASSERT(offsetof(StencilMaskSeparate, header) == 0,
+ OffsetOf_StencilMaskSeparate_header_not_0);
+COMPILE_ASSERT(offsetof(StencilMaskSeparate, face) == 4,
+ OffsetOf_StencilMaskSeparate_face_not_4);
+COMPILE_ASSERT(offsetof(StencilMaskSeparate, mask) == 8,
+ OffsetOf_StencilMaskSeparate_mask_not_8);
+
+struct StencilOp {
+ typedef StencilOp ValueType;
+ static const CommandId kCmdId = kStencilOp;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _fail, GLenum _zfail, GLenum _zpass) {
+ SetHeader();
+ fail = _fail;
+ zfail = _zfail;
+ zpass = _zpass;
+ }
+
+ void* Set(void* cmd, GLenum _fail, GLenum _zfail, GLenum _zpass) {
+ static_cast<ValueType*>(cmd)->Init(_fail, _zfail, _zpass);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t fail;
+ uint32_t zfail;
+ uint32_t zpass;
+};
+
+COMPILE_ASSERT(sizeof(StencilOp) == 16, Sizeof_StencilOp_is_not_16);
+COMPILE_ASSERT(offsetof(StencilOp, header) == 0,
+ OffsetOf_StencilOp_header_not_0);
+COMPILE_ASSERT(offsetof(StencilOp, fail) == 4, OffsetOf_StencilOp_fail_not_4);
+COMPILE_ASSERT(offsetof(StencilOp, zfail) == 8, OffsetOf_StencilOp_zfail_not_8);
+COMPILE_ASSERT(offsetof(StencilOp, zpass) == 12,
+ OffsetOf_StencilOp_zpass_not_12);
+
+struct StencilOpSeparate {
+ typedef StencilOpSeparate ValueType;
+ static const CommandId kCmdId = kStencilOpSeparate;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _face, GLenum _fail, GLenum _zfail, GLenum _zpass) {
+ SetHeader();
+ face = _face;
+ fail = _fail;
+ zfail = _zfail;
+ zpass = _zpass;
+ }
+
+ void* Set(void* cmd,
+ GLenum _face,
+ GLenum _fail,
+ GLenum _zfail,
+ GLenum _zpass) {
+ static_cast<ValueType*>(cmd)->Init(_face, _fail, _zfail, _zpass);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t face;
+ uint32_t fail;
+ uint32_t zfail;
+ uint32_t zpass;
+};
+
+COMPILE_ASSERT(sizeof(StencilOpSeparate) == 20,
+ Sizeof_StencilOpSeparate_is_not_20);
+COMPILE_ASSERT(offsetof(StencilOpSeparate, header) == 0,
+ OffsetOf_StencilOpSeparate_header_not_0);
+COMPILE_ASSERT(offsetof(StencilOpSeparate, face) == 4,
+ OffsetOf_StencilOpSeparate_face_not_4);
+COMPILE_ASSERT(offsetof(StencilOpSeparate, fail) == 8,
+ OffsetOf_StencilOpSeparate_fail_not_8);
+COMPILE_ASSERT(offsetof(StencilOpSeparate, zfail) == 12,
+ OffsetOf_StencilOpSeparate_zfail_not_12);
+COMPILE_ASSERT(offsetof(StencilOpSeparate, zpass) == 16,
+ OffsetOf_StencilOpSeparate_zpass_not_16);
+
+struct TexImage2D {
+ typedef TexImage2D ValueType;
+ static const CommandId kCmdId = kTexImage2D;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLint _level,
+ GLint _internalformat,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type,
+ uint32_t _pixels_shm_id,
+ uint32_t _pixels_shm_offset) {
+ SetHeader();
+ target = _target;
+ level = _level;
+ internalformat = _internalformat;
+ width = _width;
+ height = _height;
+ format = _format;
+ type = _type;
+ pixels_shm_id = _pixels_shm_id;
+ pixels_shm_offset = _pixels_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLint _level,
+ GLint _internalformat,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type,
+ uint32_t _pixels_shm_id,
+ uint32_t _pixels_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_target,
+ _level,
+ _internalformat,
+ _width,
+ _height,
+ _format,
+ _type,
+ _pixels_shm_id,
+ _pixels_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t level;
+ int32_t internalformat;
+ int32_t width;
+ int32_t height;
+ uint32_t format;
+ uint32_t type;
+ uint32_t pixels_shm_id;
+ uint32_t pixels_shm_offset;
+ static const int32_t border = 0;
+};
+
+COMPILE_ASSERT(sizeof(TexImage2D) == 40, Sizeof_TexImage2D_is_not_40);
+COMPILE_ASSERT(offsetof(TexImage2D, header) == 0,
+ OffsetOf_TexImage2D_header_not_0);
+COMPILE_ASSERT(offsetof(TexImage2D, target) == 4,
+ OffsetOf_TexImage2D_target_not_4);
+COMPILE_ASSERT(offsetof(TexImage2D, level) == 8,
+ OffsetOf_TexImage2D_level_not_8);
+COMPILE_ASSERT(offsetof(TexImage2D, internalformat) == 12,
+ OffsetOf_TexImage2D_internalformat_not_12);
+COMPILE_ASSERT(offsetof(TexImage2D, width) == 16,
+ OffsetOf_TexImage2D_width_not_16);
+COMPILE_ASSERT(offsetof(TexImage2D, height) == 20,
+ OffsetOf_TexImage2D_height_not_20);
+COMPILE_ASSERT(offsetof(TexImage2D, format) == 24,
+ OffsetOf_TexImage2D_format_not_24);
+COMPILE_ASSERT(offsetof(TexImage2D, type) == 28,
+ OffsetOf_TexImage2D_type_not_28);
+COMPILE_ASSERT(offsetof(TexImage2D, pixels_shm_id) == 32,
+ OffsetOf_TexImage2D_pixels_shm_id_not_32);
+COMPILE_ASSERT(offsetof(TexImage2D, pixels_shm_offset) == 36,
+ OffsetOf_TexImage2D_pixels_shm_offset_not_36);
+
+struct TexParameterf {
+ typedef TexParameterf ValueType;
+ static const CommandId kCmdId = kTexParameterf;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLenum _pname, GLfloat _param) {
+ SetHeader();
+ target = _target;
+ pname = _pname;
+ param = _param;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLenum _pname, GLfloat _param) {
+ static_cast<ValueType*>(cmd)->Init(_target, _pname, _param);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t pname;
+ float param;
+};
+
+COMPILE_ASSERT(sizeof(TexParameterf) == 16, Sizeof_TexParameterf_is_not_16);
+COMPILE_ASSERT(offsetof(TexParameterf, header) == 0,
+ OffsetOf_TexParameterf_header_not_0);
+COMPILE_ASSERT(offsetof(TexParameterf, target) == 4,
+ OffsetOf_TexParameterf_target_not_4);
+COMPILE_ASSERT(offsetof(TexParameterf, pname) == 8,
+ OffsetOf_TexParameterf_pname_not_8);
+COMPILE_ASSERT(offsetof(TexParameterf, param) == 12,
+ OffsetOf_TexParameterf_param_not_12);
+
+struct TexParameterfvImmediate {
+ typedef TexParameterfvImmediate ValueType;
+ static const CommandId kCmdId = kTexParameterfvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 1); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLenum _target, GLenum _pname, const GLfloat* _params) {
+ SetHeader();
+ target = _target;
+ pname = _pname;
+ memcpy(ImmediateDataAddress(this), _params, ComputeDataSize());
+ }
+
+ void* Set(void* cmd, GLenum _target, GLenum _pname, const GLfloat* _params) {
+ static_cast<ValueType*>(cmd)->Init(_target, _pname, _params);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t pname;
+};
+
+COMPILE_ASSERT(sizeof(TexParameterfvImmediate) == 12,
+ Sizeof_TexParameterfvImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(TexParameterfvImmediate, header) == 0,
+ OffsetOf_TexParameterfvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(TexParameterfvImmediate, target) == 4,
+ OffsetOf_TexParameterfvImmediate_target_not_4);
+COMPILE_ASSERT(offsetof(TexParameterfvImmediate, pname) == 8,
+ OffsetOf_TexParameterfvImmediate_pname_not_8);
+
+struct TexParameteri {
+ typedef TexParameteri ValueType;
+ static const CommandId kCmdId = kTexParameteri;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLenum _pname, GLint _param) {
+ SetHeader();
+ target = _target;
+ pname = _pname;
+ param = _param;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLenum _pname, GLint _param) {
+ static_cast<ValueType*>(cmd)->Init(_target, _pname, _param);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t pname;
+ int32_t param;
+};
+
+COMPILE_ASSERT(sizeof(TexParameteri) == 16, Sizeof_TexParameteri_is_not_16);
+COMPILE_ASSERT(offsetof(TexParameteri, header) == 0,
+ OffsetOf_TexParameteri_header_not_0);
+COMPILE_ASSERT(offsetof(TexParameteri, target) == 4,
+ OffsetOf_TexParameteri_target_not_4);
+COMPILE_ASSERT(offsetof(TexParameteri, pname) == 8,
+ OffsetOf_TexParameteri_pname_not_8);
+COMPILE_ASSERT(offsetof(TexParameteri, param) == 12,
+ OffsetOf_TexParameteri_param_not_12);
+
+struct TexParameterivImmediate {
+ typedef TexParameterivImmediate ValueType;
+ static const CommandId kCmdId = kTexParameterivImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLint) * 1); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLenum _target, GLenum _pname, const GLint* _params) {
+ SetHeader();
+ target = _target;
+ pname = _pname;
+ memcpy(ImmediateDataAddress(this), _params, ComputeDataSize());
+ }
+
+ void* Set(void* cmd, GLenum _target, GLenum _pname, const GLint* _params) {
+ static_cast<ValueType*>(cmd)->Init(_target, _pname, _params);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t pname;
+};
+
+COMPILE_ASSERT(sizeof(TexParameterivImmediate) == 12,
+ Sizeof_TexParameterivImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(TexParameterivImmediate, header) == 0,
+ OffsetOf_TexParameterivImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(TexParameterivImmediate, target) == 4,
+ OffsetOf_TexParameterivImmediate_target_not_4);
+COMPILE_ASSERT(offsetof(TexParameterivImmediate, pname) == 8,
+ OffsetOf_TexParameterivImmediate_pname_not_8);
+
+struct TexSubImage2D {
+ typedef TexSubImage2D ValueType;
+ static const CommandId kCmdId = kTexSubImage2D;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type,
+ uint32_t _pixels_shm_id,
+ uint32_t _pixels_shm_offset,
+ GLboolean _internal) {
+ SetHeader();
+ target = _target;
+ level = _level;
+ xoffset = _xoffset;
+ yoffset = _yoffset;
+ width = _width;
+ height = _height;
+ format = _format;
+ type = _type;
+ pixels_shm_id = _pixels_shm_id;
+ pixels_shm_offset = _pixels_shm_offset;
+ internal = _internal;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type,
+ uint32_t _pixels_shm_id,
+ uint32_t _pixels_shm_offset,
+ GLboolean _internal) {
+ static_cast<ValueType*>(cmd)->Init(_target,
+ _level,
+ _xoffset,
+ _yoffset,
+ _width,
+ _height,
+ _format,
+ _type,
+ _pixels_shm_id,
+ _pixels_shm_offset,
+ _internal);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t level;
+ int32_t xoffset;
+ int32_t yoffset;
+ int32_t width;
+ int32_t height;
+ uint32_t format;
+ uint32_t type;
+ uint32_t pixels_shm_id;
+ uint32_t pixels_shm_offset;
+ uint32_t internal;
+};
+
+COMPILE_ASSERT(sizeof(TexSubImage2D) == 48, Sizeof_TexSubImage2D_is_not_48);
+COMPILE_ASSERT(offsetof(TexSubImage2D, header) == 0,
+ OffsetOf_TexSubImage2D_header_not_0);
+COMPILE_ASSERT(offsetof(TexSubImage2D, target) == 4,
+ OffsetOf_TexSubImage2D_target_not_4);
+COMPILE_ASSERT(offsetof(TexSubImage2D, level) == 8,
+ OffsetOf_TexSubImage2D_level_not_8);
+COMPILE_ASSERT(offsetof(TexSubImage2D, xoffset) == 12,
+ OffsetOf_TexSubImage2D_xoffset_not_12);
+COMPILE_ASSERT(offsetof(TexSubImage2D, yoffset) == 16,
+ OffsetOf_TexSubImage2D_yoffset_not_16);
+COMPILE_ASSERT(offsetof(TexSubImage2D, width) == 20,
+ OffsetOf_TexSubImage2D_width_not_20);
+COMPILE_ASSERT(offsetof(TexSubImage2D, height) == 24,
+ OffsetOf_TexSubImage2D_height_not_24);
+COMPILE_ASSERT(offsetof(TexSubImage2D, format) == 28,
+ OffsetOf_TexSubImage2D_format_not_28);
+COMPILE_ASSERT(offsetof(TexSubImage2D, type) == 32,
+ OffsetOf_TexSubImage2D_type_not_32);
+COMPILE_ASSERT(offsetof(TexSubImage2D, pixels_shm_id) == 36,
+ OffsetOf_TexSubImage2D_pixels_shm_id_not_36);
+COMPILE_ASSERT(offsetof(TexSubImage2D, pixels_shm_offset) == 40,
+ OffsetOf_TexSubImage2D_pixels_shm_offset_not_40);
+COMPILE_ASSERT(offsetof(TexSubImage2D, internal) == 44,
+ OffsetOf_TexSubImage2D_internal_not_44);
+
+struct Uniform1f {
+ typedef Uniform1f ValueType;
+ static const CommandId kCmdId = kUniform1f;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _location, GLfloat _x) {
+ SetHeader();
+ location = _location;
+ x = _x;
+ }
+
+ void* Set(void* cmd, GLint _location, GLfloat _x) {
+ static_cast<ValueType*>(cmd)->Init(_location, _x);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ float x;
+};
+
+COMPILE_ASSERT(sizeof(Uniform1f) == 12, Sizeof_Uniform1f_is_not_12);
+COMPILE_ASSERT(offsetof(Uniform1f, header) == 0,
+ OffsetOf_Uniform1f_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform1f, location) == 4,
+ OffsetOf_Uniform1f_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform1f, x) == 8, OffsetOf_Uniform1f_x_not_8);
+
+struct Uniform1fvImmediate {
+ typedef Uniform1fvImmediate ValueType;
+ static const CommandId kCmdId = kUniform1fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 1 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLfloat* _v) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _v, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLfloat* _v) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _v);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(Uniform1fvImmediate) == 12,
+ Sizeof_Uniform1fvImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(Uniform1fvImmediate, header) == 0,
+ OffsetOf_Uniform1fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform1fvImmediate, location) == 4,
+ OffsetOf_Uniform1fvImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform1fvImmediate, count) == 8,
+ OffsetOf_Uniform1fvImmediate_count_not_8);
+
+struct Uniform1i {
+ typedef Uniform1i ValueType;
+ static const CommandId kCmdId = kUniform1i;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _location, GLint _x) {
+ SetHeader();
+ location = _location;
+ x = _x;
+ }
+
+ void* Set(void* cmd, GLint _location, GLint _x) {
+ static_cast<ValueType*>(cmd)->Init(_location, _x);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t x;
+};
+
+COMPILE_ASSERT(sizeof(Uniform1i) == 12, Sizeof_Uniform1i_is_not_12);
+COMPILE_ASSERT(offsetof(Uniform1i, header) == 0,
+ OffsetOf_Uniform1i_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform1i, location) == 4,
+ OffsetOf_Uniform1i_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform1i, x) == 8, OffsetOf_Uniform1i_x_not_8);
+
+struct Uniform1ivImmediate {
+ typedef Uniform1ivImmediate ValueType;
+ static const CommandId kCmdId = kUniform1ivImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLint) * 1 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLint* _v) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _v, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLint* _v) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _v);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(Uniform1ivImmediate) == 12,
+ Sizeof_Uniform1ivImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(Uniform1ivImmediate, header) == 0,
+ OffsetOf_Uniform1ivImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform1ivImmediate, location) == 4,
+ OffsetOf_Uniform1ivImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform1ivImmediate, count) == 8,
+ OffsetOf_Uniform1ivImmediate_count_not_8);
+
+struct Uniform2f {
+ typedef Uniform2f ValueType;
+ static const CommandId kCmdId = kUniform2f;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _location, GLfloat _x, GLfloat _y) {
+ SetHeader();
+ location = _location;
+ x = _x;
+ y = _y;
+ }
+
+ void* Set(void* cmd, GLint _location, GLfloat _x, GLfloat _y) {
+ static_cast<ValueType*>(cmd)->Init(_location, _x, _y);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ float x;
+ float y;
+};
+
+COMPILE_ASSERT(sizeof(Uniform2f) == 16, Sizeof_Uniform2f_is_not_16);
+COMPILE_ASSERT(offsetof(Uniform2f, header) == 0,
+ OffsetOf_Uniform2f_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform2f, location) == 4,
+ OffsetOf_Uniform2f_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform2f, x) == 8, OffsetOf_Uniform2f_x_not_8);
+COMPILE_ASSERT(offsetof(Uniform2f, y) == 12, OffsetOf_Uniform2f_y_not_12);
+
+struct Uniform2fvImmediate {
+ typedef Uniform2fvImmediate ValueType;
+ static const CommandId kCmdId = kUniform2fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 2 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLfloat* _v) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _v, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLfloat* _v) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _v);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(Uniform2fvImmediate) == 12,
+ Sizeof_Uniform2fvImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(Uniform2fvImmediate, header) == 0,
+ OffsetOf_Uniform2fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform2fvImmediate, location) == 4,
+ OffsetOf_Uniform2fvImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform2fvImmediate, count) == 8,
+ OffsetOf_Uniform2fvImmediate_count_not_8);
+
+struct Uniform2i {
+ typedef Uniform2i ValueType;
+ static const CommandId kCmdId = kUniform2i;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _location, GLint _x, GLint _y) {
+ SetHeader();
+ location = _location;
+ x = _x;
+ y = _y;
+ }
+
+ void* Set(void* cmd, GLint _location, GLint _x, GLint _y) {
+ static_cast<ValueType*>(cmd)->Init(_location, _x, _y);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t x;
+ int32_t y;
+};
+
+COMPILE_ASSERT(sizeof(Uniform2i) == 16, Sizeof_Uniform2i_is_not_16);
+COMPILE_ASSERT(offsetof(Uniform2i, header) == 0,
+ OffsetOf_Uniform2i_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform2i, location) == 4,
+ OffsetOf_Uniform2i_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform2i, x) == 8, OffsetOf_Uniform2i_x_not_8);
+COMPILE_ASSERT(offsetof(Uniform2i, y) == 12, OffsetOf_Uniform2i_y_not_12);
+
+struct Uniform2ivImmediate {
+ typedef Uniform2ivImmediate ValueType;
+ static const CommandId kCmdId = kUniform2ivImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLint) * 2 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLint* _v) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _v, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLint* _v) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _v);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(Uniform2ivImmediate) == 12,
+ Sizeof_Uniform2ivImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(Uniform2ivImmediate, header) == 0,
+ OffsetOf_Uniform2ivImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform2ivImmediate, location) == 4,
+ OffsetOf_Uniform2ivImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform2ivImmediate, count) == 8,
+ OffsetOf_Uniform2ivImmediate_count_not_8);
+
+struct Uniform3f {
+ typedef Uniform3f ValueType;
+ static const CommandId kCmdId = kUniform3f;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _location, GLfloat _x, GLfloat _y, GLfloat _z) {
+ SetHeader();
+ location = _location;
+ x = _x;
+ y = _y;
+ z = _z;
+ }
+
+ void* Set(void* cmd, GLint _location, GLfloat _x, GLfloat _y, GLfloat _z) {
+ static_cast<ValueType*>(cmd)->Init(_location, _x, _y, _z);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ float x;
+ float y;
+ float z;
+};
+
+COMPILE_ASSERT(sizeof(Uniform3f) == 20, Sizeof_Uniform3f_is_not_20);
+COMPILE_ASSERT(offsetof(Uniform3f, header) == 0,
+ OffsetOf_Uniform3f_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform3f, location) == 4,
+ OffsetOf_Uniform3f_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform3f, x) == 8, OffsetOf_Uniform3f_x_not_8);
+COMPILE_ASSERT(offsetof(Uniform3f, y) == 12, OffsetOf_Uniform3f_y_not_12);
+COMPILE_ASSERT(offsetof(Uniform3f, z) == 16, OffsetOf_Uniform3f_z_not_16);
+
+struct Uniform3fvImmediate {
+ typedef Uniform3fvImmediate ValueType;
+ static const CommandId kCmdId = kUniform3fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 3 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLfloat* _v) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _v, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLfloat* _v) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _v);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(Uniform3fvImmediate) == 12,
+ Sizeof_Uniform3fvImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(Uniform3fvImmediate, header) == 0,
+ OffsetOf_Uniform3fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform3fvImmediate, location) == 4,
+ OffsetOf_Uniform3fvImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform3fvImmediate, count) == 8,
+ OffsetOf_Uniform3fvImmediate_count_not_8);
+
+struct Uniform3i {
+ typedef Uniform3i ValueType;
+ static const CommandId kCmdId = kUniform3i;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _location, GLint _x, GLint _y, GLint _z) {
+ SetHeader();
+ location = _location;
+ x = _x;
+ y = _y;
+ z = _z;
+ }
+
+ void* Set(void* cmd, GLint _location, GLint _x, GLint _y, GLint _z) {
+ static_cast<ValueType*>(cmd)->Init(_location, _x, _y, _z);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t x;
+ int32_t y;
+ int32_t z;
+};
+
+COMPILE_ASSERT(sizeof(Uniform3i) == 20, Sizeof_Uniform3i_is_not_20);
+COMPILE_ASSERT(offsetof(Uniform3i, header) == 0,
+ OffsetOf_Uniform3i_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform3i, location) == 4,
+ OffsetOf_Uniform3i_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform3i, x) == 8, OffsetOf_Uniform3i_x_not_8);
+COMPILE_ASSERT(offsetof(Uniform3i, y) == 12, OffsetOf_Uniform3i_y_not_12);
+COMPILE_ASSERT(offsetof(Uniform3i, z) == 16, OffsetOf_Uniform3i_z_not_16);
+
+struct Uniform3ivImmediate {
+ typedef Uniform3ivImmediate ValueType;
+ static const CommandId kCmdId = kUniform3ivImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLint) * 3 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLint* _v) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _v, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLint* _v) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _v);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(Uniform3ivImmediate) == 12,
+ Sizeof_Uniform3ivImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(Uniform3ivImmediate, header) == 0,
+ OffsetOf_Uniform3ivImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform3ivImmediate, location) == 4,
+ OffsetOf_Uniform3ivImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform3ivImmediate, count) == 8,
+ OffsetOf_Uniform3ivImmediate_count_not_8);
+
+struct Uniform4f {
+ typedef Uniform4f ValueType;
+ static const CommandId kCmdId = kUniform4f;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _location, GLfloat _x, GLfloat _y, GLfloat _z, GLfloat _w) {
+ SetHeader();
+ location = _location;
+ x = _x;
+ y = _y;
+ z = _z;
+ w = _w;
+ }
+
+ void* Set(void* cmd,
+ GLint _location,
+ GLfloat _x,
+ GLfloat _y,
+ GLfloat _z,
+ GLfloat _w) {
+ static_cast<ValueType*>(cmd)->Init(_location, _x, _y, _z, _w);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ float x;
+ float y;
+ float z;
+ float w;
+};
+
+COMPILE_ASSERT(sizeof(Uniform4f) == 24, Sizeof_Uniform4f_is_not_24);
+COMPILE_ASSERT(offsetof(Uniform4f, header) == 0,
+ OffsetOf_Uniform4f_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform4f, location) == 4,
+ OffsetOf_Uniform4f_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform4f, x) == 8, OffsetOf_Uniform4f_x_not_8);
+COMPILE_ASSERT(offsetof(Uniform4f, y) == 12, OffsetOf_Uniform4f_y_not_12);
+COMPILE_ASSERT(offsetof(Uniform4f, z) == 16, OffsetOf_Uniform4f_z_not_16);
+COMPILE_ASSERT(offsetof(Uniform4f, w) == 20, OffsetOf_Uniform4f_w_not_20);
+
+struct Uniform4fvImmediate {
+ typedef Uniform4fvImmediate ValueType;
+ static const CommandId kCmdId = kUniform4fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 4 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLfloat* _v) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _v, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLfloat* _v) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _v);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(Uniform4fvImmediate) == 12,
+ Sizeof_Uniform4fvImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(Uniform4fvImmediate, header) == 0,
+ OffsetOf_Uniform4fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform4fvImmediate, location) == 4,
+ OffsetOf_Uniform4fvImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform4fvImmediate, count) == 8,
+ OffsetOf_Uniform4fvImmediate_count_not_8);
+
+struct Uniform4i {
+ typedef Uniform4i ValueType;
+ static const CommandId kCmdId = kUniform4i;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _location, GLint _x, GLint _y, GLint _z, GLint _w) {
+ SetHeader();
+ location = _location;
+ x = _x;
+ y = _y;
+ z = _z;
+ w = _w;
+ }
+
+ void* Set(void* cmd,
+ GLint _location,
+ GLint _x,
+ GLint _y,
+ GLint _z,
+ GLint _w) {
+ static_cast<ValueType*>(cmd)->Init(_location, _x, _y, _z, _w);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t x;
+ int32_t y;
+ int32_t z;
+ int32_t w;
+};
+
+COMPILE_ASSERT(sizeof(Uniform4i) == 24, Sizeof_Uniform4i_is_not_24);
+COMPILE_ASSERT(offsetof(Uniform4i, header) == 0,
+ OffsetOf_Uniform4i_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform4i, location) == 4,
+ OffsetOf_Uniform4i_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform4i, x) == 8, OffsetOf_Uniform4i_x_not_8);
+COMPILE_ASSERT(offsetof(Uniform4i, y) == 12, OffsetOf_Uniform4i_y_not_12);
+COMPILE_ASSERT(offsetof(Uniform4i, z) == 16, OffsetOf_Uniform4i_z_not_16);
+COMPILE_ASSERT(offsetof(Uniform4i, w) == 20, OffsetOf_Uniform4i_w_not_20);
+
+struct Uniform4ivImmediate {
+ typedef Uniform4ivImmediate ValueType;
+ static const CommandId kCmdId = kUniform4ivImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLint) * 4 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLint* _v) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _v, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLint* _v) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _v);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(Uniform4ivImmediate) == 12,
+ Sizeof_Uniform4ivImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(Uniform4ivImmediate, header) == 0,
+ OffsetOf_Uniform4ivImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(Uniform4ivImmediate, location) == 4,
+ OffsetOf_Uniform4ivImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(Uniform4ivImmediate, count) == 8,
+ OffsetOf_Uniform4ivImmediate_count_not_8);
+
+struct UniformMatrix2fvImmediate {
+ typedef UniformMatrix2fvImmediate ValueType;
+ static const CommandId kCmdId = kUniformMatrix2fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 4 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLfloat* _value) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _value, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLfloat* _value) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _value);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+ static const uint32_t transpose = false;
+};
+
+COMPILE_ASSERT(sizeof(UniformMatrix2fvImmediate) == 12,
+ Sizeof_UniformMatrix2fvImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(UniformMatrix2fvImmediate, header) == 0,
+ OffsetOf_UniformMatrix2fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(UniformMatrix2fvImmediate, location) == 4,
+ OffsetOf_UniformMatrix2fvImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(UniformMatrix2fvImmediate, count) == 8,
+ OffsetOf_UniformMatrix2fvImmediate_count_not_8);
+
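+// Illustrative note (not generated): the UniformMatrix*fv commands size their
+// payload by the matrix element count (4, 9, or 16 floats per matrix), and
+// the transpose argument is not transmitted at all -- it is baked in as a
+// static false constant, since OpenGL ES 2.0 requires transpose == GL_FALSE.
+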
+struct UniformMatrix3fvImmediate {
+ typedef UniformMatrix3fvImmediate ValueType;
+ static const CommandId kCmdId = kUniformMatrix3fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 9 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLfloat* _value) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _value, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLfloat* _value) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _value);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+ static const uint32_t transpose = false;
+};
+
+COMPILE_ASSERT(sizeof(UniformMatrix3fvImmediate) == 12,
+ Sizeof_UniformMatrix3fvImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(UniformMatrix3fvImmediate, header) == 0,
+ OffsetOf_UniformMatrix3fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(UniformMatrix3fvImmediate, location) == 4,
+ OffsetOf_UniformMatrix3fvImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(UniformMatrix3fvImmediate, count) == 8,
+ OffsetOf_UniformMatrix3fvImmediate_count_not_8);
+
+struct UniformMatrix4fvImmediate {
+ typedef UniformMatrix4fvImmediate ValueType;
+ static const CommandId kCmdId = kUniformMatrix4fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 16 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLint _location, GLsizei _count, const GLfloat* _value) {
+ SetHeader(_count);
+ location = _location;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _value, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLint _location, GLsizei _count, const GLfloat* _value) {
+ static_cast<ValueType*>(cmd)->Init(_location, _count, _value);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t location;
+ int32_t count;
+ static const uint32_t transpose = false;
+};
+
+COMPILE_ASSERT(sizeof(UniformMatrix4fvImmediate) == 12,
+ Sizeof_UniformMatrix4fvImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(UniformMatrix4fvImmediate, header) == 0,
+ OffsetOf_UniformMatrix4fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(UniformMatrix4fvImmediate, location) == 4,
+ OffsetOf_UniformMatrix4fvImmediate_location_not_4);
+COMPILE_ASSERT(offsetof(UniformMatrix4fvImmediate, count) == 8,
+ OffsetOf_UniformMatrix4fvImmediate_count_not_8);
+
+struct UseProgram {
+ typedef UseProgram ValueType;
+ static const CommandId kCmdId = kUseProgram;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program) {
+ SetHeader();
+ program = _program;
+ }
+
+ void* Set(void* cmd, GLuint _program) {
+ static_cast<ValueType*>(cmd)->Init(_program);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+};
+
+COMPILE_ASSERT(sizeof(UseProgram) == 8, Sizeof_UseProgram_is_not_8);
+COMPILE_ASSERT(offsetof(UseProgram, header) == 0,
+ OffsetOf_UseProgram_header_not_0);
+COMPILE_ASSERT(offsetof(UseProgram, program) == 4,
+ OffsetOf_UseProgram_program_not_4);
+
+struct ValidateProgram {
+ typedef ValidateProgram ValueType;
+ static const CommandId kCmdId = kValidateProgram;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program) {
+ SetHeader();
+ program = _program;
+ }
+
+ void* Set(void* cmd, GLuint _program) {
+ static_cast<ValueType*>(cmd)->Init(_program);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+};
+
+COMPILE_ASSERT(sizeof(ValidateProgram) == 8, Sizeof_ValidateProgram_is_not_8);
+COMPILE_ASSERT(offsetof(ValidateProgram, header) == 0,
+ OffsetOf_ValidateProgram_header_not_0);
+COMPILE_ASSERT(offsetof(ValidateProgram, program) == 4,
+ OffsetOf_ValidateProgram_program_not_4);
+
+struct VertexAttrib1f {
+ typedef VertexAttrib1f ValueType;
+ static const CommandId kCmdId = kVertexAttrib1f;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _indx, GLfloat _x) {
+ SetHeader();
+ indx = _indx;
+ x = _x;
+ }
+
+ void* Set(void* cmd, GLuint _indx, GLfloat _x) {
+ static_cast<ValueType*>(cmd)->Init(_indx, _x);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t indx;
+ float x;
+};
+
+COMPILE_ASSERT(sizeof(VertexAttrib1f) == 12, Sizeof_VertexAttrib1f_is_not_12);
+COMPILE_ASSERT(offsetof(VertexAttrib1f, header) == 0,
+ OffsetOf_VertexAttrib1f_header_not_0);
+COMPILE_ASSERT(offsetof(VertexAttrib1f, indx) == 4,
+ OffsetOf_VertexAttrib1f_indx_not_4);
+COMPILE_ASSERT(offsetof(VertexAttrib1f, x) == 8,
+ OffsetOf_VertexAttrib1f_x_not_8);
+
+struct VertexAttrib1fvImmediate {
+ typedef VertexAttrib1fvImmediate ValueType;
+ static const CommandId kCmdId = kVertexAttrib1fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 1); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLuint _indx, const GLfloat* _values) {
+ SetHeader();
+ indx = _indx;
+ memcpy(ImmediateDataAddress(this), _values, ComputeDataSize());
+ }
+
+ void* Set(void* cmd, GLuint _indx, const GLfloat* _values) {
+ static_cast<ValueType*>(cmd)->Init(_indx, _values);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t indx;
+};
+
+COMPILE_ASSERT(sizeof(VertexAttrib1fvImmediate) == 8,
+ Sizeof_VertexAttrib1fvImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(VertexAttrib1fvImmediate, header) == 0,
+ OffsetOf_VertexAttrib1fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(VertexAttrib1fvImmediate, indx) == 4,
+ OffsetOf_VertexAttrib1fvImmediate_indx_not_4);
+
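+// Illustrative note (not generated): unlike the Uniform*vImmediate commands,
+// the VertexAttrib*fvImmediate payload has a fixed element count (one to
+// four floats), so ComputeDataSize() and ComputeSize() take no arguments and
+// the total command size is a compile-time constant.
+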
+struct VertexAttrib2f {
+ typedef VertexAttrib2f ValueType;
+ static const CommandId kCmdId = kVertexAttrib2f;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _indx, GLfloat _x, GLfloat _y) {
+ SetHeader();
+ indx = _indx;
+ x = _x;
+ y = _y;
+ }
+
+ void* Set(void* cmd, GLuint _indx, GLfloat _x, GLfloat _y) {
+ static_cast<ValueType*>(cmd)->Init(_indx, _x, _y);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t indx;
+ float x;
+ float y;
+};
+
+COMPILE_ASSERT(sizeof(VertexAttrib2f) == 16, Sizeof_VertexAttrib2f_is_not_16);
+COMPILE_ASSERT(offsetof(VertexAttrib2f, header) == 0,
+ OffsetOf_VertexAttrib2f_header_not_0);
+COMPILE_ASSERT(offsetof(VertexAttrib2f, indx) == 4,
+ OffsetOf_VertexAttrib2f_indx_not_4);
+COMPILE_ASSERT(offsetof(VertexAttrib2f, x) == 8,
+ OffsetOf_VertexAttrib2f_x_not_8);
+COMPILE_ASSERT(offsetof(VertexAttrib2f, y) == 12,
+ OffsetOf_VertexAttrib2f_y_not_12);
+
+struct VertexAttrib2fvImmediate {
+ typedef VertexAttrib2fvImmediate ValueType;
+ static const CommandId kCmdId = kVertexAttrib2fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 2); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLuint _indx, const GLfloat* _values) {
+ SetHeader();
+ indx = _indx;
+ memcpy(ImmediateDataAddress(this), _values, ComputeDataSize());
+ }
+
+ void* Set(void* cmd, GLuint _indx, const GLfloat* _values) {
+ static_cast<ValueType*>(cmd)->Init(_indx, _values);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t indx;
+};
+
+COMPILE_ASSERT(sizeof(VertexAttrib2fvImmediate) == 8,
+ Sizeof_VertexAttrib2fvImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(VertexAttrib2fvImmediate, header) == 0,
+ OffsetOf_VertexAttrib2fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(VertexAttrib2fvImmediate, indx) == 4,
+ OffsetOf_VertexAttrib2fvImmediate_indx_not_4);
+
+struct VertexAttrib3f {
+ typedef VertexAttrib3f ValueType;
+ static const CommandId kCmdId = kVertexAttrib3f;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _indx, GLfloat _x, GLfloat _y, GLfloat _z) {
+ SetHeader();
+ indx = _indx;
+ x = _x;
+ y = _y;
+ z = _z;
+ }
+
+ void* Set(void* cmd, GLuint _indx, GLfloat _x, GLfloat _y, GLfloat _z) {
+ static_cast<ValueType*>(cmd)->Init(_indx, _x, _y, _z);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t indx;
+ float x;
+ float y;
+ float z;
+};
+
+COMPILE_ASSERT(sizeof(VertexAttrib3f) == 20, Sizeof_VertexAttrib3f_is_not_20);
+COMPILE_ASSERT(offsetof(VertexAttrib3f, header) == 0,
+ OffsetOf_VertexAttrib3f_header_not_0);
+COMPILE_ASSERT(offsetof(VertexAttrib3f, indx) == 4,
+ OffsetOf_VertexAttrib3f_indx_not_4);
+COMPILE_ASSERT(offsetof(VertexAttrib3f, x) == 8,
+ OffsetOf_VertexAttrib3f_x_not_8);
+COMPILE_ASSERT(offsetof(VertexAttrib3f, y) == 12,
+ OffsetOf_VertexAttrib3f_y_not_12);
+COMPILE_ASSERT(offsetof(VertexAttrib3f, z) == 16,
+ OffsetOf_VertexAttrib3f_z_not_16);
+
+struct VertexAttrib3fvImmediate {
+ typedef VertexAttrib3fvImmediate ValueType;
+ static const CommandId kCmdId = kVertexAttrib3fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 3); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLuint _indx, const GLfloat* _values) {
+ SetHeader();
+ indx = _indx;
+ memcpy(ImmediateDataAddress(this), _values, ComputeDataSize());
+ }
+
+ void* Set(void* cmd, GLuint _indx, const GLfloat* _values) {
+ static_cast<ValueType*>(cmd)->Init(_indx, _values);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t indx;
+};
+
+COMPILE_ASSERT(sizeof(VertexAttrib3fvImmediate) == 8,
+ Sizeof_VertexAttrib3fvImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(VertexAttrib3fvImmediate, header) == 0,
+ OffsetOf_VertexAttrib3fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(VertexAttrib3fvImmediate, indx) == 4,
+ OffsetOf_VertexAttrib3fvImmediate_indx_not_4);
+
+struct VertexAttrib4f {
+ typedef VertexAttrib4f ValueType;
+ static const CommandId kCmdId = kVertexAttrib4f;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _indx, GLfloat _x, GLfloat _y, GLfloat _z, GLfloat _w) {
+ SetHeader();
+ indx = _indx;
+ x = _x;
+ y = _y;
+ z = _z;
+ w = _w;
+ }
+
+ void* Set(void* cmd,
+ GLuint _indx,
+ GLfloat _x,
+ GLfloat _y,
+ GLfloat _z,
+ GLfloat _w) {
+ static_cast<ValueType*>(cmd)->Init(_indx, _x, _y, _z, _w);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t indx;
+ float x;
+ float y;
+ float z;
+ float w;
+};
+
+COMPILE_ASSERT(sizeof(VertexAttrib4f) == 24, Sizeof_VertexAttrib4f_is_not_24);
+COMPILE_ASSERT(offsetof(VertexAttrib4f, header) == 0,
+ OffsetOf_VertexAttrib4f_header_not_0);
+COMPILE_ASSERT(offsetof(VertexAttrib4f, indx) == 4,
+ OffsetOf_VertexAttrib4f_indx_not_4);
+COMPILE_ASSERT(offsetof(VertexAttrib4f, x) == 8,
+ OffsetOf_VertexAttrib4f_x_not_8);
+COMPILE_ASSERT(offsetof(VertexAttrib4f, y) == 12,
+ OffsetOf_VertexAttrib4f_y_not_12);
+COMPILE_ASSERT(offsetof(VertexAttrib4f, z) == 16,
+ OffsetOf_VertexAttrib4f_z_not_16);
+COMPILE_ASSERT(offsetof(VertexAttrib4f, w) == 20,
+ OffsetOf_VertexAttrib4f_w_not_20);
+
+struct VertexAttrib4fvImmediate {
+ typedef VertexAttrib4fvImmediate ValueType;
+ static const CommandId kCmdId = kVertexAttrib4fvImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 4); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLuint _indx, const GLfloat* _values) {
+ SetHeader();
+ indx = _indx;
+ memcpy(ImmediateDataAddress(this), _values, ComputeDataSize());
+ }
+
+ void* Set(void* cmd, GLuint _indx, const GLfloat* _values) {
+ static_cast<ValueType*>(cmd)->Init(_indx, _values);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t indx;
+};
+
+COMPILE_ASSERT(sizeof(VertexAttrib4fvImmediate) == 8,
+ Sizeof_VertexAttrib4fvImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(VertexAttrib4fvImmediate, header) == 0,
+ OffsetOf_VertexAttrib4fvImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(VertexAttrib4fvImmediate, indx) == 4,
+ OffsetOf_VertexAttrib4fvImmediate_indx_not_4);
+
+struct VertexAttribPointer {
+ typedef VertexAttribPointer ValueType;
+ static const CommandId kCmdId = kVertexAttribPointer;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _indx,
+ GLint _size,
+ GLenum _type,
+ GLboolean _normalized,
+ GLsizei _stride,
+ GLuint _offset) {
+ SetHeader();
+ indx = _indx;
+ size = _size;
+ type = _type;
+ normalized = _normalized;
+ stride = _stride;
+ offset = _offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _indx,
+ GLint _size,
+ GLenum _type,
+ GLboolean _normalized,
+ GLsizei _stride,
+ GLuint _offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_indx, _size, _type, _normalized, _stride, _offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t indx;
+ int32_t size;
+ uint32_t type;
+ uint32_t normalized;
+ int32_t stride;
+ uint32_t offset;
+};
+
+COMPILE_ASSERT(sizeof(VertexAttribPointer) == 28,
+ Sizeof_VertexAttribPointer_is_not_28);
+COMPILE_ASSERT(offsetof(VertexAttribPointer, header) == 0,
+ OffsetOf_VertexAttribPointer_header_not_0);
+COMPILE_ASSERT(offsetof(VertexAttribPointer, indx) == 4,
+ OffsetOf_VertexAttribPointer_indx_not_4);
+COMPILE_ASSERT(offsetof(VertexAttribPointer, size) == 8,
+ OffsetOf_VertexAttribPointer_size_not_8);
+COMPILE_ASSERT(offsetof(VertexAttribPointer, type) == 12,
+ OffsetOf_VertexAttribPointer_type_not_12);
+COMPILE_ASSERT(offsetof(VertexAttribPointer, normalized) == 16,
+ OffsetOf_VertexAttribPointer_normalized_not_16);
+COMPILE_ASSERT(offsetof(VertexAttribPointer, stride) == 20,
+ OffsetOf_VertexAttribPointer_stride_not_20);
+COMPILE_ASSERT(offsetof(VertexAttribPointer, offset) == 24,
+ OffsetOf_VertexAttribPointer_offset_not_24);
+
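+// Illustrative note (not generated): VertexAttribPointer transports the
+// client-side pointer argument as a GLuint byte offset into the currently
+// bound array buffer, and the GLboolean 'normalized' flag widens to a
+// uint32_t field. A hedged sketch (GetCmdSpace<T>() is an assumed helper):
+//
+//   gles2::cmds::VertexAttribPointer* c =
+//       GetCmdSpace<gles2::cmds::VertexAttribPointer>();
+//   if (c) {
+//     c->Init(indx, 3, GL_FLOAT, GL_FALSE, stride, offset_in_bound_buffer);
+//   }
+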
+struct Viewport {
+ typedef Viewport ValueType;
+ static const CommandId kCmdId = kViewport;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _x, GLint _y, GLsizei _width, GLsizei _height) {
+ SetHeader();
+ x = _x;
+ y = _y;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd, GLint _x, GLint _y, GLsizei _width, GLsizei _height) {
+ static_cast<ValueType*>(cmd)->Init(_x, _y, _width, _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t x;
+ int32_t y;
+ int32_t width;
+ int32_t height;
+};
+
+COMPILE_ASSERT(sizeof(Viewport) == 20, Sizeof_Viewport_is_not_20);
+COMPILE_ASSERT(offsetof(Viewport, header) == 0, OffsetOf_Viewport_header_not_0);
+COMPILE_ASSERT(offsetof(Viewport, x) == 4, OffsetOf_Viewport_x_not_4);
+COMPILE_ASSERT(offsetof(Viewport, y) == 8, OffsetOf_Viewport_y_not_8);
+COMPILE_ASSERT(offsetof(Viewport, width) == 12, OffsetOf_Viewport_width_not_12);
+COMPILE_ASSERT(offsetof(Viewport, height) == 16,
+ OffsetOf_Viewport_height_not_16);
+
+struct BlitFramebufferCHROMIUM {
+ typedef BlitFramebufferCHROMIUM ValueType;
+ static const CommandId kCmdId = kBlitFramebufferCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _srcX0,
+ GLint _srcY0,
+ GLint _srcX1,
+ GLint _srcY1,
+ GLint _dstX0,
+ GLint _dstY0,
+ GLint _dstX1,
+ GLint _dstY1,
+ GLbitfield _mask,
+ GLenum _filter) {
+ SetHeader();
+ srcX0 = _srcX0;
+ srcY0 = _srcY0;
+ srcX1 = _srcX1;
+ srcY1 = _srcY1;
+ dstX0 = _dstX0;
+ dstY0 = _dstY0;
+ dstX1 = _dstX1;
+ dstY1 = _dstY1;
+ mask = _mask;
+ filter = _filter;
+ }
+
+ void* Set(void* cmd,
+ GLint _srcX0,
+ GLint _srcY0,
+ GLint _srcX1,
+ GLint _srcY1,
+ GLint _dstX0,
+ GLint _dstY0,
+ GLint _dstX1,
+ GLint _dstY1,
+ GLbitfield _mask,
+ GLenum _filter) {
+ static_cast<ValueType*>(cmd)->Init(_srcX0,
+ _srcY0,
+ _srcX1,
+ _srcY1,
+ _dstX0,
+ _dstY0,
+ _dstX1,
+ _dstY1,
+ _mask,
+ _filter);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t srcX0;
+ int32_t srcY0;
+ int32_t srcX1;
+ int32_t srcY1;
+ int32_t dstX0;
+ int32_t dstY0;
+ int32_t dstX1;
+ int32_t dstY1;
+ uint32_t mask;
+ uint32_t filter;
+};
+
+COMPILE_ASSERT(sizeof(BlitFramebufferCHROMIUM) == 44,
+ Sizeof_BlitFramebufferCHROMIUM_is_not_44);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, header) == 0,
+ OffsetOf_BlitFramebufferCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, srcX0) == 4,
+ OffsetOf_BlitFramebufferCHROMIUM_srcX0_not_4);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, srcY0) == 8,
+ OffsetOf_BlitFramebufferCHROMIUM_srcY0_not_8);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, srcX1) == 12,
+ OffsetOf_BlitFramebufferCHROMIUM_srcX1_not_12);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, srcY1) == 16,
+ OffsetOf_BlitFramebufferCHROMIUM_srcY1_not_16);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, dstX0) == 20,
+ OffsetOf_BlitFramebufferCHROMIUM_dstX0_not_20);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, dstY0) == 24,
+ OffsetOf_BlitFramebufferCHROMIUM_dstY0_not_24);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, dstX1) == 28,
+ OffsetOf_BlitFramebufferCHROMIUM_dstX1_not_28);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, dstY1) == 32,
+ OffsetOf_BlitFramebufferCHROMIUM_dstY1_not_32);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, mask) == 36,
+ OffsetOf_BlitFramebufferCHROMIUM_mask_not_36);
+COMPILE_ASSERT(offsetof(BlitFramebufferCHROMIUM, filter) == 40,
+ OffsetOf_BlitFramebufferCHROMIUM_filter_not_40);
+
+// GL_CHROMIUM_framebuffer_multisample
+struct RenderbufferStorageMultisampleCHROMIUM {
+ typedef RenderbufferStorageMultisampleCHROMIUM ValueType;
+ static const CommandId kCmdId = kRenderbufferStorageMultisampleCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLsizei _samples,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height) {
+ SetHeader();
+ target = _target;
+ samples = _samples;
+ internalformat = _internalformat;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLsizei _samples,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _samples, _internalformat, _width, _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t samples;
+ uint32_t internalformat;
+ int32_t width;
+ int32_t height;
+};
+
+COMPILE_ASSERT(sizeof(RenderbufferStorageMultisampleCHROMIUM) == 24,
+ Sizeof_RenderbufferStorageMultisampleCHROMIUM_is_not_24);
+COMPILE_ASSERT(offsetof(RenderbufferStorageMultisampleCHROMIUM, header) == 0,
+ OffsetOf_RenderbufferStorageMultisampleCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(RenderbufferStorageMultisampleCHROMIUM, target) == 4,
+ OffsetOf_RenderbufferStorageMultisampleCHROMIUM_target_not_4);
+COMPILE_ASSERT(offsetof(RenderbufferStorageMultisampleCHROMIUM, samples) == 8,
+ OffsetOf_RenderbufferStorageMultisampleCHROMIUM_samples_not_8);
+COMPILE_ASSERT(
+ offsetof(RenderbufferStorageMultisampleCHROMIUM, internalformat) == 12,
+ OffsetOf_RenderbufferStorageMultisampleCHROMIUM_internalformat_not_12);
+COMPILE_ASSERT(offsetof(RenderbufferStorageMultisampleCHROMIUM, width) == 16,
+ OffsetOf_RenderbufferStorageMultisampleCHROMIUM_width_not_16);
+COMPILE_ASSERT(offsetof(RenderbufferStorageMultisampleCHROMIUM, height) == 20,
+ OffsetOf_RenderbufferStorageMultisampleCHROMIUM_height_not_20);
+
+// GL_EXT_multisampled_render_to_texture
+struct RenderbufferStorageMultisampleEXT {
+ typedef RenderbufferStorageMultisampleEXT ValueType;
+ static const CommandId kCmdId = kRenderbufferStorageMultisampleEXT;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLsizei _samples,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height) {
+ SetHeader();
+ target = _target;
+ samples = _samples;
+ internalformat = _internalformat;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLsizei _samples,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _samples, _internalformat, _width, _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t samples;
+ uint32_t internalformat;
+ int32_t width;
+ int32_t height;
+};
+
+COMPILE_ASSERT(sizeof(RenderbufferStorageMultisampleEXT) == 24,
+ Sizeof_RenderbufferStorageMultisampleEXT_is_not_24);
+COMPILE_ASSERT(offsetof(RenderbufferStorageMultisampleEXT, header) == 0,
+ OffsetOf_RenderbufferStorageMultisampleEXT_header_not_0);
+COMPILE_ASSERT(offsetof(RenderbufferStorageMultisampleEXT, target) == 4,
+ OffsetOf_RenderbufferStorageMultisampleEXT_target_not_4);
+COMPILE_ASSERT(offsetof(RenderbufferStorageMultisampleEXT, samples) == 8,
+ OffsetOf_RenderbufferStorageMultisampleEXT_samples_not_8);
+COMPILE_ASSERT(
+ offsetof(RenderbufferStorageMultisampleEXT, internalformat) == 12,
+ OffsetOf_RenderbufferStorageMultisampleEXT_internalformat_not_12);
+COMPILE_ASSERT(offsetof(RenderbufferStorageMultisampleEXT, width) == 16,
+ OffsetOf_RenderbufferStorageMultisampleEXT_width_not_16);
+COMPILE_ASSERT(offsetof(RenderbufferStorageMultisampleEXT, height) == 20,
+ OffsetOf_RenderbufferStorageMultisampleEXT_height_not_20);
+
+struct FramebufferTexture2DMultisampleEXT {
+ typedef FramebufferTexture2DMultisampleEXT ValueType;
+ static const CommandId kCmdId = kFramebufferTexture2DMultisampleEXT;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLenum _attachment,
+ GLenum _textarget,
+ GLuint _texture,
+ GLsizei _samples) {
+ SetHeader();
+ target = _target;
+ attachment = _attachment;
+ textarget = _textarget;
+ texture = _texture;
+ samples = _samples;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLenum _attachment,
+ GLenum _textarget,
+ GLuint _texture,
+ GLsizei _samples) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _attachment, _textarget, _texture, _samples);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t attachment;
+ uint32_t textarget;
+ uint32_t texture;
+ int32_t samples;
+ static const int32_t level = 0;
+};
+
+COMPILE_ASSERT(sizeof(FramebufferTexture2DMultisampleEXT) == 24,
+ Sizeof_FramebufferTexture2DMultisampleEXT_is_not_24);
+COMPILE_ASSERT(offsetof(FramebufferTexture2DMultisampleEXT, header) == 0,
+ OffsetOf_FramebufferTexture2DMultisampleEXT_header_not_0);
+COMPILE_ASSERT(offsetof(FramebufferTexture2DMultisampleEXT, target) == 4,
+ OffsetOf_FramebufferTexture2DMultisampleEXT_target_not_4);
+COMPILE_ASSERT(offsetof(FramebufferTexture2DMultisampleEXT, attachment) == 8,
+ OffsetOf_FramebufferTexture2DMultisampleEXT_attachment_not_8);
+COMPILE_ASSERT(offsetof(FramebufferTexture2DMultisampleEXT, textarget) == 12,
+ OffsetOf_FramebufferTexture2DMultisampleEXT_textarget_not_12);
+COMPILE_ASSERT(offsetof(FramebufferTexture2DMultisampleEXT, texture) == 16,
+ OffsetOf_FramebufferTexture2DMultisampleEXT_texture_not_16);
+COMPILE_ASSERT(offsetof(FramebufferTexture2DMultisampleEXT, samples) == 20,
+ OffsetOf_FramebufferTexture2DMultisampleEXT_samples_not_20);
+
+struct TexStorage2DEXT {
+ typedef TexStorage2DEXT ValueType;
+ static const CommandId kCmdId = kTexStorage2DEXT;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLsizei _levels,
+ GLenum _internalFormat,
+ GLsizei _width,
+ GLsizei _height) {
+ SetHeader();
+ target = _target;
+ levels = _levels;
+ internalFormat = _internalFormat;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLsizei _levels,
+ GLenum _internalFormat,
+ GLsizei _width,
+ GLsizei _height) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _levels, _internalFormat, _width, _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t levels;
+ uint32_t internalFormat;
+ int32_t width;
+ int32_t height;
+};
+
+COMPILE_ASSERT(sizeof(TexStorage2DEXT) == 24, Sizeof_TexStorage2DEXT_is_not_24);
+COMPILE_ASSERT(offsetof(TexStorage2DEXT, header) == 0,
+ OffsetOf_TexStorage2DEXT_header_not_0);
+COMPILE_ASSERT(offsetof(TexStorage2DEXT, target) == 4,
+ OffsetOf_TexStorage2DEXT_target_not_4);
+COMPILE_ASSERT(offsetof(TexStorage2DEXT, levels) == 8,
+ OffsetOf_TexStorage2DEXT_levels_not_8);
+COMPILE_ASSERT(offsetof(TexStorage2DEXT, internalFormat) == 12,
+ OffsetOf_TexStorage2DEXT_internalFormat_not_12);
+COMPILE_ASSERT(offsetof(TexStorage2DEXT, width) == 16,
+ OffsetOf_TexStorage2DEXT_width_not_16);
+COMPILE_ASSERT(offsetof(TexStorage2DEXT, height) == 20,
+ OffsetOf_TexStorage2DEXT_height_not_20);
+
+struct GenQueriesEXTImmediate {
+ typedef GenQueriesEXTImmediate ValueType;
+ static const CommandId kCmdId = kGenQueriesEXTImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, GLuint* _queries) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _queries, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, GLuint* _queries) {
+ static_cast<ValueType*>(cmd)->Init(_n, _queries);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(GenQueriesEXTImmediate) == 8,
+ Sizeof_GenQueriesEXTImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(GenQueriesEXTImmediate, header) == 0,
+ OffsetOf_GenQueriesEXTImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(GenQueriesEXTImmediate, n) == 4,
+ OffsetOf_GenQueriesEXTImmediate_n_not_4);
+
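+// Illustrative note (not generated): GenQueriesEXTImmediate sends 'n'
+// client-chosen ids inline after the header; the service registers those ids
+// rather than returning newly generated ones, which appears to rely on ids
+// being allocated on the client side before the command is issued.
+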
+struct DeleteQueriesEXTImmediate {
+ typedef DeleteQueriesEXTImmediate ValueType;
+ static const CommandId kCmdId = kDeleteQueriesEXTImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, const GLuint* _queries) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _queries, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, const GLuint* _queries) {
+ static_cast<ValueType*>(cmd)->Init(_n, _queries);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(DeleteQueriesEXTImmediate) == 8,
+ Sizeof_DeleteQueriesEXTImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(DeleteQueriesEXTImmediate, header) == 0,
+ OffsetOf_DeleteQueriesEXTImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(DeleteQueriesEXTImmediate, n) == 4,
+ OffsetOf_DeleteQueriesEXTImmediate_n_not_4);
+
+struct BeginQueryEXT {
+ typedef BeginQueryEXT ValueType;
+ static const CommandId kCmdId = kBeginQueryEXT;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLuint _id,
+ uint32_t _sync_data_shm_id,
+ uint32_t _sync_data_shm_offset) {
+ SetHeader();
+ target = _target;
+ id = _id;
+ sync_data_shm_id = _sync_data_shm_id;
+ sync_data_shm_offset = _sync_data_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLuint _id,
+ uint32_t _sync_data_shm_id,
+ uint32_t _sync_data_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _id, _sync_data_shm_id, _sync_data_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t id;
+ uint32_t sync_data_shm_id;
+ uint32_t sync_data_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(BeginQueryEXT) == 20, Sizeof_BeginQueryEXT_is_not_20);
+COMPILE_ASSERT(offsetof(BeginQueryEXT, header) == 0,
+ OffsetOf_BeginQueryEXT_header_not_0);
+COMPILE_ASSERT(offsetof(BeginQueryEXT, target) == 4,
+ OffsetOf_BeginQueryEXT_target_not_4);
+COMPILE_ASSERT(offsetof(BeginQueryEXT, id) == 8,
+ OffsetOf_BeginQueryEXT_id_not_8);
+COMPILE_ASSERT(offsetof(BeginQueryEXT, sync_data_shm_id) == 12,
+ OffsetOf_BeginQueryEXT_sync_data_shm_id_not_12);
+COMPILE_ASSERT(offsetof(BeginQueryEXT, sync_data_shm_offset) == 16,
+ OffsetOf_BeginQueryEXT_sync_data_shm_offset_not_16);
+
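+// Illustrative note (not generated): BeginQueryEXT carries a shared-memory
+// id/offset pair (sync_data_shm_id, sync_data_shm_offset) naming the block
+// where the service later writes the query result, so the client can poll
+// for completion without a synchronous round trip.
+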
+struct EndQueryEXT {
+ typedef EndQueryEXT ValueType;
+ static const CommandId kCmdId = kEndQueryEXT;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLuint _submit_count) {
+ SetHeader();
+ target = _target;
+ submit_count = _submit_count;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLuint _submit_count) {
+ static_cast<ValueType*>(cmd)->Init(_target, _submit_count);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t submit_count;
+};
+
+COMPILE_ASSERT(sizeof(EndQueryEXT) == 12, Sizeof_EndQueryEXT_is_not_12);
+COMPILE_ASSERT(offsetof(EndQueryEXT, header) == 0,
+ OffsetOf_EndQueryEXT_header_not_0);
+COMPILE_ASSERT(offsetof(EndQueryEXT, target) == 4,
+ OffsetOf_EndQueryEXT_target_not_4);
+COMPILE_ASSERT(offsetof(EndQueryEXT, submit_count) == 8,
+ OffsetOf_EndQueryEXT_submit_count_not_8);
+
+struct InsertEventMarkerEXT {
+ typedef InsertEventMarkerEXT ValueType;
+ static const CommandId kCmdId = kInsertEventMarkerEXT;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _bucket_id) {
+ SetHeader();
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, GLuint _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(InsertEventMarkerEXT) == 8,
+ Sizeof_InsertEventMarkerEXT_is_not_8);
+COMPILE_ASSERT(offsetof(InsertEventMarkerEXT, header) == 0,
+ OffsetOf_InsertEventMarkerEXT_header_not_0);
+COMPILE_ASSERT(offsetof(InsertEventMarkerEXT, bucket_id) == 4,
+ OffsetOf_InsertEventMarkerEXT_bucket_id_not_4);
+
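+// Illustrative note (not generated): the marker and extension-string
+// commands pass their variable-length string through a bucket identified by
+// bucket_id rather than inline; the bucket contents are presumably uploaded
+// by separate bucket commands before this command is issued.
+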
+struct PushGroupMarkerEXT {
+ typedef PushGroupMarkerEXT ValueType;
+ static const CommandId kCmdId = kPushGroupMarkerEXT;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _bucket_id) {
+ SetHeader();
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, GLuint _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(PushGroupMarkerEXT) == 8,
+ Sizeof_PushGroupMarkerEXT_is_not_8);
+COMPILE_ASSERT(offsetof(PushGroupMarkerEXT, header) == 0,
+ OffsetOf_PushGroupMarkerEXT_header_not_0);
+COMPILE_ASSERT(offsetof(PushGroupMarkerEXT, bucket_id) == 4,
+ OffsetOf_PushGroupMarkerEXT_bucket_id_not_4);
+
+struct PopGroupMarkerEXT {
+ typedef PopGroupMarkerEXT ValueType;
+ static const CommandId kCmdId = kPopGroupMarkerEXT;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(PopGroupMarkerEXT) == 4,
+ Sizeof_PopGroupMarkerEXT_is_not_4);
+COMPILE_ASSERT(offsetof(PopGroupMarkerEXT, header) == 0,
+ OffsetOf_PopGroupMarkerEXT_header_not_0);
+
+struct GenVertexArraysOESImmediate {
+ typedef GenVertexArraysOESImmediate ValueType;
+ static const CommandId kCmdId = kGenVertexArraysOESImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, GLuint* _arrays) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _arrays, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, GLuint* _arrays) {
+ static_cast<ValueType*>(cmd)->Init(_n, _arrays);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(GenVertexArraysOESImmediate) == 8,
+ Sizeof_GenVertexArraysOESImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(GenVertexArraysOESImmediate, header) == 0,
+ OffsetOf_GenVertexArraysOESImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(GenVertexArraysOESImmediate, n) == 4,
+ OffsetOf_GenVertexArraysOESImmediate_n_not_4);
+
+struct DeleteVertexArraysOESImmediate {
+ typedef DeleteVertexArraysOESImmediate ValueType;
+ static const CommandId kCmdId = kDeleteVertexArraysOESImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(n));
+ }
+
+ void Init(GLsizei _n, const GLuint* _arrays) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _arrays, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, const GLuint* _arrays) {
+ static_cast<ValueType*>(cmd)->Init(_n, _arrays);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+COMPILE_ASSERT(sizeof(DeleteVertexArraysOESImmediate) == 8,
+ Sizeof_DeleteVertexArraysOESImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(DeleteVertexArraysOESImmediate, header) == 0,
+ OffsetOf_DeleteVertexArraysOESImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(DeleteVertexArraysOESImmediate, n) == 4,
+ OffsetOf_DeleteVertexArraysOESImmediate_n_not_4);
+
+struct IsVertexArrayOES {
+ typedef IsVertexArrayOES ValueType;
+ static const CommandId kCmdId = kIsVertexArrayOES;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _array,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ array = _array;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _array,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_array, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t array;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(IsVertexArrayOES) == 16,
+ Sizeof_IsVertexArrayOES_is_not_16);
+COMPILE_ASSERT(offsetof(IsVertexArrayOES, header) == 0,
+ OffsetOf_IsVertexArrayOES_header_not_0);
+COMPILE_ASSERT(offsetof(IsVertexArrayOES, array) == 4,
+ OffsetOf_IsVertexArrayOES_array_not_4);
+COMPILE_ASSERT(offsetof(IsVertexArrayOES, result_shm_id) == 8,
+ OffsetOf_IsVertexArrayOES_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(IsVertexArrayOES, result_shm_offset) == 12,
+ OffsetOf_IsVertexArrayOES_result_shm_offset_not_12);
+
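+// Illustrative note (not generated): commands with a Result typedef, such as
+// IsVertexArrayOES, return their value by writing a Result into shared
+// memory at (result_shm_id, result_shm_offset), which the caller reads back
+// once the command has been processed. A hedged sketch (the shared-memory
+// pointer and id/offset names are assumptions):
+//
+//   typedef gles2::cmds::IsVertexArrayOES::Result Result;
+//   Result* result = static_cast<Result*>(result_shm_ptr);
+//   *result = 0;
+//   gles2::cmds::IsVertexArrayOES* c =
+//       GetCmdSpace<gles2::cmds::IsVertexArrayOES>();
+//   if (c) {
+//     c->Init(array, result_shm_id, result_shm_offset);
+//   }
+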
+struct BindVertexArrayOES {
+ typedef BindVertexArrayOES ValueType;
+ static const CommandId kCmdId = kBindVertexArrayOES;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _array) {
+ SetHeader();
+ array = _array;
+ }
+
+ void* Set(void* cmd, GLuint _array) {
+ static_cast<ValueType*>(cmd)->Init(_array);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t array;
+};
+
+COMPILE_ASSERT(sizeof(BindVertexArrayOES) == 8,
+ Sizeof_BindVertexArrayOES_is_not_8);
+COMPILE_ASSERT(offsetof(BindVertexArrayOES, header) == 0,
+ OffsetOf_BindVertexArrayOES_header_not_0);
+COMPILE_ASSERT(offsetof(BindVertexArrayOES, array) == 4,
+ OffsetOf_BindVertexArrayOES_array_not_4);
+
+struct SwapBuffers {
+ typedef SwapBuffers ValueType;
+ static const CommandId kCmdId = kSwapBuffers;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(SwapBuffers) == 4, Sizeof_SwapBuffers_is_not_4);
+COMPILE_ASSERT(offsetof(SwapBuffers, header) == 0,
+ OffsetOf_SwapBuffers_header_not_0);
+
+struct GetMaxValueInBufferCHROMIUM {
+ typedef GetMaxValueInBufferCHROMIUM ValueType;
+ static const CommandId kCmdId = kGetMaxValueInBufferCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef GLuint Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _buffer_id,
+ GLsizei _count,
+ GLenum _type,
+ GLuint _offset,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ buffer_id = _buffer_id;
+ count = _count;
+ type = _type;
+ offset = _offset;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _buffer_id,
+ GLsizei _count,
+ GLenum _type,
+ GLuint _offset,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(
+ _buffer_id, _count, _type, _offset, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t buffer_id;
+ int32_t count;
+ uint32_t type;
+ uint32_t offset;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GetMaxValueInBufferCHROMIUM) == 28,
+ Sizeof_GetMaxValueInBufferCHROMIUM_is_not_28);
+COMPILE_ASSERT(offsetof(GetMaxValueInBufferCHROMIUM, header) == 0,
+ OffsetOf_GetMaxValueInBufferCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(GetMaxValueInBufferCHROMIUM, buffer_id) == 4,
+ OffsetOf_GetMaxValueInBufferCHROMIUM_buffer_id_not_4);
+COMPILE_ASSERT(offsetof(GetMaxValueInBufferCHROMIUM, count) == 8,
+ OffsetOf_GetMaxValueInBufferCHROMIUM_count_not_8);
+COMPILE_ASSERT(offsetof(GetMaxValueInBufferCHROMIUM, type) == 12,
+ OffsetOf_GetMaxValueInBufferCHROMIUM_type_not_12);
+COMPILE_ASSERT(offsetof(GetMaxValueInBufferCHROMIUM, offset) == 16,
+ OffsetOf_GetMaxValueInBufferCHROMIUM_offset_not_16);
+COMPILE_ASSERT(offsetof(GetMaxValueInBufferCHROMIUM, result_shm_id) == 20,
+ OffsetOf_GetMaxValueInBufferCHROMIUM_result_shm_id_not_20);
+COMPILE_ASSERT(offsetof(GetMaxValueInBufferCHROMIUM, result_shm_offset) == 24,
+ OffsetOf_GetMaxValueInBufferCHROMIUM_result_shm_offset_not_24);
+
+struct EnableFeatureCHROMIUM {
+ typedef EnableFeatureCHROMIUM ValueType;
+ static const CommandId kCmdId = kEnableFeatureCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef GLint Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _bucket_id,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ bucket_id = _bucket_id;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _bucket_id,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_bucket_id, _result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t bucket_id;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(EnableFeatureCHROMIUM) == 16,
+ Sizeof_EnableFeatureCHROMIUM_is_not_16);
+COMPILE_ASSERT(offsetof(EnableFeatureCHROMIUM, header) == 0,
+ OffsetOf_EnableFeatureCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(EnableFeatureCHROMIUM, bucket_id) == 4,
+ OffsetOf_EnableFeatureCHROMIUM_bucket_id_not_4);
+COMPILE_ASSERT(offsetof(EnableFeatureCHROMIUM, result_shm_id) == 8,
+ OffsetOf_EnableFeatureCHROMIUM_result_shm_id_not_8);
+COMPILE_ASSERT(offsetof(EnableFeatureCHROMIUM, result_shm_offset) == 12,
+ OffsetOf_EnableFeatureCHROMIUM_result_shm_offset_not_12);
+
+struct ResizeCHROMIUM {
+ typedef ResizeCHROMIUM ValueType;
+ static const CommandId kCmdId = kResizeCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _width, GLuint _height, GLfloat _scale_factor) {
+ SetHeader();
+ width = _width;
+ height = _height;
+ scale_factor = _scale_factor;
+ }
+
+ void* Set(void* cmd, GLuint _width, GLuint _height, GLfloat _scale_factor) {
+ static_cast<ValueType*>(cmd)->Init(_width, _height, _scale_factor);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t width;
+ uint32_t height;
+ float scale_factor;
+};
+
+COMPILE_ASSERT(sizeof(ResizeCHROMIUM) == 16, Sizeof_ResizeCHROMIUM_is_not_16);
+COMPILE_ASSERT(offsetof(ResizeCHROMIUM, header) == 0,
+ OffsetOf_ResizeCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(ResizeCHROMIUM, width) == 4,
+ OffsetOf_ResizeCHROMIUM_width_not_4);
+COMPILE_ASSERT(offsetof(ResizeCHROMIUM, height) == 8,
+ OffsetOf_ResizeCHROMIUM_height_not_8);
+COMPILE_ASSERT(offsetof(ResizeCHROMIUM, scale_factor) == 12,
+ OffsetOf_ResizeCHROMIUM_scale_factor_not_12);
+
+struct GetRequestableExtensionsCHROMIUM {
+ typedef GetRequestableExtensionsCHROMIUM ValueType;
+ static const CommandId kCmdId = kGetRequestableExtensionsCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(uint32_t _bucket_id) {
+ SetHeader();
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, uint32_t _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(GetRequestableExtensionsCHROMIUM) == 8,
+ Sizeof_GetRequestableExtensionsCHROMIUM_is_not_8);
+COMPILE_ASSERT(offsetof(GetRequestableExtensionsCHROMIUM, header) == 0,
+ OffsetOf_GetRequestableExtensionsCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(GetRequestableExtensionsCHROMIUM, bucket_id) == 4,
+ OffsetOf_GetRequestableExtensionsCHROMIUM_bucket_id_not_4);
+
+struct RequestExtensionCHROMIUM {
+ typedef RequestExtensionCHROMIUM ValueType;
+ static const CommandId kCmdId = kRequestExtensionCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(uint32_t _bucket_id) {
+ SetHeader();
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, uint32_t _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(RequestExtensionCHROMIUM) == 8,
+ Sizeof_RequestExtensionCHROMIUM_is_not_8);
+COMPILE_ASSERT(offsetof(RequestExtensionCHROMIUM, header) == 0,
+ OffsetOf_RequestExtensionCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(RequestExtensionCHROMIUM, bucket_id) == 4,
+ OffsetOf_RequestExtensionCHROMIUM_bucket_id_not_4);
+
+struct GetMultipleIntegervCHROMIUM {
+ typedef GetMultipleIntegervCHROMIUM ValueType;
+ static const CommandId kCmdId = kGetMultipleIntegervCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(uint32_t _pnames_shm_id,
+ uint32_t _pnames_shm_offset,
+ GLuint _count,
+ uint32_t _results_shm_id,
+ uint32_t _results_shm_offset,
+ GLsizeiptr _size) {
+ SetHeader();
+ pnames_shm_id = _pnames_shm_id;
+ pnames_shm_offset = _pnames_shm_offset;
+ count = _count;
+ results_shm_id = _results_shm_id;
+ results_shm_offset = _results_shm_offset;
+ size = _size;
+ }
+
+ void* Set(void* cmd,
+ uint32_t _pnames_shm_id,
+ uint32_t _pnames_shm_offset,
+ GLuint _count,
+ uint32_t _results_shm_id,
+ uint32_t _results_shm_offset,
+ GLsizeiptr _size) {
+ static_cast<ValueType*>(cmd)->Init(_pnames_shm_id,
+ _pnames_shm_offset,
+ _count,
+ _results_shm_id,
+ _results_shm_offset,
+ _size);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t pnames_shm_id;
+ uint32_t pnames_shm_offset;
+ uint32_t count;
+ uint32_t results_shm_id;
+ uint32_t results_shm_offset;
+ int32_t size;
+};
+
+COMPILE_ASSERT(sizeof(GetMultipleIntegervCHROMIUM) == 28,
+ Sizeof_GetMultipleIntegervCHROMIUM_is_not_28);
+COMPILE_ASSERT(offsetof(GetMultipleIntegervCHROMIUM, header) == 0,
+ OffsetOf_GetMultipleIntegervCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(GetMultipleIntegervCHROMIUM, pnames_shm_id) == 4,
+ OffsetOf_GetMultipleIntegervCHROMIUM_pnames_shm_id_not_4);
+COMPILE_ASSERT(offsetof(GetMultipleIntegervCHROMIUM, pnames_shm_offset) == 8,
+ OffsetOf_GetMultipleIntegervCHROMIUM_pnames_shm_offset_not_8);
+COMPILE_ASSERT(offsetof(GetMultipleIntegervCHROMIUM, count) == 12,
+ OffsetOf_GetMultipleIntegervCHROMIUM_count_not_12);
+COMPILE_ASSERT(offsetof(GetMultipleIntegervCHROMIUM, results_shm_id) == 16,
+ OffsetOf_GetMultipleIntegervCHROMIUM_results_shm_id_not_16);
+COMPILE_ASSERT(offsetof(GetMultipleIntegervCHROMIUM, results_shm_offset) == 20,
+ OffsetOf_GetMultipleIntegervCHROMIUM_results_shm_offset_not_20);
+COMPILE_ASSERT(offsetof(GetMultipleIntegervCHROMIUM, size) == 24,
+ OffsetOf_GetMultipleIntegervCHROMIUM_size_not_24);
+
+struct GetProgramInfoCHROMIUM {
+ typedef GetProgramInfoCHROMIUM ValueType;
+ static const CommandId kCmdId = kGetProgramInfoCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ struct Result {
+ uint32_t link_status;
+ uint32_t num_attribs;
+ uint32_t num_uniforms;
+ };
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program, uint32_t _bucket_id) {
+ SetHeader();
+ program = _program;
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, GLuint _program, uint32_t _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_program, _bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(GetProgramInfoCHROMIUM) == 12,
+ Sizeof_GetProgramInfoCHROMIUM_is_not_12);
+COMPILE_ASSERT(offsetof(GetProgramInfoCHROMIUM, header) == 0,
+ OffsetOf_GetProgramInfoCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(GetProgramInfoCHROMIUM, program) == 4,
+ OffsetOf_GetProgramInfoCHROMIUM_program_not_4);
+COMPILE_ASSERT(offsetof(GetProgramInfoCHROMIUM, bucket_id) == 8,
+ OffsetOf_GetProgramInfoCHROMIUM_bucket_id_not_8);
+COMPILE_ASSERT(offsetof(GetProgramInfoCHROMIUM::Result, link_status) == 0,
+ OffsetOf_GetProgramInfoCHROMIUM_Result_link_status_not_0);
+COMPILE_ASSERT(offsetof(GetProgramInfoCHROMIUM::Result, num_attribs) == 4,
+ OffsetOf_GetProgramInfoCHROMIUM_Result_num_attribs_not_4);
+COMPILE_ASSERT(offsetof(GetProgramInfoCHROMIUM::Result, num_uniforms) == 8,
+ OffsetOf_GetProgramInfoCHROMIUM_Result_num_uniforms_not_8);
+
+struct GetTranslatedShaderSourceANGLE {
+ typedef GetTranslatedShaderSourceANGLE ValueType;
+ static const CommandId kCmdId = kGetTranslatedShaderSourceANGLE;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _shader, uint32_t _bucket_id) {
+ SetHeader();
+ shader = _shader;
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, GLuint _shader, uint32_t _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_shader, _bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t shader;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(GetTranslatedShaderSourceANGLE) == 12,
+ Sizeof_GetTranslatedShaderSourceANGLE_is_not_12);
+COMPILE_ASSERT(offsetof(GetTranslatedShaderSourceANGLE, header) == 0,
+ OffsetOf_GetTranslatedShaderSourceANGLE_header_not_0);
+COMPILE_ASSERT(offsetof(GetTranslatedShaderSourceANGLE, shader) == 4,
+ OffsetOf_GetTranslatedShaderSourceANGLE_shader_not_4);
+COMPILE_ASSERT(offsetof(GetTranslatedShaderSourceANGLE, bucket_id) == 8,
+ OffsetOf_GetTranslatedShaderSourceANGLE_bucket_id_not_8);
+
+struct PostSubBufferCHROMIUM {
+ typedef PostSubBufferCHROMIUM ValueType;
+ static const CommandId kCmdId = kPostSubBufferCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _x, GLint _y, GLint _width, GLint _height) {
+ SetHeader();
+ x = _x;
+ y = _y;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd, GLint _x, GLint _y, GLint _width, GLint _height) {
+ static_cast<ValueType*>(cmd)->Init(_x, _y, _width, _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t x;
+ int32_t y;
+ int32_t width;
+ int32_t height;
+};
+
+COMPILE_ASSERT(sizeof(PostSubBufferCHROMIUM) == 20,
+ Sizeof_PostSubBufferCHROMIUM_is_not_20);
+COMPILE_ASSERT(offsetof(PostSubBufferCHROMIUM, header) == 0,
+ OffsetOf_PostSubBufferCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(PostSubBufferCHROMIUM, x) == 4,
+ OffsetOf_PostSubBufferCHROMIUM_x_not_4);
+COMPILE_ASSERT(offsetof(PostSubBufferCHROMIUM, y) == 8,
+ OffsetOf_PostSubBufferCHROMIUM_y_not_8);
+COMPILE_ASSERT(offsetof(PostSubBufferCHROMIUM, width) == 12,
+ OffsetOf_PostSubBufferCHROMIUM_width_not_12);
+COMPILE_ASSERT(offsetof(PostSubBufferCHROMIUM, height) == 16,
+ OffsetOf_PostSubBufferCHROMIUM_height_not_16);
+
+struct TexImageIOSurface2DCHROMIUM {
+ typedef TexImageIOSurface2DCHROMIUM ValueType;
+ static const CommandId kCmdId = kTexImageIOSurface2DCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLsizei _width,
+ GLsizei _height,
+ GLuint _ioSurfaceId,
+ GLuint _plane) {
+ SetHeader();
+ target = _target;
+ width = _width;
+ height = _height;
+ ioSurfaceId = _ioSurfaceId;
+ plane = _plane;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLsizei _width,
+ GLsizei _height,
+ GLuint _ioSurfaceId,
+ GLuint _plane) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_target, _width, _height, _ioSurfaceId, _plane);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t width;
+ int32_t height;
+ uint32_t ioSurfaceId;
+ uint32_t plane;
+};
+
+COMPILE_ASSERT(sizeof(TexImageIOSurface2DCHROMIUM) == 24,
+ Sizeof_TexImageIOSurface2DCHROMIUM_is_not_24);
+COMPILE_ASSERT(offsetof(TexImageIOSurface2DCHROMIUM, header) == 0,
+ OffsetOf_TexImageIOSurface2DCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(TexImageIOSurface2DCHROMIUM, target) == 4,
+ OffsetOf_TexImageIOSurface2DCHROMIUM_target_not_4);
+COMPILE_ASSERT(offsetof(TexImageIOSurface2DCHROMIUM, width) == 8,
+ OffsetOf_TexImageIOSurface2DCHROMIUM_width_not_8);
+COMPILE_ASSERT(offsetof(TexImageIOSurface2DCHROMIUM, height) == 12,
+ OffsetOf_TexImageIOSurface2DCHROMIUM_height_not_12);
+COMPILE_ASSERT(offsetof(TexImageIOSurface2DCHROMIUM, ioSurfaceId) == 16,
+ OffsetOf_TexImageIOSurface2DCHROMIUM_ioSurfaceId_not_16);
+COMPILE_ASSERT(offsetof(TexImageIOSurface2DCHROMIUM, plane) == 20,
+ OffsetOf_TexImageIOSurface2DCHROMIUM_plane_not_20);
+
+struct CopyTextureCHROMIUM {
+ typedef CopyTextureCHROMIUM ValueType;
+ static const CommandId kCmdId = kCopyTextureCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLenum _source_id,
+ GLenum _dest_id,
+ GLint _level,
+ GLint _internalformat,
+ GLenum _dest_type) {
+ SetHeader();
+ target = _target;
+ source_id = _source_id;
+ dest_id = _dest_id;
+ level = _level;
+ internalformat = _internalformat;
+ dest_type = _dest_type;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLenum _source_id,
+ GLenum _dest_id,
+ GLint _level,
+ GLint _internalformat,
+ GLenum _dest_type) {
+ static_cast<ValueType*>(cmd)->Init(
+ _target, _source_id, _dest_id, _level, _internalformat, _dest_type);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t source_id;
+ uint32_t dest_id;
+ int32_t level;
+ int32_t internalformat;
+ uint32_t dest_type;
+};
+
+COMPILE_ASSERT(sizeof(CopyTextureCHROMIUM) == 28,
+ Sizeof_CopyTextureCHROMIUM_is_not_28);
+COMPILE_ASSERT(offsetof(CopyTextureCHROMIUM, header) == 0,
+ OffsetOf_CopyTextureCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(CopyTextureCHROMIUM, target) == 4,
+ OffsetOf_CopyTextureCHROMIUM_target_not_4);
+COMPILE_ASSERT(offsetof(CopyTextureCHROMIUM, source_id) == 8,
+ OffsetOf_CopyTextureCHROMIUM_source_id_not_8);
+COMPILE_ASSERT(offsetof(CopyTextureCHROMIUM, dest_id) == 12,
+ OffsetOf_CopyTextureCHROMIUM_dest_id_not_12);
+COMPILE_ASSERT(offsetof(CopyTextureCHROMIUM, level) == 16,
+ OffsetOf_CopyTextureCHROMIUM_level_not_16);
+COMPILE_ASSERT(offsetof(CopyTextureCHROMIUM, internalformat) == 20,
+ OffsetOf_CopyTextureCHROMIUM_internalformat_not_20);
+COMPILE_ASSERT(offsetof(CopyTextureCHROMIUM, dest_type) == 24,
+ OffsetOf_CopyTextureCHROMIUM_dest_type_not_24);
+
+struct DrawArraysInstancedANGLE {
+ typedef DrawArraysInstancedANGLE ValueType;
+ static const CommandId kCmdId = kDrawArraysInstancedANGLE;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode, GLint _first, GLsizei _count, GLsizei _primcount) {
+ SetHeader();
+ mode = _mode;
+ first = _first;
+ count = _count;
+ primcount = _primcount;
+ }
+
+ void* Set(void* cmd,
+ GLenum _mode,
+ GLint _first,
+ GLsizei _count,
+ GLsizei _primcount) {
+ static_cast<ValueType*>(cmd)->Init(_mode, _first, _count, _primcount);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+ int32_t first;
+ int32_t count;
+ int32_t primcount;
+};
+
+COMPILE_ASSERT(sizeof(DrawArraysInstancedANGLE) == 20,
+ Sizeof_DrawArraysInstancedANGLE_is_not_20);
+COMPILE_ASSERT(offsetof(DrawArraysInstancedANGLE, header) == 0,
+ OffsetOf_DrawArraysInstancedANGLE_header_not_0);
+COMPILE_ASSERT(offsetof(DrawArraysInstancedANGLE, mode) == 4,
+ OffsetOf_DrawArraysInstancedANGLE_mode_not_4);
+COMPILE_ASSERT(offsetof(DrawArraysInstancedANGLE, first) == 8,
+ OffsetOf_DrawArraysInstancedANGLE_first_not_8);
+COMPILE_ASSERT(offsetof(DrawArraysInstancedANGLE, count) == 12,
+ OffsetOf_DrawArraysInstancedANGLE_count_not_12);
+COMPILE_ASSERT(offsetof(DrawArraysInstancedANGLE, primcount) == 16,
+ OffsetOf_DrawArraysInstancedANGLE_primcount_not_16);
+
+struct DrawElementsInstancedANGLE {
+ typedef DrawElementsInstancedANGLE ValueType;
+ static const CommandId kCmdId = kDrawElementsInstancedANGLE;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode,
+ GLsizei _count,
+ GLenum _type,
+ GLuint _index_offset,
+ GLsizei _primcount) {
+ SetHeader();
+ mode = _mode;
+ count = _count;
+ type = _type;
+ index_offset = _index_offset;
+ primcount = _primcount;
+ }
+
+ void* Set(void* cmd,
+ GLenum _mode,
+ GLsizei _count,
+ GLenum _type,
+ GLuint _index_offset,
+ GLsizei _primcount) {
+ static_cast<ValueType*>(cmd)
+ ->Init(_mode, _count, _type, _index_offset, _primcount);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+ int32_t count;
+ uint32_t type;
+ uint32_t index_offset;
+ int32_t primcount;
+};
+
+COMPILE_ASSERT(sizeof(DrawElementsInstancedANGLE) == 24,
+ Sizeof_DrawElementsInstancedANGLE_is_not_24);
+COMPILE_ASSERT(offsetof(DrawElementsInstancedANGLE, header) == 0,
+ OffsetOf_DrawElementsInstancedANGLE_header_not_0);
+COMPILE_ASSERT(offsetof(DrawElementsInstancedANGLE, mode) == 4,
+ OffsetOf_DrawElementsInstancedANGLE_mode_not_4);
+COMPILE_ASSERT(offsetof(DrawElementsInstancedANGLE, count) == 8,
+ OffsetOf_DrawElementsInstancedANGLE_count_not_8);
+COMPILE_ASSERT(offsetof(DrawElementsInstancedANGLE, type) == 12,
+ OffsetOf_DrawElementsInstancedANGLE_type_not_12);
+COMPILE_ASSERT(offsetof(DrawElementsInstancedANGLE, index_offset) == 16,
+ OffsetOf_DrawElementsInstancedANGLE_index_offset_not_16);
+COMPILE_ASSERT(offsetof(DrawElementsInstancedANGLE, primcount) == 20,
+ OffsetOf_DrawElementsInstancedANGLE_primcount_not_20);
+
+struct VertexAttribDivisorANGLE {
+ typedef VertexAttribDivisorANGLE ValueType;
+ static const CommandId kCmdId = kVertexAttribDivisorANGLE;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _index, GLuint _divisor) {
+ SetHeader();
+ index = _index;
+ divisor = _divisor;
+ }
+
+ void* Set(void* cmd, GLuint _index, GLuint _divisor) {
+ static_cast<ValueType*>(cmd)->Init(_index, _divisor);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t index;
+ uint32_t divisor;
+};
+
+COMPILE_ASSERT(sizeof(VertexAttribDivisorANGLE) == 12,
+ Sizeof_VertexAttribDivisorANGLE_is_not_12);
+COMPILE_ASSERT(offsetof(VertexAttribDivisorANGLE, header) == 0,
+ OffsetOf_VertexAttribDivisorANGLE_header_not_0);
+COMPILE_ASSERT(offsetof(VertexAttribDivisorANGLE, index) == 4,
+ OffsetOf_VertexAttribDivisorANGLE_index_not_4);
+COMPILE_ASSERT(offsetof(VertexAttribDivisorANGLE, divisor) == 8,
+ OffsetOf_VertexAttribDivisorANGLE_divisor_not_8);
+
+struct ProduceTextureCHROMIUMImmediate {
+ typedef ProduceTextureCHROMIUMImmediate ValueType;
+ static const CommandId kCmdId = kProduceTextureCHROMIUMImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLbyte) * 64); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLenum _target, const GLbyte* _mailbox) {
+ SetHeader();
+ target = _target;
+ memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
+ }
+
+ void* Set(void* cmd, GLenum _target, const GLbyte* _mailbox) {
+ static_cast<ValueType*>(cmd)->Init(_target, _mailbox);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+};
+
+COMPILE_ASSERT(sizeof(ProduceTextureCHROMIUMImmediate) == 8,
+ Sizeof_ProduceTextureCHROMIUMImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(ProduceTextureCHROMIUMImmediate, header) == 0,
+ OffsetOf_ProduceTextureCHROMIUMImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(ProduceTextureCHROMIUMImmediate, target) == 4,
+ OffsetOf_ProduceTextureCHROMIUMImmediate_target_not_4);
+
+struct ProduceTextureDirectCHROMIUMImmediate {
+ typedef ProduceTextureDirectCHROMIUMImmediate ValueType;
+ static const CommandId kCmdId = kProduceTextureDirectCHROMIUMImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLbyte) * 64); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLuint _texture, GLenum _target, const GLbyte* _mailbox) {
+ SetHeader();
+ texture = _texture;
+ target = _target;
+ memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
+ }
+
+ void* Set(void* cmd,
+ GLuint _texture,
+ GLenum _target,
+ const GLbyte* _mailbox) {
+ static_cast<ValueType*>(cmd)->Init(_texture, _target, _mailbox);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture;
+ uint32_t target;
+};
+
+COMPILE_ASSERT(sizeof(ProduceTextureDirectCHROMIUMImmediate) == 12,
+ Sizeof_ProduceTextureDirectCHROMIUMImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(ProduceTextureDirectCHROMIUMImmediate, header) == 0,
+ OffsetOf_ProduceTextureDirectCHROMIUMImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(ProduceTextureDirectCHROMIUMImmediate, texture) == 4,
+ OffsetOf_ProduceTextureDirectCHROMIUMImmediate_texture_not_4);
+COMPILE_ASSERT(offsetof(ProduceTextureDirectCHROMIUMImmediate, target) == 8,
+ OffsetOf_ProduceTextureDirectCHROMIUMImmediate_target_not_8);
+
+struct ConsumeTextureCHROMIUMImmediate {
+ typedef ConsumeTextureCHROMIUMImmediate ValueType;
+ static const CommandId kCmdId = kConsumeTextureCHROMIUMImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLbyte) * 64); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLenum _target, const GLbyte* _mailbox) {
+ SetHeader();
+ target = _target;
+ memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
+ }
+
+ void* Set(void* cmd, GLenum _target, const GLbyte* _mailbox) {
+ static_cast<ValueType*>(cmd)->Init(_target, _mailbox);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+};
+
+COMPILE_ASSERT(sizeof(ConsumeTextureCHROMIUMImmediate) == 8,
+ Sizeof_ConsumeTextureCHROMIUMImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(ConsumeTextureCHROMIUMImmediate, header) == 0,
+ OffsetOf_ConsumeTextureCHROMIUMImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(ConsumeTextureCHROMIUMImmediate, target) == 4,
+ OffsetOf_ConsumeTextureCHROMIUMImmediate_target_not_4);
+
+struct BindUniformLocationCHROMIUMBucket {
+ typedef BindUniformLocationCHROMIUMBucket ValueType;
+ static const CommandId kCmdId = kBindUniformLocationCHROMIUMBucket;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program, GLint _location, uint32_t _name_bucket_id) {
+ SetHeader();
+ program = _program;
+ location = _location;
+ name_bucket_id = _name_bucket_id;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLint _location,
+ uint32_t _name_bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_program, _location, _name_bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ int32_t location;
+ uint32_t name_bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(BindUniformLocationCHROMIUMBucket) == 16,
+ Sizeof_BindUniformLocationCHROMIUMBucket_is_not_16);
+COMPILE_ASSERT(offsetof(BindUniformLocationCHROMIUMBucket, header) == 0,
+ OffsetOf_BindUniformLocationCHROMIUMBucket_header_not_0);
+COMPILE_ASSERT(offsetof(BindUniformLocationCHROMIUMBucket, program) == 4,
+ OffsetOf_BindUniformLocationCHROMIUMBucket_program_not_4);
+COMPILE_ASSERT(offsetof(BindUniformLocationCHROMIUMBucket, location) == 8,
+ OffsetOf_BindUniformLocationCHROMIUMBucket_location_not_8);
+COMPILE_ASSERT(
+ offsetof(BindUniformLocationCHROMIUMBucket, name_bucket_id) == 12,
+ OffsetOf_BindUniformLocationCHROMIUMBucket_name_bucket_id_not_12);
+
+struct BindTexImage2DCHROMIUM {
+ typedef BindTexImage2DCHROMIUM ValueType;
+ static const CommandId kCmdId = kBindTexImage2DCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLint _imageId) {
+ SetHeader();
+ target = _target;
+ imageId = _imageId;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLint _imageId) {
+ static_cast<ValueType*>(cmd)->Init(_target, _imageId);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t imageId;
+};
+
+COMPILE_ASSERT(sizeof(BindTexImage2DCHROMIUM) == 12,
+ Sizeof_BindTexImage2DCHROMIUM_is_not_12);
+COMPILE_ASSERT(offsetof(BindTexImage2DCHROMIUM, header) == 0,
+ OffsetOf_BindTexImage2DCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(BindTexImage2DCHROMIUM, target) == 4,
+ OffsetOf_BindTexImage2DCHROMIUM_target_not_4);
+COMPILE_ASSERT(offsetof(BindTexImage2DCHROMIUM, imageId) == 8,
+ OffsetOf_BindTexImage2DCHROMIUM_imageId_not_8);
+
+struct ReleaseTexImage2DCHROMIUM {
+ typedef ReleaseTexImage2DCHROMIUM ValueType;
+ static const CommandId kCmdId = kReleaseTexImage2DCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLint _imageId) {
+ SetHeader();
+ target = _target;
+ imageId = _imageId;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLint _imageId) {
+ static_cast<ValueType*>(cmd)->Init(_target, _imageId);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t imageId;
+};
+
+COMPILE_ASSERT(sizeof(ReleaseTexImage2DCHROMIUM) == 12,
+ Sizeof_ReleaseTexImage2DCHROMIUM_is_not_12);
+COMPILE_ASSERT(offsetof(ReleaseTexImage2DCHROMIUM, header) == 0,
+ OffsetOf_ReleaseTexImage2DCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(ReleaseTexImage2DCHROMIUM, target) == 4,
+ OffsetOf_ReleaseTexImage2DCHROMIUM_target_not_4);
+COMPILE_ASSERT(offsetof(ReleaseTexImage2DCHROMIUM, imageId) == 8,
+ OffsetOf_ReleaseTexImage2DCHROMIUM_imageId_not_8);
+
+struct TraceBeginCHROMIUM {
+ typedef TraceBeginCHROMIUM ValueType;
+ static const CommandId kCmdId = kTraceBeginCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _bucket_id) {
+ SetHeader();
+ bucket_id = _bucket_id;
+ }
+
+ void* Set(void* cmd, GLuint _bucket_id) {
+ static_cast<ValueType*>(cmd)->Init(_bucket_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t bucket_id;
+};
+
+COMPILE_ASSERT(sizeof(TraceBeginCHROMIUM) == 8,
+ Sizeof_TraceBeginCHROMIUM_is_not_8);
+COMPILE_ASSERT(offsetof(TraceBeginCHROMIUM, header) == 0,
+ OffsetOf_TraceBeginCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(TraceBeginCHROMIUM, bucket_id) == 4,
+ OffsetOf_TraceBeginCHROMIUM_bucket_id_not_4);
+
+struct TraceEndCHROMIUM {
+ typedef TraceEndCHROMIUM ValueType;
+ static const CommandId kCmdId = kTraceEndCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(TraceEndCHROMIUM) == 4, Sizeof_TraceEndCHROMIUM_is_not_4);
+COMPILE_ASSERT(offsetof(TraceEndCHROMIUM, header) == 0,
+ OffsetOf_TraceEndCHROMIUM_header_not_0);
+
+struct AsyncTexSubImage2DCHROMIUM {
+ typedef AsyncTexSubImage2DCHROMIUM ValueType;
+ static const CommandId kCmdId = kAsyncTexSubImage2DCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset,
+ uint32_t _async_upload_token,
+ uint32_t _sync_data_shm_id,
+ uint32_t _sync_data_shm_offset) {
+ SetHeader();
+ target = _target;
+ level = _level;
+ xoffset = _xoffset;
+ yoffset = _yoffset;
+ width = _width;
+ height = _height;
+ format = _format;
+ type = _type;
+ data_shm_id = _data_shm_id;
+ data_shm_offset = _data_shm_offset;
+ async_upload_token = _async_upload_token;
+ sync_data_shm_id = _sync_data_shm_id;
+ sync_data_shm_offset = _sync_data_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset,
+ uint32_t _async_upload_token,
+ uint32_t _sync_data_shm_id,
+ uint32_t _sync_data_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_target,
+ _level,
+ _xoffset,
+ _yoffset,
+ _width,
+ _height,
+ _format,
+ _type,
+ _data_shm_id,
+ _data_shm_offset,
+ _async_upload_token,
+ _sync_data_shm_id,
+ _sync_data_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t level;
+ int32_t xoffset;
+ int32_t yoffset;
+ int32_t width;
+ int32_t height;
+ uint32_t format;
+ uint32_t type;
+ uint32_t data_shm_id;
+ uint32_t data_shm_offset;
+ uint32_t async_upload_token;
+ uint32_t sync_data_shm_id;
+ uint32_t sync_data_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(AsyncTexSubImage2DCHROMIUM) == 56,
+ Sizeof_AsyncTexSubImage2DCHROMIUM_is_not_56);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, header) == 0,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, target) == 4,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_target_not_4);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, level) == 8,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_level_not_8);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, xoffset) == 12,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_xoffset_not_12);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, yoffset) == 16,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_yoffset_not_16);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, width) == 20,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_width_not_20);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, height) == 24,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_height_not_24);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, format) == 28,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_format_not_28);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, type) == 32,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_type_not_32);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, data_shm_id) == 36,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_data_shm_id_not_36);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, data_shm_offset) == 40,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_data_shm_offset_not_40);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, async_upload_token) == 44,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_async_upload_token_not_44);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, sync_data_shm_id) == 48,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_sync_data_shm_id_not_48);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, sync_data_shm_offset) == 52,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_sync_data_shm_offset_not_52);
+
+struct AsyncTexImage2DCHROMIUM {
+ typedef AsyncTexImage2DCHROMIUM ValueType;
+ static const CommandId kCmdId = kAsyncTexImage2DCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLint _level,
+ GLint _internalformat,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type,
+ uint32_t _pixels_shm_id,
+ uint32_t _pixels_shm_offset,
+ uint32_t _async_upload_token,
+ uint32_t _sync_data_shm_id,
+ uint32_t _sync_data_shm_offset) {
+ SetHeader();
+ target = _target;
+ level = _level;
+ internalformat = _internalformat;
+ width = _width;
+ height = _height;
+ format = _format;
+ type = _type;
+ pixels_shm_id = _pixels_shm_id;
+ pixels_shm_offset = _pixels_shm_offset;
+ async_upload_token = _async_upload_token;
+ sync_data_shm_id = _sync_data_shm_id;
+ sync_data_shm_offset = _sync_data_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLint _level,
+ GLint _internalformat,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type,
+ uint32_t _pixels_shm_id,
+ uint32_t _pixels_shm_offset,
+ uint32_t _async_upload_token,
+ uint32_t _sync_data_shm_id,
+ uint32_t _sync_data_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_target,
+ _level,
+ _internalformat,
+ _width,
+ _height,
+ _format,
+ _type,
+ _pixels_shm_id,
+ _pixels_shm_offset,
+ _async_upload_token,
+ _sync_data_shm_id,
+ _sync_data_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t level;
+ int32_t internalformat;
+ int32_t width;
+ int32_t height;
+ uint32_t format;
+ uint32_t type;
+ uint32_t pixels_shm_id;
+ uint32_t pixels_shm_offset;
+ uint32_t async_upload_token;
+ uint32_t sync_data_shm_id;
+ uint32_t sync_data_shm_offset;
+ static const int32_t border = 0;
+};
+
+COMPILE_ASSERT(sizeof(AsyncTexImage2DCHROMIUM) == 52,
+ Sizeof_AsyncTexImage2DCHROMIUM_is_not_52);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, header) == 0,
+ OffsetOf_AsyncTexImage2DCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, target) == 4,
+ OffsetOf_AsyncTexImage2DCHROMIUM_target_not_4);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, level) == 8,
+ OffsetOf_AsyncTexImage2DCHROMIUM_level_not_8);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, internalformat) == 12,
+ OffsetOf_AsyncTexImage2DCHROMIUM_internalformat_not_12);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, width) == 16,
+ OffsetOf_AsyncTexImage2DCHROMIUM_width_not_16);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, height) == 20,
+ OffsetOf_AsyncTexImage2DCHROMIUM_height_not_20);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, format) == 24,
+ OffsetOf_AsyncTexImage2DCHROMIUM_format_not_24);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, type) == 28,
+ OffsetOf_AsyncTexImage2DCHROMIUM_type_not_28);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, pixels_shm_id) == 32,
+ OffsetOf_AsyncTexImage2DCHROMIUM_pixels_shm_id_not_32);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, pixels_shm_offset) == 36,
+ OffsetOf_AsyncTexImage2DCHROMIUM_pixels_shm_offset_not_36);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, async_upload_token) == 40,
+ OffsetOf_AsyncTexImage2DCHROMIUM_async_upload_token_not_40);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, sync_data_shm_id) == 44,
+ OffsetOf_AsyncTexImage2DCHROMIUM_sync_data_shm_id_not_44);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, sync_data_shm_offset) == 48,
+ OffsetOf_AsyncTexImage2DCHROMIUM_sync_data_shm_offset_not_48);
+
+struct WaitAsyncTexImage2DCHROMIUM {
+ typedef WaitAsyncTexImage2DCHROMIUM ValueType;
+ static const CommandId kCmdId = kWaitAsyncTexImage2DCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target) {
+ SetHeader();
+ target = _target;
+ }
+
+ void* Set(void* cmd, GLenum _target) {
+ static_cast<ValueType*>(cmd)->Init(_target);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+};
+
+COMPILE_ASSERT(sizeof(WaitAsyncTexImage2DCHROMIUM) == 8,
+ Sizeof_WaitAsyncTexImage2DCHROMIUM_is_not_8);
+COMPILE_ASSERT(offsetof(WaitAsyncTexImage2DCHROMIUM, header) == 0,
+ OffsetOf_WaitAsyncTexImage2DCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(WaitAsyncTexImage2DCHROMIUM, target) == 4,
+ OffsetOf_WaitAsyncTexImage2DCHROMIUM_target_not_4);
+
+struct WaitAllAsyncTexImage2DCHROMIUM {
+ typedef WaitAllAsyncTexImage2DCHROMIUM ValueType;
+ static const CommandId kCmdId = kWaitAllAsyncTexImage2DCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(WaitAllAsyncTexImage2DCHROMIUM) == 4,
+ Sizeof_WaitAllAsyncTexImage2DCHROMIUM_is_not_4);
+COMPILE_ASSERT(offsetof(WaitAllAsyncTexImage2DCHROMIUM, header) == 0,
+ OffsetOf_WaitAllAsyncTexImage2DCHROMIUM_header_not_0);
+
+struct DiscardFramebufferEXTImmediate {
+ typedef DiscardFramebufferEXTImmediate ValueType;
+ static const CommandId kCmdId = kDiscardFramebufferEXTImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLenum) * 1 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLenum _target, GLsizei _count, const GLenum* _attachments) {
+ SetHeader(_count);
+ target = _target;
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _attachments, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLsizei _count,
+ const GLenum* _attachments) {
+ static_cast<ValueType*>(cmd)->Init(_target, _count, _attachments);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(DiscardFramebufferEXTImmediate) == 12,
+ Sizeof_DiscardFramebufferEXTImmediate_is_not_12);
+COMPILE_ASSERT(offsetof(DiscardFramebufferEXTImmediate, header) == 0,
+ OffsetOf_DiscardFramebufferEXTImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(DiscardFramebufferEXTImmediate, target) == 4,
+ OffsetOf_DiscardFramebufferEXTImmediate_target_not_4);
+COMPILE_ASSERT(offsetof(DiscardFramebufferEXTImmediate, count) == 8,
+ OffsetOf_DiscardFramebufferEXTImmediate_count_not_8);
+
+struct LoseContextCHROMIUM {
+ typedef LoseContextCHROMIUM ValueType;
+ static const CommandId kCmdId = kLoseContextCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _current, GLenum _other) {
+ SetHeader();
+ current = _current;
+ other = _other;
+ }
+
+ void* Set(void* cmd, GLenum _current, GLenum _other) {
+ static_cast<ValueType*>(cmd)->Init(_current, _other);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t current;
+ uint32_t other;
+};
+
+COMPILE_ASSERT(sizeof(LoseContextCHROMIUM) == 12,
+ Sizeof_LoseContextCHROMIUM_is_not_12);
+COMPILE_ASSERT(offsetof(LoseContextCHROMIUM, header) == 0,
+ OffsetOf_LoseContextCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(LoseContextCHROMIUM, current) == 4,
+ OffsetOf_LoseContextCHROMIUM_current_not_4);
+COMPILE_ASSERT(offsetof(LoseContextCHROMIUM, other) == 8,
+ OffsetOf_LoseContextCHROMIUM_other_not_8);
+
+struct WaitSyncPointCHROMIUM {
+ typedef WaitSyncPointCHROMIUM ValueType;
+ static const CommandId kCmdId = kWaitSyncPointCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _sync_point) {
+ SetHeader();
+ sync_point = _sync_point;
+ }
+
+ void* Set(void* cmd, GLuint _sync_point) {
+ static_cast<ValueType*>(cmd)->Init(_sync_point);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t sync_point;
+};
+
+COMPILE_ASSERT(sizeof(WaitSyncPointCHROMIUM) == 8,
+ Sizeof_WaitSyncPointCHROMIUM_is_not_8);
+COMPILE_ASSERT(offsetof(WaitSyncPointCHROMIUM, header) == 0,
+ OffsetOf_WaitSyncPointCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(WaitSyncPointCHROMIUM, sync_point) == 4,
+ OffsetOf_WaitSyncPointCHROMIUM_sync_point_not_4);
+
+struct DrawBuffersEXTImmediate {
+ typedef DrawBuffersEXTImmediate ValueType;
+ static const CommandId kCmdId = kDrawBuffersEXTImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(GLenum) * 1 * count); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei count) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(count)); // NOLINT
+ }
+
+ void SetHeader(GLsizei count) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(count));
+ }
+
+ void Init(GLsizei _count, const GLenum* _bufs) {
+ SetHeader(_count);
+ count = _count;
+ memcpy(ImmediateDataAddress(this), _bufs, ComputeDataSize(_count));
+ }
+
+ void* Set(void* cmd, GLsizei _count, const GLenum* _bufs) {
+ static_cast<ValueType*>(cmd)->Init(_count, _bufs);
+ const uint32_t size = ComputeSize(_count);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t count;
+};
+
+COMPILE_ASSERT(sizeof(DrawBuffersEXTImmediate) == 8,
+ Sizeof_DrawBuffersEXTImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(DrawBuffersEXTImmediate, header) == 0,
+ OffsetOf_DrawBuffersEXTImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(DrawBuffersEXTImmediate, count) == 4,
+ OffsetOf_DrawBuffersEXTImmediate_count_not_4);
+
+struct DiscardBackbufferCHROMIUM {
+ typedef DiscardBackbufferCHROMIUM ValueType;
+ static const CommandId kCmdId = kDiscardBackbufferCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(DiscardBackbufferCHROMIUM) == 4,
+ Sizeof_DiscardBackbufferCHROMIUM_is_not_4);
+COMPILE_ASSERT(offsetof(DiscardBackbufferCHROMIUM, header) == 0,
+ OffsetOf_DiscardBackbufferCHROMIUM_header_not_0);
+
+struct ScheduleOverlayPlaneCHROMIUM {
+ typedef ScheduleOverlayPlaneCHROMIUM ValueType;
+ static const CommandId kCmdId = kScheduleOverlayPlaneCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _plane_z_order,
+ GLenum _plane_transform,
+ GLuint _overlay_texture_id,
+ GLint _bounds_x,
+ GLint _bounds_y,
+ GLint _bounds_width,
+ GLint _bounds_height,
+ GLfloat _uv_x,
+ GLfloat _uv_y,
+ GLfloat _uv_width,
+ GLfloat _uv_height) {
+ SetHeader();
+ plane_z_order = _plane_z_order;
+ plane_transform = _plane_transform;
+ overlay_texture_id = _overlay_texture_id;
+ bounds_x = _bounds_x;
+ bounds_y = _bounds_y;
+ bounds_width = _bounds_width;
+ bounds_height = _bounds_height;
+ uv_x = _uv_x;
+ uv_y = _uv_y;
+ uv_width = _uv_width;
+ uv_height = _uv_height;
+ }
+
+ void* Set(void* cmd,
+ GLint _plane_z_order,
+ GLenum _plane_transform,
+ GLuint _overlay_texture_id,
+ GLint _bounds_x,
+ GLint _bounds_y,
+ GLint _bounds_width,
+ GLint _bounds_height,
+ GLfloat _uv_x,
+ GLfloat _uv_y,
+ GLfloat _uv_width,
+ GLfloat _uv_height) {
+ static_cast<ValueType*>(cmd)->Init(_plane_z_order,
+ _plane_transform,
+ _overlay_texture_id,
+ _bounds_x,
+ _bounds_y,
+ _bounds_width,
+ _bounds_height,
+ _uv_x,
+ _uv_y,
+ _uv_width,
+ _uv_height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t plane_z_order;
+ uint32_t plane_transform;
+ uint32_t overlay_texture_id;
+ int32_t bounds_x;
+ int32_t bounds_y;
+ int32_t bounds_width;
+ int32_t bounds_height;
+ float uv_x;
+ float uv_y;
+ float uv_width;
+ float uv_height;
+};
+
+COMPILE_ASSERT(sizeof(ScheduleOverlayPlaneCHROMIUM) == 48,
+ Sizeof_ScheduleOverlayPlaneCHROMIUM_is_not_48);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, header) == 0,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, plane_z_order) == 4,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_plane_z_order_not_4);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, plane_transform) == 8,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_plane_transform_not_8);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, overlay_texture_id) == 12,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_overlay_texture_id_not_12);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, bounds_x) == 16,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_bounds_x_not_16);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, bounds_y) == 20,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_bounds_y_not_20);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, bounds_width) == 24,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_bounds_width_not_24);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, bounds_height) == 28,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_bounds_height_not_28);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, uv_x) == 32,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_uv_x_not_32);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, uv_y) == 36,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_uv_y_not_36);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, uv_width) == 40,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_uv_width_not_40);
+COMPILE_ASSERT(offsetof(ScheduleOverlayPlaneCHROMIUM, uv_height) == 44,
+ OffsetOf_ScheduleOverlayPlaneCHROMIUM_uv_height_not_44);
+
+struct MatrixLoadfCHROMIUMImmediate {
+ typedef MatrixLoadfCHROMIUMImmediate ValueType;
+ static const CommandId kCmdId = kMatrixLoadfCHROMIUMImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLfloat) * 16); // NOLINT
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize()); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLenum _matrixMode, const GLfloat* _m) {
+ SetHeader();
+ matrixMode = _matrixMode;
+ memcpy(ImmediateDataAddress(this), _m, ComputeDataSize());
+ }
+
+ void* Set(void* cmd, GLenum _matrixMode, const GLfloat* _m) {
+ static_cast<ValueType*>(cmd)->Init(_matrixMode, _m);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t matrixMode;
+};
+
+COMPILE_ASSERT(sizeof(MatrixLoadfCHROMIUMImmediate) == 8,
+ Sizeof_MatrixLoadfCHROMIUMImmediate_is_not_8);
+COMPILE_ASSERT(offsetof(MatrixLoadfCHROMIUMImmediate, header) == 0,
+ OffsetOf_MatrixLoadfCHROMIUMImmediate_header_not_0);
+COMPILE_ASSERT(offsetof(MatrixLoadfCHROMIUMImmediate, matrixMode) == 4,
+ OffsetOf_MatrixLoadfCHROMIUMImmediate_matrixMode_not_4);
+
+struct MatrixLoadIdentityCHROMIUM {
+ typedef MatrixLoadIdentityCHROMIUM ValueType;
+ static const CommandId kCmdId = kMatrixLoadIdentityCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _matrixMode) {
+ SetHeader();
+ matrixMode = _matrixMode;
+ }
+
+ void* Set(void* cmd, GLenum _matrixMode) {
+ static_cast<ValueType*>(cmd)->Init(_matrixMode);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t matrixMode;
+};
+
+COMPILE_ASSERT(sizeof(MatrixLoadIdentityCHROMIUM) == 8,
+ Sizeof_MatrixLoadIdentityCHROMIUM_is_not_8);
+COMPILE_ASSERT(offsetof(MatrixLoadIdentityCHROMIUM, header) == 0,
+ OffsetOf_MatrixLoadIdentityCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(MatrixLoadIdentityCHROMIUM, matrixMode) == 4,
+ OffsetOf_MatrixLoadIdentityCHROMIUM_matrixMode_not_4);
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_AUTOGEN_H_
diff --git a/gpu/command_buffer/common/gles2_cmd_format_test.cc b/gpu/command_buffer/common/gles2_cmd_format_test.cc
new file mode 100644
index 0000000..717e6fb
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_format_test.cc
@@ -0,0 +1,125 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains unit tests for GLES2 commands
+
+#include <limits>
+
+#include "base/bind.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2FormatTest : public testing::Test {
+ protected:
+ static const unsigned char kInitialValue = 0xBD;
+
+ virtual void SetUp() {
+ memset(buffer_, kInitialValue, sizeof(buffer_));
+ }
+
+ virtual void TearDown() {
+ }
+
+ template <typename T>
+ T* GetBufferAs() {
+ return static_cast<T*>(static_cast<void*>(&buffer_));
+ }
+
+ void CheckBytesWritten(
+ const void* end, size_t expected_size, size_t written_size) {
+ size_t actual_size = static_cast<const unsigned char*>(end) -
+ GetBufferAs<const unsigned char>();
+ EXPECT_LT(actual_size, sizeof(buffer_));
+ EXPECT_GT(actual_size, 0u);
+ EXPECT_EQ(expected_size, actual_size);
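+    // Sanity-check the fill pattern: the byte just past the written region
+    // must still hold kInitialValue, while the last written byte must not.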
+ EXPECT_EQ(kInitialValue, buffer_[written_size]);
+ EXPECT_NE(kInitialValue, buffer_[written_size - 1]);
+ }
+
+ void CheckBytesWrittenMatchesExpectedSize(
+ const void* end, size_t expected_size) {
+ CheckBytesWritten(end, expected_size, expected_size);
+ }
+
+ private:
+ unsigned char buffer_[1024];
+};
+
+void SignalCompletion(uint32* assigned_async_token_ptr,
+ uint32 async_token,
+ AsyncUploadSync* sync) {
+ EXPECT_EQ(async_token, *assigned_async_token_ptr);
+ sync->SetAsyncUploadToken(async_token);
+}
+
+TEST(GLES2FormatAsyncUploadSyncTest, AsyncUploadSync) {
+ const size_t kSize = 10;
+ const size_t kCount = 1000;
+
+ base::Thread thread("GLES2FormatUploadSyncTest - Fake Upload Thread");
+ thread.Start();
+
+ // Run the same test 50 times so we retest the wrap as well.
+ for (size_t test_run = 0; test_run < 50; ++test_run) {
+ AsyncUploadSync sync;
+ sync.Reset();
+
+ uint32 buffer_tokens[kSize];
+ memset(buffer_tokens, 0, sizeof(buffer_tokens));
+
+ // Start with a token large enough so that we'll wrap.
+ uint32 async_token = std::numeric_limits<uint32>::max() - kCount / 2;
+
+ // Set initial async token.
+ sync.SetAsyncUploadToken(async_token);
+
+ for (size_t i = 0; i < kCount; ++i) {
+ size_t buffer = i % kSize;
+
+ // Loop until previous async token has passed if any was set.
+ while (buffer_tokens[buffer] &&
+ !sync.HasAsyncUploadTokenPassed(buffer_tokens[buffer]))
+ base::PlatformThread::YieldCurrentThread();
+
+ // Next token, skip 0.
+ async_token++;
+ if (async_token == 0)
+ async_token++;
+
+ // Set the buffer's associated token.
+ buffer_tokens[buffer] = async_token;
+
+ // Set the async upload token on the fake upload thread and assert that
+ // the associated buffer still has the given token.
+ thread.message_loop()->PostTask(FROM_HERE,
+ base::Bind(&SignalCompletion,
+ &buffer_tokens[buffer],
+ async_token,
+ &sync));
+ }
+
+ // Flush the thread message loop before starting again.
+ base::WaitableEvent waitable(false, false);
+ thread.message_loop()->PostTask(FROM_HERE,
+ base::Bind(&base::WaitableEvent::Signal,
+ base::Unretained(&waitable)));
+ waitable.Wait();
+ }
+}
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const unsigned char GLES2FormatTest::kInitialValue;
+#endif
+
+#include "gpu/command_buffer/common/gles2_cmd_format_test_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
new file mode 100644
index 0000000..7b7845b
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -0,0 +1,3435 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file contains unit tests for GLES2 commands
+// It is included by gles2_cmd_format_test.cc
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_TEST_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_TEST_AUTOGEN_H_
+
+TEST_F(GLES2FormatTest, ActiveTexture) {
+ cmds::ActiveTexture& cmd = *GetBufferAs<cmds::ActiveTexture>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ActiveTexture::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.texture);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, AttachShader) {
+ cmds::AttachShader& cmd = *GetBufferAs<cmds::AttachShader>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::AttachShader::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.shader);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BindAttribLocationBucket) {
+ cmds::BindAttribLocationBucket& cmd =
+ *GetBufferAs<cmds::BindAttribLocationBucket>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLuint>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BindAttribLocationBucket::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.index);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.name_bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BindBuffer) {
+ cmds::BindBuffer& cmd = *GetBufferAs<cmds::BindBuffer>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BindBuffer::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.buffer);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BindFramebuffer) {
+ cmds::BindFramebuffer& cmd = *GetBufferAs<cmds::BindFramebuffer>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BindFramebuffer::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.framebuffer);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BindRenderbuffer) {
+ cmds::BindRenderbuffer& cmd = *GetBufferAs<cmds::BindRenderbuffer>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BindRenderbuffer::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.renderbuffer);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BindTexture) {
+ cmds::BindTexture& cmd = *GetBufferAs<cmds::BindTexture>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BindTexture::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.texture);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BlendColor) {
+ cmds::BlendColor& cmd = *GetBufferAs<cmds::BlendColor>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLclampf>(11),
+ static_cast<GLclampf>(12),
+ static_cast<GLclampf>(13),
+ static_cast<GLclampf>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BlendColor::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLclampf>(11), cmd.red);
+ EXPECT_EQ(static_cast<GLclampf>(12), cmd.green);
+ EXPECT_EQ(static_cast<GLclampf>(13), cmd.blue);
+ EXPECT_EQ(static_cast<GLclampf>(14), cmd.alpha);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BlendEquation) {
+ cmds::BlendEquation& cmd = *GetBufferAs<cmds::BlendEquation>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BlendEquation::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BlendEquationSeparate) {
+ cmds::BlendEquationSeparate& cmd =
+ *GetBufferAs<cmds::BlendEquationSeparate>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLenum>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BlendEquationSeparate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.modeRGB);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.modeAlpha);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BlendFunc) {
+ cmds::BlendFunc& cmd = *GetBufferAs<cmds::BlendFunc>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLenum>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BlendFunc::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.sfactor);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.dfactor);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BlendFuncSeparate) {
+ cmds::BlendFuncSeparate& cmd = *GetBufferAs<cmds::BlendFuncSeparate>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLenum>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BlendFuncSeparate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.srcRGB);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.dstRGB);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.srcAlpha);
+ EXPECT_EQ(static_cast<GLenum>(14), cmd.dstAlpha);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
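+// Commands that carry bulk data refer to it indirectly: |data_shm_id| selects
+// a shared-memory transfer buffer and |data_shm_offset| is the byte offset of
+// the payload within it, so only fixed-size fields travel in the command
+// struct itself.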
+TEST_F(GLES2FormatTest, BufferData) {
+ cmds::BufferData& cmd = *GetBufferAs<cmds::BufferData>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLsizeiptr>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14),
+ static_cast<GLenum>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BufferData::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLsizeiptr>(12), cmd.size);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.data_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.data_shm_offset);
+ EXPECT_EQ(static_cast<GLenum>(15), cmd.usage);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BufferSubData) {
+ cmds::BufferSubData& cmd = *GetBufferAs<cmds::BufferSubData>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLintptr>(12),
+ static_cast<GLsizeiptr>(13),
+ static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BufferSubData::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLintptr>(12), cmd.offset);
+ EXPECT_EQ(static_cast<GLsizeiptr>(13), cmd.size);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.data_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.data_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CheckFramebufferStatus) {
+ cmds::CheckFramebufferStatus& cmd =
+ *GetBufferAs<cmds::CheckFramebufferStatus>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CheckFramebufferStatus::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Clear) {
+ cmds::Clear& cmd = *GetBufferAs<cmds::Clear>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLbitfield>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Clear::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLbitfield>(11), cmd.mask);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ClearColor) {
+ cmds::ClearColor& cmd = *GetBufferAs<cmds::ClearColor>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLclampf>(11),
+ static_cast<GLclampf>(12),
+ static_cast<GLclampf>(13),
+ static_cast<GLclampf>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ClearColor::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLclampf>(11), cmd.red);
+ EXPECT_EQ(static_cast<GLclampf>(12), cmd.green);
+ EXPECT_EQ(static_cast<GLclampf>(13), cmd.blue);
+ EXPECT_EQ(static_cast<GLclampf>(14), cmd.alpha);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ClearDepthf) {
+ cmds::ClearDepthf& cmd = *GetBufferAs<cmds::ClearDepthf>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLclampf>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ClearDepthf::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLclampf>(11), cmd.depth);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ClearStencil) {
+ cmds::ClearStencil& cmd = *GetBufferAs<cmds::ClearStencil>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ClearStencil::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.s);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ColorMask) {
+ cmds::ColorMask& cmd = *GetBufferAs<cmds::ColorMask>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLboolean>(11),
+ static_cast<GLboolean>(12),
+ static_cast<GLboolean>(13),
+ static_cast<GLboolean>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ColorMask::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLboolean>(11), cmd.red);
+ EXPECT_EQ(static_cast<GLboolean>(12), cmd.green);
+ EXPECT_EQ(static_cast<GLboolean>(13), cmd.blue);
+ EXPECT_EQ(static_cast<GLboolean>(14), cmd.alpha);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CompileShader) {
+ cmds::CompileShader& cmd = *GetBufferAs<cmds::CompileShader>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CompileShader::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.shader);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CompressedTexImage2DBucket) {
+ cmds::CompressedTexImage2DBucket& cmd =
+ *GetBufferAs<cmds::CompressedTexImage2DBucket>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLsizei>(14),
+ static_cast<GLsizei>(15),
+ static_cast<GLuint>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CompressedTexImage2DBucket::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.level);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.height);
+ EXPECT_EQ(static_cast<GLuint>(16), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CompressedTexImage2D) {
+ cmds::CompressedTexImage2D& cmd = *GetBufferAs<cmds::CompressedTexImage2D>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLsizei>(14),
+ static_cast<GLsizei>(15),
+ static_cast<GLsizei>(16),
+ static_cast<uint32_t>(17),
+ static_cast<uint32_t>(18));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CompressedTexImage2D::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.level);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.height);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.imageSize);
+ EXPECT_EQ(static_cast<uint32_t>(17), cmd.data_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(18), cmd.data_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CompressedTexSubImage2DBucket) {
+ cmds::CompressedTexSubImage2DBucket& cmd =
+ *GetBufferAs<cmds::CompressedTexSubImage2DBucket>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLint>(14),
+ static_cast<GLsizei>(15),
+ static_cast<GLsizei>(16),
+ static_cast<GLenum>(17),
+ static_cast<GLuint>(18));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CompressedTexSubImage2DBucket::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.level);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.xoffset);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.yoffset);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.height);
+ EXPECT_EQ(static_cast<GLenum>(17), cmd.format);
+ EXPECT_EQ(static_cast<GLuint>(18), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CompressedTexSubImage2D) {
+ cmds::CompressedTexSubImage2D& cmd =
+ *GetBufferAs<cmds::CompressedTexSubImage2D>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLint>(14),
+ static_cast<GLsizei>(15),
+ static_cast<GLsizei>(16),
+ static_cast<GLenum>(17),
+ static_cast<GLsizei>(18),
+ static_cast<uint32_t>(19),
+ static_cast<uint32_t>(20));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CompressedTexSubImage2D::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.level);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.xoffset);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.yoffset);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.height);
+ EXPECT_EQ(static_cast<GLenum>(17), cmd.format);
+ EXPECT_EQ(static_cast<GLsizei>(18), cmd.imageSize);
+ EXPECT_EQ(static_cast<uint32_t>(19), cmd.data_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(20), cmd.data_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CopyTexImage2D) {
+ cmds::CopyTexImage2D& cmd = *GetBufferAs<cmds::CopyTexImage2D>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLint>(14),
+ static_cast<GLint>(15),
+ static_cast<GLsizei>(16),
+ static_cast<GLsizei>(17));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CopyTexImage2D::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.level);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.y);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(17), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CopyTexSubImage2D) {
+ cmds::CopyTexSubImage2D& cmd = *GetBufferAs<cmds::CopyTexSubImage2D>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLint>(14),
+ static_cast<GLint>(15),
+ static_cast<GLint>(16),
+ static_cast<GLsizei>(17),
+ static_cast<GLsizei>(18));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CopyTexSubImage2D::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.level);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.xoffset);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.yoffset);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(16), cmd.y);
+ EXPECT_EQ(static_cast<GLsizei>(17), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(18), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CreateProgram) {
+ cmds::CreateProgram& cmd = *GetBufferAs<cmds::CreateProgram>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<uint32_t>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CreateProgram::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<uint32_t>(11), cmd.client_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CreateShader) {
+ cmds::CreateShader& cmd = *GetBufferAs<cmds::CreateShader>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<uint32_t>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CreateShader::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.type);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.client_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CullFace) {
+ cmds::CullFace& cmd = *GetBufferAs<cmds::CullFace>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CullFace::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
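+// "Immediate" commands place their payload (here the GLuint ids) directly
+// after the command struct in the command buffer, so the expected size is
+// sizeof(cmd) plus the id array rounded up to whole 4-byte command entries.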
+TEST_F(GLES2FormatTest, DeleteBuffersImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::DeleteBuffersImmediate& cmd =
+ *GetBufferAs<cmds::DeleteBuffersImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteBuffersImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, DeleteFramebuffersImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::DeleteFramebuffersImmediate& cmd =
+ *GetBufferAs<cmds::DeleteFramebuffersImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteFramebuffersImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, DeleteProgram) {
+ cmds::DeleteProgram& cmd = *GetBufferAs<cmds::DeleteProgram>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteProgram::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DeleteRenderbuffersImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::DeleteRenderbuffersImmediate& cmd =
+ *GetBufferAs<cmds::DeleteRenderbuffersImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteRenderbuffersImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, DeleteShader) {
+ cmds::DeleteShader& cmd = *GetBufferAs<cmds::DeleteShader>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteShader::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.shader);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DeleteTexturesImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::DeleteTexturesImmediate& cmd =
+ *GetBufferAs<cmds::DeleteTexturesImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteTexturesImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, DepthFunc) {
+ cmds::DepthFunc& cmd = *GetBufferAs<cmds::DepthFunc>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DepthFunc::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.func);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DepthMask) {
+ cmds::DepthMask& cmd = *GetBufferAs<cmds::DepthMask>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLboolean>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DepthMask::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLboolean>(11), cmd.flag);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DepthRangef) {
+ cmds::DepthRangef& cmd = *GetBufferAs<cmds::DepthRangef>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLclampf>(11), static_cast<GLclampf>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DepthRangef::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLclampf>(11), cmd.zNear);
+ EXPECT_EQ(static_cast<GLclampf>(12), cmd.zFar);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DetachShader) {
+ cmds::DetachShader& cmd = *GetBufferAs<cmds::DetachShader>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DetachShader::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.shader);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Disable) {
+ cmds::Disable& cmd = *GetBufferAs<cmds::Disable>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Disable::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.cap);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DisableVertexAttribArray) {
+ cmds::DisableVertexAttribArray& cmd =
+ *GetBufferAs<cmds::DisableVertexAttribArray>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DisableVertexAttribArray::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.index);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DrawArrays) {
+ cmds::DrawArrays& cmd = *GetBufferAs<cmds::DrawArrays>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLsizei>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DrawArrays::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.first);
+ EXPECT_EQ(static_cast<GLsizei>(13), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DrawElements) {
+ cmds::DrawElements& cmd = *GetBufferAs<cmds::DrawElements>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLsizei>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLuint>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DrawElements::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ EXPECT_EQ(static_cast<GLsizei>(12), cmd.count);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.type);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.index_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Enable) {
+ cmds::Enable& cmd = *GetBufferAs<cmds::Enable>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Enable::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.cap);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, EnableVertexAttribArray) {
+ cmds::EnableVertexAttribArray& cmd =
+ *GetBufferAs<cmds::EnableVertexAttribArray>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::EnableVertexAttribArray::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.index);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Finish) {
+ cmds::Finish& cmd = *GetBufferAs<cmds::Finish>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Finish::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Flush) {
+ cmds::Flush& cmd = *GetBufferAs<cmds::Flush>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Flush::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, FramebufferRenderbuffer) {
+ cmds::FramebufferRenderbuffer& cmd =
+ *GetBufferAs<cmds::FramebufferRenderbuffer>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLuint>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::FramebufferRenderbuffer::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.attachment);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.renderbuffertarget);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.renderbuffer);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, FramebufferTexture2D) {
+ cmds::FramebufferTexture2D& cmd = *GetBufferAs<cmds::FramebufferTexture2D>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLuint>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::FramebufferTexture2D::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.attachment);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.textarget);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.texture);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, FrontFace) {
+ cmds::FrontFace& cmd = *GetBufferAs<cmds::FrontFace>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::FrontFace::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GenBuffersImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::GenBuffersImmediate& cmd = *GetBufferAs<cmds::GenBuffersImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GenBuffersImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, GenerateMipmap) {
+ cmds::GenerateMipmap& cmd = *GetBufferAs<cmds::GenerateMipmap>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GenerateMipmap::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GenFramebuffersImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::GenFramebuffersImmediate& cmd =
+ *GetBufferAs<cmds::GenFramebuffersImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GenFramebuffersImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, GenRenderbuffersImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::GenRenderbuffersImmediate& cmd =
+ *GetBufferAs<cmds::GenRenderbuffersImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GenRenderbuffersImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, GenTexturesImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::GenTexturesImmediate& cmd = *GetBufferAs<cmds::GenTexturesImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GenTexturesImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, GetActiveAttrib) {
+ cmds::GetActiveAttrib& cmd = *GetBufferAs<cmds::GetActiveAttrib>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLuint>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetActiveAttrib::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.index);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.name_bucket_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetActiveUniform) {
+ cmds::GetActiveUniform& cmd = *GetBufferAs<cmds::GetActiveUniform>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLuint>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetActiveUniform::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.index);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.name_bucket_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetAttachedShaders) {
+ cmds::GetAttachedShaders& cmd = *GetBufferAs<cmds::GetAttachedShaders>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetAttachedShaders::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.result_size);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetAttribLocation) {
+ cmds::GetAttribLocation& cmd = *GetBufferAs<cmds::GetAttribLocation>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetAttribLocation::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.name_bucket_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.location_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.location_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetBooleanv) {
+ cmds::GetBooleanv& cmd = *GetBufferAs<cmds::GetBooleanv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetBooleanv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetBufferParameteriv) {
+ cmds::GetBufferParameteriv& cmd = *GetBufferAs<cmds::GetBufferParameteriv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetBufferParameteriv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetError) {
+ cmds::GetError& cmd = *GetBufferAs<cmds::GetError>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<uint32_t>(11), static_cast<uint32_t>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetError::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<uint32_t>(11), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetFloatv) {
+ cmds::GetFloatv& cmd = *GetBufferAs<cmds::GetFloatv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetFloatv::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetFramebufferAttachmentParameteriv) {
+ cmds::GetFramebufferAttachmentParameteriv& cmd =
+ *GetBufferAs<cmds::GetFramebufferAttachmentParameteriv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLenum>(13),
+ static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::GetFramebufferAttachmentParameteriv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.attachment);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetIntegerv) {
+ cmds::GetIntegerv& cmd = *GetBufferAs<cmds::GetIntegerv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetIntegerv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetProgramiv) {
+ cmds::GetProgramiv& cmd = *GetBufferAs<cmds::GetProgramiv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLenum>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetProgramiv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetProgramInfoLog) {
+ cmds::GetProgramInfoLog& cmd = *GetBufferAs<cmds::GetProgramInfoLog>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<uint32_t>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetProgramInfoLog::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetRenderbufferParameteriv) {
+ cmds::GetRenderbufferParameteriv& cmd =
+ *GetBufferAs<cmds::GetRenderbufferParameteriv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetRenderbufferParameteriv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetShaderiv) {
+ cmds::GetShaderiv& cmd = *GetBufferAs<cmds::GetShaderiv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLenum>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetShaderiv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.shader);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetShaderInfoLog) {
+ cmds::GetShaderInfoLog& cmd = *GetBufferAs<cmds::GetShaderInfoLog>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<uint32_t>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetShaderInfoLog::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.shader);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetShaderPrecisionFormat) {
+ cmds::GetShaderPrecisionFormat& cmd =
+ *GetBufferAs<cmds::GetShaderPrecisionFormat>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetShaderPrecisionFormat::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.shadertype);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.precisiontype);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetShaderSource) {
+ cmds::GetShaderSource& cmd = *GetBufferAs<cmds::GetShaderSource>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<uint32_t>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetShaderSource::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.shader);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetString) {
+ cmds::GetString& cmd = *GetBufferAs<cmds::GetString>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<uint32_t>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetString::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.name);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetTexParameterfv) {
+ cmds::GetTexParameterfv& cmd = *GetBufferAs<cmds::GetTexParameterfv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetTexParameterfv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetTexParameteriv) {
+ cmds::GetTexParameteriv& cmd = *GetBufferAs<cmds::GetTexParameteriv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetTexParameteriv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetUniformfv) {
+ cmds::GetUniformfv& cmd = *GetBufferAs<cmds::GetUniformfv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLint>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetUniformfv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.location);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetUniformiv) {
+ cmds::GetUniformiv& cmd = *GetBufferAs<cmds::GetUniformiv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLint>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetUniformiv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.location);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetUniformLocation) {
+ cmds::GetUniformLocation& cmd = *GetBufferAs<cmds::GetUniformLocation>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetUniformLocation::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.name_bucket_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.location_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.location_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetVertexAttribfv) {
+ cmds::GetVertexAttribfv& cmd = *GetBufferAs<cmds::GetVertexAttribfv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLenum>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetVertexAttribfv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.index);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetVertexAttribiv) {
+ cmds::GetVertexAttribiv& cmd = *GetBufferAs<cmds::GetVertexAttribiv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLenum>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetVertexAttribiv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.index);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetVertexAttribPointerv) {
+ cmds::GetVertexAttribPointerv& cmd =
+ *GetBufferAs<cmds::GetVertexAttribPointerv>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLenum>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetVertexAttribPointerv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.index);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.pointer_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.pointer_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Hint) {
+ cmds::Hint& cmd = *GetBufferAs<cmds::Hint>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLenum>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Hint::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.mode);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, IsBuffer) {
+ cmds::IsBuffer& cmd = *GetBufferAs<cmds::IsBuffer>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::IsBuffer::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.buffer);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, IsEnabled) {
+ cmds::IsEnabled& cmd = *GetBufferAs<cmds::IsEnabled>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::IsEnabled::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.cap);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, IsFramebuffer) {
+ cmds::IsFramebuffer& cmd = *GetBufferAs<cmds::IsFramebuffer>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::IsFramebuffer::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.framebuffer);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, IsProgram) {
+ cmds::IsProgram& cmd = *GetBufferAs<cmds::IsProgram>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::IsProgram::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, IsRenderbuffer) {
+ cmds::IsRenderbuffer& cmd = *GetBufferAs<cmds::IsRenderbuffer>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::IsRenderbuffer::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.renderbuffer);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, IsShader) {
+ cmds::IsShader& cmd = *GetBufferAs<cmds::IsShader>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::IsShader::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.shader);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, IsTexture) {
+ cmds::IsTexture& cmd = *GetBufferAs<cmds::IsTexture>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::IsTexture::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.texture);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, LineWidth) {
+ cmds::LineWidth& cmd = *GetBufferAs<cmds::LineWidth>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLfloat>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::LineWidth::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLfloat>(11), cmd.width);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, LinkProgram) {
+ cmds::LinkProgram& cmd = *GetBufferAs<cmds::LinkProgram>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::LinkProgram::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, PixelStorei) {
+ cmds::PixelStorei& cmd = *GetBufferAs<cmds::PixelStorei>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::PixelStorei::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.pname);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.param);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, PolygonOffset) {
+ cmds::PolygonOffset& cmd = *GetBufferAs<cmds::PolygonOffset>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLfloat>(11), static_cast<GLfloat>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::PolygonOffset::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLfloat>(11), cmd.factor);
+ EXPECT_EQ(static_cast<GLfloat>(12), cmd.units);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ReadPixels) {
+ cmds::ReadPixels& cmd = *GetBufferAs<cmds::ReadPixels>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLint>(12),
+ static_cast<GLsizei>(13),
+ static_cast<GLsizei>(14),
+ static_cast<GLenum>(15),
+ static_cast<GLenum>(16),
+ static_cast<uint32_t>(17),
+ static_cast<uint32_t>(18),
+ static_cast<uint32_t>(19),
+ static_cast<uint32_t>(20),
+ static_cast<GLboolean>(21));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ReadPixels::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.y);
+ EXPECT_EQ(static_cast<GLsizei>(13), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.height);
+ EXPECT_EQ(static_cast<GLenum>(15), cmd.format);
+ EXPECT_EQ(static_cast<GLenum>(16), cmd.type);
+ EXPECT_EQ(static_cast<uint32_t>(17), cmd.pixels_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(18), cmd.pixels_shm_offset);
+ EXPECT_EQ(static_cast<uint32_t>(19), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(20), cmd.result_shm_offset);
+ EXPECT_EQ(static_cast<GLboolean>(21), cmd.async);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ReleaseShaderCompiler) {
+ cmds::ReleaseShaderCompiler& cmd =
+ *GetBufferAs<cmds::ReleaseShaderCompiler>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ReleaseShaderCompiler::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, RenderbufferStorage) {
+ cmds::RenderbufferStorage& cmd = *GetBufferAs<cmds::RenderbufferStorage>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLsizei>(13),
+ static_cast<GLsizei>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::RenderbufferStorage::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLsizei>(13), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, SampleCoverage) {
+ cmds::SampleCoverage& cmd = *GetBufferAs<cmds::SampleCoverage>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLclampf>(11), static_cast<GLboolean>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::SampleCoverage::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLclampf>(11), cmd.value);
+ EXPECT_EQ(static_cast<GLboolean>(12), cmd.invert);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Scissor) {
+ cmds::Scissor& cmd = *GetBufferAs<cmds::Scissor>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLint>(12),
+ static_cast<GLsizei>(13),
+ static_cast<GLsizei>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Scissor::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.y);
+ EXPECT_EQ(static_cast<GLsizei>(13), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ShaderBinary) {
+ cmds::ShaderBinary& cmd = *GetBufferAs<cmds::ShaderBinary>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLsizei>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13),
+ static_cast<GLenum>(14),
+ static_cast<uint32_t>(15),
+ static_cast<uint32_t>(16),
+ static_cast<GLsizei>(17));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ShaderBinary::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(11), cmd.n);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.shaders_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.shaders_shm_offset);
+ EXPECT_EQ(static_cast<GLenum>(14), cmd.binaryformat);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.binary_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(16), cmd.binary_shm_offset);
+ EXPECT_EQ(static_cast<GLsizei>(17), cmd.length);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ShaderSourceBucket) {
+ cmds::ShaderSourceBucket& cmd = *GetBufferAs<cmds::ShaderSourceBucket>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<uint32_t>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ShaderSourceBucket::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.shader);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.data_bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, StencilFunc) {
+ cmds::StencilFunc& cmd = *GetBufferAs<cmds::StencilFunc>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLuint>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::StencilFunc::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.func);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.ref);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.mask);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, StencilFuncSeparate) {
+ cmds::StencilFuncSeparate& cmd = *GetBufferAs<cmds::StencilFuncSeparate>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLint>(13),
+ static_cast<GLuint>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::StencilFuncSeparate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.face);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.func);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.ref);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.mask);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, StencilMask) {
+ cmds::StencilMask& cmd = *GetBufferAs<cmds::StencilMask>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::StencilMask::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.mask);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, StencilMaskSeparate) {
+ cmds::StencilMaskSeparate& cmd = *GetBufferAs<cmds::StencilMaskSeparate>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::StencilMaskSeparate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.face);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.mask);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, StencilOp) {
+ cmds::StencilOp& cmd = *GetBufferAs<cmds::StencilOp>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLenum>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::StencilOp::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.fail);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.zfail);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.zpass);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, StencilOpSeparate) {
+ cmds::StencilOpSeparate& cmd = *GetBufferAs<cmds::StencilOpSeparate>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLenum>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::StencilOpSeparate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.face);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.fail);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.zfail);
+ EXPECT_EQ(static_cast<GLenum>(14), cmd.zpass);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, TexImage2D) {
+ cmds::TexImage2D& cmd = *GetBufferAs<cmds::TexImage2D>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLsizei>(14),
+ static_cast<GLsizei>(15),
+ static_cast<GLenum>(16),
+ static_cast<GLenum>(17),
+ static_cast<uint32_t>(18),
+ static_cast<uint32_t>(19));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TexImage2D::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.level);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.height);
+ EXPECT_EQ(static_cast<GLenum>(16), cmd.format);
+ EXPECT_EQ(static_cast<GLenum>(17), cmd.type);
+ EXPECT_EQ(static_cast<uint32_t>(18), cmd.pixels_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(19), cmd.pixels_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, TexParameterf) {
+ cmds::TexParameterf& cmd = *GetBufferAs<cmds::TexParameterf>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLfloat>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TexParameterf::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<GLfloat>(13), cmd.param);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, TexParameterfvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ };
+ cmds::TexParameterfvImmediate& cmd =
+ *GetBufferAs<cmds::TexParameterfvImmediate>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLenum>(12), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TexParameterfvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, TexParameteri) {
+ cmds::TexParameteri& cmd = *GetBufferAs<cmds::TexParameteri>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLint>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TexParameteri::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.param);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, TexParameterivImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLint data[] = {
+ static_cast<GLint>(kSomeBaseValueToTestWith + 0),
+ };
+ cmds::TexParameterivImmediate& cmd =
+ *GetBufferAs<cmds::TexParameterivImmediate>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLenum>(12), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TexParameterivImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, TexSubImage2D) {
+ cmds::TexSubImage2D& cmd = *GetBufferAs<cmds::TexSubImage2D>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLint>(14),
+ static_cast<GLsizei>(15),
+ static_cast<GLsizei>(16),
+ static_cast<GLenum>(17),
+ static_cast<GLenum>(18),
+ static_cast<uint32_t>(19),
+ static_cast<uint32_t>(20),
+ static_cast<GLboolean>(21));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TexSubImage2D::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.level);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.xoffset);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.yoffset);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.height);
+ EXPECT_EQ(static_cast<GLenum>(17), cmd.format);
+ EXPECT_EQ(static_cast<GLenum>(18), cmd.type);
+ EXPECT_EQ(static_cast<uint32_t>(19), cmd.pixels_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(20), cmd.pixels_shm_offset);
+ EXPECT_EQ(static_cast<GLboolean>(21), cmd.internal);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Uniform1f) {
+ cmds::Uniform1f& cmd = *GetBufferAs<cmds::Uniform1f>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(11), static_cast<GLfloat>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform1f::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.location);
+ EXPECT_EQ(static_cast<GLfloat>(12), cmd.x);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Uniform1fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ };
+ cmds::Uniform1fvImmediate& cmd = *GetBufferAs<cmds::Uniform1fvImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLfloat) * 1;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform1fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, Uniform1i) {
+ cmds::Uniform1i& cmd = *GetBufferAs<cmds::Uniform1i>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(11), static_cast<GLint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform1i::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.location);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.x);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Uniform1ivImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLint data[] = {
+ static_cast<GLint>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 1),
+ };
+ cmds::Uniform1ivImmediate& cmd = *GetBufferAs<cmds::Uniform1ivImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLint) * 1;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform1ivImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, Uniform2f) {
+ cmds::Uniform2f& cmd = *GetBufferAs<cmds::Uniform2f>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLfloat>(12),
+ static_cast<GLfloat>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform2f::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.location);
+ EXPECT_EQ(static_cast<GLfloat>(12), cmd.x);
+ EXPECT_EQ(static_cast<GLfloat>(13), cmd.y);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Uniform2fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 3),
+ };
+ cmds::Uniform2fvImmediate& cmd = *GetBufferAs<cmds::Uniform2fvImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLfloat) * 2;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform2fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, Uniform2i) {
+ cmds::Uniform2i& cmd = *GetBufferAs<cmds::Uniform2i>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform2i::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.location);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.y);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Uniform2ivImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLint data[] = {
+ static_cast<GLint>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 3),
+ };
+ cmds::Uniform2ivImmediate& cmd = *GetBufferAs<cmds::Uniform2ivImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLint) * 2;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform2ivImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, Uniform3f) {
+ cmds::Uniform3f& cmd = *GetBufferAs<cmds::Uniform3f>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLfloat>(12),
+ static_cast<GLfloat>(13),
+ static_cast<GLfloat>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform3f::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.location);
+ EXPECT_EQ(static_cast<GLfloat>(12), cmd.x);
+ EXPECT_EQ(static_cast<GLfloat>(13), cmd.y);
+ EXPECT_EQ(static_cast<GLfloat>(14), cmd.z);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Uniform3fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 5),
+ };
+ cmds::Uniform3fvImmediate& cmd = *GetBufferAs<cmds::Uniform3fvImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLfloat) * 3;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform3fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, Uniform3i) {
+ cmds::Uniform3i& cmd = *GetBufferAs<cmds::Uniform3i>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLint>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform3i::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.location);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.y);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.z);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Uniform3ivImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLint data[] = {
+ static_cast<GLint>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 5),
+ };
+ cmds::Uniform3ivImmediate& cmd = *GetBufferAs<cmds::Uniform3ivImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLint) * 3;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform3ivImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, Uniform4f) {
+ cmds::Uniform4f& cmd = *GetBufferAs<cmds::Uniform4f>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLfloat>(12),
+ static_cast<GLfloat>(13),
+ static_cast<GLfloat>(14),
+ static_cast<GLfloat>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform4f::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.location);
+ EXPECT_EQ(static_cast<GLfloat>(12), cmd.x);
+ EXPECT_EQ(static_cast<GLfloat>(13), cmd.y);
+ EXPECT_EQ(static_cast<GLfloat>(14), cmd.z);
+ EXPECT_EQ(static_cast<GLfloat>(15), cmd.w);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Uniform4fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 5),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 6),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 7),
+ };
+ cmds::Uniform4fvImmediate& cmd = *GetBufferAs<cmds::Uniform4fvImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLfloat) * 4;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform4fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, Uniform4i) {
+ cmds::Uniform4i& cmd = *GetBufferAs<cmds::Uniform4i>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLint>(14),
+ static_cast<GLint>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform4i::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.location);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.y);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.z);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.w);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Uniform4ivImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLint data[] = {
+ static_cast<GLint>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 5),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 6),
+ static_cast<GLint>(kSomeBaseValueToTestWith + 7),
+ };
+ cmds::Uniform4ivImmediate& cmd = *GetBufferAs<cmds::Uniform4ivImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLint) * 4;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Uniform4ivImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, UniformMatrix2fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 5),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 6),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 7),
+ };
+ cmds::UniformMatrix2fvImmediate& cmd =
+ *GetBufferAs<cmds::UniformMatrix2fvImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLfloat) * 4;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::UniformMatrix2fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, UniformMatrix3fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 5),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 6),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 7),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 8),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 9),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 10),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 11),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 12),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 13),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 14),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 15),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 16),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 17),
+ };
+ cmds::UniformMatrix3fvImmediate& cmd =
+ *GetBufferAs<cmds::UniformMatrix3fvImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLfloat) * 9;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::UniformMatrix3fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, UniformMatrix4fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 5),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 6),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 7),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 8),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 9),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 10),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 11),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 12),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 13),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 14),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 15),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 16),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 17),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 18),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 19),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 20),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 21),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 22),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 23),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 24),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 25),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 26),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 27),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 28),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 29),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 30),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 31),
+ };
+ cmds::UniformMatrix4fvImmediate& cmd =
+ *GetBufferAs<cmds::UniformMatrix4fvImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLfloat) * 16;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::UniformMatrix4fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(1), cmd.location);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, UseProgram) {
+ cmds::UseProgram& cmd = *GetBufferAs<cmds::UseProgram>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::UseProgram::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ValidateProgram) {
+ cmds::ValidateProgram& cmd = *GetBufferAs<cmds::ValidateProgram>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ValidateProgram::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, VertexAttrib1f) {
+ cmds::VertexAttrib1f& cmd = *GetBufferAs<cmds::VertexAttrib1f>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLfloat>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::VertexAttrib1f::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.indx);
+ EXPECT_EQ(static_cast<GLfloat>(12), cmd.x);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, VertexAttrib1fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ };
+ cmds::VertexAttrib1fvImmediate& cmd =
+ *GetBufferAs<cmds::VertexAttrib1fvImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::VertexAttrib1fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.indx);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, VertexAttrib2f) {
+ cmds::VertexAttrib2f& cmd = *GetBufferAs<cmds::VertexAttrib2f>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLfloat>(12),
+ static_cast<GLfloat>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::VertexAttrib2f::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.indx);
+ EXPECT_EQ(static_cast<GLfloat>(12), cmd.x);
+ EXPECT_EQ(static_cast<GLfloat>(13), cmd.y);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, VertexAttrib2fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ };
+ cmds::VertexAttrib2fvImmediate& cmd =
+ *GetBufferAs<cmds::VertexAttrib2fvImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::VertexAttrib2fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.indx);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, VertexAttrib3f) {
+ cmds::VertexAttrib3f& cmd = *GetBufferAs<cmds::VertexAttrib3f>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLfloat>(12),
+ static_cast<GLfloat>(13),
+ static_cast<GLfloat>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::VertexAttrib3f::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.indx);
+ EXPECT_EQ(static_cast<GLfloat>(12), cmd.x);
+ EXPECT_EQ(static_cast<GLfloat>(13), cmd.y);
+ EXPECT_EQ(static_cast<GLfloat>(14), cmd.z);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, VertexAttrib3fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 2),
+ };
+ cmds::VertexAttrib3fvImmediate& cmd =
+ *GetBufferAs<cmds::VertexAttrib3fvImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::VertexAttrib3fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.indx);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, VertexAttrib4f) {
+ cmds::VertexAttrib4f& cmd = *GetBufferAs<cmds::VertexAttrib4f>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLfloat>(12),
+ static_cast<GLfloat>(13),
+ static_cast<GLfloat>(14),
+ static_cast<GLfloat>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::VertexAttrib4f::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.indx);
+ EXPECT_EQ(static_cast<GLfloat>(12), cmd.x);
+ EXPECT_EQ(static_cast<GLfloat>(13), cmd.y);
+ EXPECT_EQ(static_cast<GLfloat>(14), cmd.z);
+ EXPECT_EQ(static_cast<GLfloat>(15), cmd.w);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, VertexAttrib4fvImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 3),
+ };
+ cmds::VertexAttrib4fvImmediate& cmd =
+ *GetBufferAs<cmds::VertexAttrib4fvImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::VertexAttrib4fvImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.indx);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, VertexAttribPointer) {
+ cmds::VertexAttribPointer& cmd = *GetBufferAs<cmds::VertexAttribPointer>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLint>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLboolean>(14),
+ static_cast<GLsizei>(15),
+ static_cast<GLuint>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::VertexAttribPointer::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.indx);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.size);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.type);
+ EXPECT_EQ(static_cast<GLboolean>(14), cmd.normalized);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.stride);
+ EXPECT_EQ(static_cast<GLuint>(16), cmd.offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, Viewport) {
+ cmds::Viewport& cmd = *GetBufferAs<cmds::Viewport>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLint>(12),
+ static_cast<GLsizei>(13),
+ static_cast<GLsizei>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Viewport::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.y);
+ EXPECT_EQ(static_cast<GLsizei>(13), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BlitFramebufferCHROMIUM) {
+ cmds::BlitFramebufferCHROMIUM& cmd =
+ *GetBufferAs<cmds::BlitFramebufferCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLint>(14),
+ static_cast<GLint>(15),
+ static_cast<GLint>(16),
+ static_cast<GLint>(17),
+ static_cast<GLint>(18),
+ static_cast<GLbitfield>(19),
+ static_cast<GLenum>(20));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BlitFramebufferCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.srcX0);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.srcY0);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.srcX1);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.srcY1);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.dstX0);
+ EXPECT_EQ(static_cast<GLint>(16), cmd.dstY0);
+ EXPECT_EQ(static_cast<GLint>(17), cmd.dstX1);
+ EXPECT_EQ(static_cast<GLint>(18), cmd.dstY1);
+ EXPECT_EQ(static_cast<GLbitfield>(19), cmd.mask);
+ EXPECT_EQ(static_cast<GLenum>(20), cmd.filter);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, RenderbufferStorageMultisampleCHROMIUM) {
+ cmds::RenderbufferStorageMultisampleCHROMIUM& cmd =
+ *GetBufferAs<cmds::RenderbufferStorageMultisampleCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLsizei>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLsizei>(14),
+ static_cast<GLsizei>(15));
+ EXPECT_EQ(static_cast<uint32_t>(
+ cmds::RenderbufferStorageMultisampleCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLsizei>(12), cmd.samples);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, RenderbufferStorageMultisampleEXT) {
+ cmds::RenderbufferStorageMultisampleEXT& cmd =
+ *GetBufferAs<cmds::RenderbufferStorageMultisampleEXT>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLsizei>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLsizei>(14),
+ static_cast<GLsizei>(15));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::RenderbufferStorageMultisampleEXT::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLsizei>(12), cmd.samples);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, FramebufferTexture2DMultisampleEXT) {
+ cmds::FramebufferTexture2DMultisampleEXT& cmd =
+ *GetBufferAs<cmds::FramebufferTexture2DMultisampleEXT>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLuint>(14),
+ static_cast<GLsizei>(15));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::FramebufferTexture2DMultisampleEXT::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.attachment);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.textarget);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.texture);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.samples);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, TexStorage2DEXT) {
+ cmds::TexStorage2DEXT& cmd = *GetBufferAs<cmds::TexStorage2DEXT>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLsizei>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLsizei>(14),
+ static_cast<GLsizei>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TexStorage2DEXT::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLsizei>(12), cmd.levels);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.internalFormat);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GenQueriesEXTImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::GenQueriesEXTImmediate& cmd =
+ *GetBufferAs<cmds::GenQueriesEXTImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GenQueriesEXTImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, DeleteQueriesEXTImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::DeleteQueriesEXTImmediate& cmd =
+ *GetBufferAs<cmds::DeleteQueriesEXTImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteQueriesEXTImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, BeginQueryEXT) {
+ cmds::BeginQueryEXT& cmd = *GetBufferAs<cmds::BeginQueryEXT>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLuint>(12),
+ static_cast<uint32_t>(13),
+ static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BeginQueryEXT::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.sync_data_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.sync_data_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, EndQueryEXT) {
+ cmds::EndQueryEXT& cmd = *GetBufferAs<cmds::EndQueryEXT>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::EndQueryEXT::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.submit_count);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, InsertEventMarkerEXT) {
+ cmds::InsertEventMarkerEXT& cmd = *GetBufferAs<cmds::InsertEventMarkerEXT>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::InsertEventMarkerEXT::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, PushGroupMarkerEXT) {
+ cmds::PushGroupMarkerEXT& cmd = *GetBufferAs<cmds::PushGroupMarkerEXT>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::PushGroupMarkerEXT::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, PopGroupMarkerEXT) {
+ cmds::PopGroupMarkerEXT& cmd = *GetBufferAs<cmds::PopGroupMarkerEXT>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::PopGroupMarkerEXT::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GenVertexArraysOESImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::GenVertexArraysOESImmediate& cmd =
+ *GetBufferAs<cmds::GenVertexArraysOESImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GenVertexArraysOESImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, DeleteVertexArraysOESImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::DeleteVertexArraysOESImmediate& cmd =
+ *GetBufferAs<cmds::DeleteVertexArraysOESImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteVertexArraysOESImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ // TODO(gman): Check that ids were inserted;
+}
+
+TEST_F(GLES2FormatTest, IsVertexArrayOES) {
+ cmds::IsVertexArrayOES& cmd = *GetBufferAs<cmds::IsVertexArrayOES>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::IsVertexArrayOES::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.array);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BindVertexArrayOES) {
+ cmds::BindVertexArrayOES& cmd = *GetBufferAs<cmds::BindVertexArrayOES>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BindVertexArrayOES::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.array);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, SwapBuffers) {
+ cmds::SwapBuffers& cmd = *GetBufferAs<cmds::SwapBuffers>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::SwapBuffers::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetMaxValueInBufferCHROMIUM) {
+ cmds::GetMaxValueInBufferCHROMIUM& cmd =
+ *GetBufferAs<cmds::GetMaxValueInBufferCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLsizei>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLuint>(14),
+ static_cast<uint32_t>(15),
+ static_cast<uint32_t>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetMaxValueInBufferCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.buffer_id);
+ EXPECT_EQ(static_cast<GLsizei>(12), cmd.count);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.type);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.offset);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(16), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, EnableFeatureCHROMIUM) {
+ cmds::EnableFeatureCHROMIUM& cmd =
+ *GetBufferAs<cmds::EnableFeatureCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::EnableFeatureCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.bucket_id);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ResizeCHROMIUM) {
+ cmds::ResizeCHROMIUM& cmd = *GetBufferAs<cmds::ResizeCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLuint>(12),
+ static_cast<GLfloat>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ResizeCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.width);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.height);
+ EXPECT_EQ(static_cast<GLfloat>(13), cmd.scale_factor);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetRequestableExtensionsCHROMIUM) {
+ cmds::GetRequestableExtensionsCHROMIUM& cmd =
+ *GetBufferAs<cmds::GetRequestableExtensionsCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<uint32_t>(11));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::GetRequestableExtensionsCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<uint32_t>(11), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, RequestExtensionCHROMIUM) {
+ cmds::RequestExtensionCHROMIUM& cmd =
+ *GetBufferAs<cmds::RequestExtensionCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<uint32_t>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::RequestExtensionCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<uint32_t>(11), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetMultipleIntegervCHROMIUM) {
+ cmds::GetMultipleIntegervCHROMIUM& cmd =
+ *GetBufferAs<cmds::GetMultipleIntegervCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<uint32_t>(11),
+ static_cast<uint32_t>(12),
+ static_cast<GLuint>(13),
+ static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15),
+ static_cast<GLsizeiptr>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetMultipleIntegervCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<uint32_t>(11), cmd.pnames_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.pnames_shm_offset);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.count);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.results_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.results_shm_offset);
+ EXPECT_EQ(static_cast<GLsizeiptr>(16), cmd.size);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetProgramInfoCHROMIUM) {
+ cmds::GetProgramInfoCHROMIUM& cmd =
+ *GetBufferAs<cmds::GetProgramInfoCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<uint32_t>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetProgramInfoCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetTranslatedShaderSourceANGLE) {
+ cmds::GetTranslatedShaderSourceANGLE& cmd =
+ *GetBufferAs<cmds::GetTranslatedShaderSourceANGLE>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<uint32_t>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetTranslatedShaderSourceANGLE::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.shader);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, PostSubBufferCHROMIUM) {
+ cmds::PostSubBufferCHROMIUM& cmd =
+ *GetBufferAs<cmds::PostSubBufferCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLint>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::PostSubBufferCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.y);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.width);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, TexImageIOSurface2DCHROMIUM) {
+ cmds::TexImageIOSurface2DCHROMIUM& cmd =
+ *GetBufferAs<cmds::TexImageIOSurface2DCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLsizei>(12),
+ static_cast<GLsizei>(13),
+ static_cast<GLuint>(14),
+ static_cast<GLuint>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TexImageIOSurface2DCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLsizei>(12), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(13), cmd.height);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.ioSurfaceId);
+ EXPECT_EQ(static_cast<GLuint>(15), cmd.plane);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, CopyTextureCHROMIUM) {
+ cmds::CopyTextureCHROMIUM& cmd = *GetBufferAs<cmds::CopyTextureCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLint>(14),
+ static_cast<GLint>(15),
+ static_cast<GLenum>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CopyTextureCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.source_id);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.dest_id);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.level);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLenum>(16), cmd.dest_type);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DrawArraysInstancedANGLE) {
+ cmds::DrawArraysInstancedANGLE& cmd =
+ *GetBufferAs<cmds::DrawArraysInstancedANGLE>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLsizei>(13),
+ static_cast<GLsizei>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DrawArraysInstancedANGLE::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.first);
+ EXPECT_EQ(static_cast<GLsizei>(13), cmd.count);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.primcount);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DrawElementsInstancedANGLE) {
+ cmds::DrawElementsInstancedANGLE& cmd =
+ *GetBufferAs<cmds::DrawElementsInstancedANGLE>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLsizei>(12),
+ static_cast<GLenum>(13),
+ static_cast<GLuint>(14),
+ static_cast<GLsizei>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DrawElementsInstancedANGLE::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ EXPECT_EQ(static_cast<GLsizei>(12), cmd.count);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.type);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.index_offset);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.primcount);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, VertexAttribDivisorANGLE) {
+ cmds::VertexAttribDivisorANGLE& cmd =
+ *GetBufferAs<cmds::VertexAttribDivisorANGLE>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::VertexAttribDivisorANGLE::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.index);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.divisor);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+// TODO(gman): Write test for GenMailboxCHROMIUM
+TEST_F(GLES2FormatTest, ProduceTextureCHROMIUMImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLbyte data[] = {
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 5),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 6),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 7),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 8),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 9),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 10),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 11),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 12),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 13),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 14),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 15),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 16),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 17),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 18),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 19),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 20),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 21),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 22),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 23),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 24),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 25),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 26),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 27),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 28),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 29),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 30),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 31),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 32),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 33),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 34),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 35),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 36),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 37),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 38),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 39),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 40),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 41),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 42),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 43),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 44),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 45),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 46),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 47),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 48),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 49),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 50),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 51),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 52),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 53),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 54),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 55),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 56),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 57),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 58),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 59),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 60),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 61),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 62),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 63),
+ };
+ cmds::ProduceTextureCHROMIUMImmediate& cmd =
+ *GetBufferAs<cmds::ProduceTextureCHROMIUMImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11), data);
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::ProduceTextureCHROMIUMImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, ProduceTextureDirectCHROMIUMImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLbyte data[] = {
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 5),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 6),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 7),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 8),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 9),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 10),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 11),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 12),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 13),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 14),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 15),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 16),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 17),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 18),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 19),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 20),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 21),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 22),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 23),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 24),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 25),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 26),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 27),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 28),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 29),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 30),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 31),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 32),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 33),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 34),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 35),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 36),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 37),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 38),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 39),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 40),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 41),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 42),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 43),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 44),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 45),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 46),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 47),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 48),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 49),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 50),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 51),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 52),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 53),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 54),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 55),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 56),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 57),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 58),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 59),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 60),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 61),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 62),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 63),
+ };
+ cmds::ProduceTextureDirectCHROMIUMImmediate& cmd =
+ *GetBufferAs<cmds::ProduceTextureDirectCHROMIUMImmediate>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12), data);
+ EXPECT_EQ(static_cast<uint32_t>(
+ cmds::ProduceTextureDirectCHROMIUMImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.texture);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.target);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, ConsumeTextureCHROMIUMImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLbyte data[] = {
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 5),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 6),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 7),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 8),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 9),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 10),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 11),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 12),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 13),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 14),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 15),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 16),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 17),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 18),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 19),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 20),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 21),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 22),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 23),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 24),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 25),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 26),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 27),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 28),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 29),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 30),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 31),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 32),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 33),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 34),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 35),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 36),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 37),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 38),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 39),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 40),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 41),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 42),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 43),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 44),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 45),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 46),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 47),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 48),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 49),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 50),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 51),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 52),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 53),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 54),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 55),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 56),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 57),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 58),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 59),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 60),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 61),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 62),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 63),
+ };
+ cmds::ConsumeTextureCHROMIUMImmediate& cmd =
+ *GetBufferAs<cmds::ConsumeTextureCHROMIUMImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11), data);
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::ConsumeTextureCHROMIUMImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+// TODO(gman): Write test for CreateAndConsumeTextureCHROMIUMImmediate
+TEST_F(GLES2FormatTest, BindUniformLocationCHROMIUMBucket) {
+ cmds::BindUniformLocationCHROMIUMBucket& cmd =
+ *GetBufferAs<cmds::BindUniformLocationCHROMIUMBucket>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLint>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::BindUniformLocationCHROMIUMBucket::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.location);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.name_bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BindTexImage2DCHROMIUM) {
+ cmds::BindTexImage2DCHROMIUM& cmd =
+ *GetBufferAs<cmds::BindTexImage2DCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BindTexImage2DCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.imageId);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ReleaseTexImage2DCHROMIUM) {
+ cmds::ReleaseTexImage2DCHROMIUM& cmd =
+ *GetBufferAs<cmds::ReleaseTexImage2DCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ReleaseTexImage2DCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.imageId);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, TraceBeginCHROMIUM) {
+ cmds::TraceBeginCHROMIUM& cmd = *GetBufferAs<cmds::TraceBeginCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TraceBeginCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.bucket_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, TraceEndCHROMIUM) {
+ cmds::TraceEndCHROMIUM& cmd = *GetBufferAs<cmds::TraceEndCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TraceEndCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, AsyncTexSubImage2DCHROMIUM) {
+ cmds::AsyncTexSubImage2DCHROMIUM& cmd =
+ *GetBufferAs<cmds::AsyncTexSubImage2DCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLint>(14),
+ static_cast<GLsizei>(15),
+ static_cast<GLsizei>(16),
+ static_cast<GLenum>(17),
+ static_cast<GLenum>(18),
+ static_cast<uint32_t>(19),
+ static_cast<uint32_t>(20),
+ static_cast<uint32_t>(21),
+ static_cast<uint32_t>(22),
+ static_cast<uint32_t>(23));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::AsyncTexSubImage2DCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.level);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.xoffset);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.yoffset);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.height);
+ EXPECT_EQ(static_cast<GLenum>(17), cmd.format);
+ EXPECT_EQ(static_cast<GLenum>(18), cmd.type);
+ EXPECT_EQ(static_cast<uint32_t>(19), cmd.data_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(20), cmd.data_shm_offset);
+ EXPECT_EQ(static_cast<uint32_t>(21), cmd.async_upload_token);
+ EXPECT_EQ(static_cast<uint32_t>(22), cmd.sync_data_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(23), cmd.sync_data_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, AsyncTexImage2DCHROMIUM) {
+ cmds::AsyncTexImage2DCHROMIUM& cmd =
+ *GetBufferAs<cmds::AsyncTexImage2DCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLenum>(11),
+ static_cast<GLint>(12),
+ static_cast<GLint>(13),
+ static_cast<GLsizei>(14),
+ static_cast<GLsizei>(15),
+ static_cast<GLenum>(16),
+ static_cast<GLenum>(17),
+ static_cast<uint32_t>(18),
+ static_cast<uint32_t>(19),
+ static_cast<uint32_t>(20),
+ static_cast<uint32_t>(21),
+ static_cast<uint32_t>(22));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::AsyncTexImage2DCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.level);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.height);
+ EXPECT_EQ(static_cast<GLenum>(16), cmd.format);
+ EXPECT_EQ(static_cast<GLenum>(17), cmd.type);
+ EXPECT_EQ(static_cast<uint32_t>(18), cmd.pixels_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(19), cmd.pixels_shm_offset);
+ EXPECT_EQ(static_cast<uint32_t>(20), cmd.async_upload_token);
+ EXPECT_EQ(static_cast<uint32_t>(21), cmd.sync_data_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(22), cmd.sync_data_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, WaitAsyncTexImage2DCHROMIUM) {
+ cmds::WaitAsyncTexImage2DCHROMIUM& cmd =
+ *GetBufferAs<cmds::WaitAsyncTexImage2DCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::WaitAsyncTexImage2DCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, WaitAllAsyncTexImage2DCHROMIUM) {
+ cmds::WaitAllAsyncTexImage2DCHROMIUM& cmd =
+ *GetBufferAs<cmds::WaitAllAsyncTexImage2DCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::WaitAllAsyncTexImage2DCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DiscardFramebufferEXTImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLenum data[] = {
+ static_cast<GLenum>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLenum>(kSomeBaseValueToTestWith + 1),
+ };
+ cmds::DiscardFramebufferEXTImmediate& cmd =
+ *GetBufferAs<cmds::DiscardFramebufferEXTImmediate>();
+ const GLsizei kNumElements = 2;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLenum) * 1;
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(1), static_cast<GLsizei>(2), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DiscardFramebufferEXTImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(1), cmd.target);
+ EXPECT_EQ(static_cast<GLsizei>(2), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, LoseContextCHROMIUM) {
+ cmds::LoseContextCHROMIUM& cmd = *GetBufferAs<cmds::LoseContextCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLenum>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::LoseContextCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.current);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.other);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+// TODO(gman): Write test for InsertSyncPointCHROMIUM
+TEST_F(GLES2FormatTest, WaitSyncPointCHROMIUM) {
+ cmds::WaitSyncPointCHROMIUM& cmd =
+ *GetBufferAs<cmds::WaitSyncPointCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::WaitSyncPointCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.sync_point);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DrawBuffersEXTImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLenum data[] = {
+ static_cast<GLenum>(kSomeBaseValueToTestWith + 0),
+ };
+ cmds::DrawBuffersEXTImmediate& cmd =
+ *GetBufferAs<cmds::DrawBuffersEXTImmediate>();
+ const GLsizei kNumElements = 1;
+ const size_t kExpectedCmdSize =
+ sizeof(cmd) + kNumElements * sizeof(GLenum) * 1;
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(1), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DrawBuffersEXTImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(1), cmd.count);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, DiscardBackbufferCHROMIUM) {
+ cmds::DiscardBackbufferCHROMIUM& cmd =
+ *GetBufferAs<cmds::DiscardBackbufferCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DiscardBackbufferCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ScheduleOverlayPlaneCHROMIUM) {
+ cmds::ScheduleOverlayPlaneCHROMIUM& cmd =
+ *GetBufferAs<cmds::ScheduleOverlayPlaneCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd,
+ static_cast<GLint>(11),
+ static_cast<GLenum>(12),
+ static_cast<GLuint>(13),
+ static_cast<GLint>(14),
+ static_cast<GLint>(15),
+ static_cast<GLint>(16),
+ static_cast<GLint>(17),
+ static_cast<GLfloat>(18),
+ static_cast<GLfloat>(19),
+ static_cast<GLfloat>(20),
+ static_cast<GLfloat>(21));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ScheduleOverlayPlaneCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.plane_z_order);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.plane_transform);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.overlay_texture_id);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.bounds_x);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.bounds_y);
+ EXPECT_EQ(static_cast<GLint>(16), cmd.bounds_width);
+ EXPECT_EQ(static_cast<GLint>(17), cmd.bounds_height);
+ EXPECT_EQ(static_cast<GLfloat>(18), cmd.uv_x);
+ EXPECT_EQ(static_cast<GLfloat>(19), cmd.uv_y);
+ EXPECT_EQ(static_cast<GLfloat>(20), cmd.uv_width);
+ EXPECT_EQ(static_cast<GLfloat>(21), cmd.uv_height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, MatrixLoadfCHROMIUMImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLfloat data[] = {
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 5),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 6),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 7),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 8),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 9),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 10),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 11),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 12),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 13),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 14),
+ static_cast<GLfloat>(kSomeBaseValueToTestWith + 15),
+ };
+ cmds::MatrixLoadfCHROMIUMImmediate& cmd =
+ *GetBufferAs<cmds::MatrixLoadfCHROMIUMImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::MatrixLoadfCHROMIUMImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.matrixMode);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+ // TODO(gman): Check that data was inserted;
+}
+
+TEST_F(GLES2FormatTest, MatrixLoadIdentityCHROMIUM) {
+ cmds::MatrixLoadIdentityCHROMIUM& cmd =
+ *GetBufferAs<cmds::MatrixLoadIdentityCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::MatrixLoadIdentityCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.matrixMode);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_TEST_AUTOGEN_H_
diff --git a/gpu/command_buffer/common/gles2_cmd_ids.h b/gpu/command_buffer/common/gles2_cmd_ids.h
new file mode 100644
index 0000000..b701c91
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_ids.h
@@ -0,0 +1,23 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines the GLES2 command buffer commands.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_IDS_H_
+#define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_IDS_H_
+
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+
+namespace gpu {
+namespace gles2 {
+
+#include "gpu/command_buffer/common/gles2_cmd_ids_autogen.h"
+
+const char* GetCommandName(CommandId command_id);
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_IDS_H_
+
diff --git a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
new file mode 100644
index 0000000..030ada2
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -0,0 +1,222 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_IDS_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_IDS_AUTOGEN_H_
+
+#define GLES2_COMMAND_LIST(OP) \
+ OP(ActiveTexture) /* 256 */ \
+ OP(AttachShader) /* 257 */ \
+ OP(BindAttribLocationBucket) /* 258 */ \
+ OP(BindBuffer) /* 259 */ \
+ OP(BindFramebuffer) /* 260 */ \
+ OP(BindRenderbuffer) /* 261 */ \
+ OP(BindTexture) /* 262 */ \
+ OP(BlendColor) /* 263 */ \
+ OP(BlendEquation) /* 264 */ \
+ OP(BlendEquationSeparate) /* 265 */ \
+ OP(BlendFunc) /* 266 */ \
+ OP(BlendFuncSeparate) /* 267 */ \
+ OP(BufferData) /* 268 */ \
+ OP(BufferSubData) /* 269 */ \
+ OP(CheckFramebufferStatus) /* 270 */ \
+ OP(Clear) /* 271 */ \
+ OP(ClearColor) /* 272 */ \
+ OP(ClearDepthf) /* 273 */ \
+ OP(ClearStencil) /* 274 */ \
+ OP(ColorMask) /* 275 */ \
+ OP(CompileShader) /* 276 */ \
+ OP(CompressedTexImage2DBucket) /* 277 */ \
+ OP(CompressedTexImage2D) /* 278 */ \
+ OP(CompressedTexSubImage2DBucket) /* 279 */ \
+ OP(CompressedTexSubImage2D) /* 280 */ \
+ OP(CopyTexImage2D) /* 281 */ \
+ OP(CopyTexSubImage2D) /* 282 */ \
+ OP(CreateProgram) /* 283 */ \
+ OP(CreateShader) /* 284 */ \
+ OP(CullFace) /* 285 */ \
+ OP(DeleteBuffersImmediate) /* 286 */ \
+ OP(DeleteFramebuffersImmediate) /* 287 */ \
+ OP(DeleteProgram) /* 288 */ \
+ OP(DeleteRenderbuffersImmediate) /* 289 */ \
+ OP(DeleteShader) /* 290 */ \
+ OP(DeleteTexturesImmediate) /* 291 */ \
+ OP(DepthFunc) /* 292 */ \
+ OP(DepthMask) /* 293 */ \
+ OP(DepthRangef) /* 294 */ \
+ OP(DetachShader) /* 295 */ \
+ OP(Disable) /* 296 */ \
+ OP(DisableVertexAttribArray) /* 297 */ \
+ OP(DrawArrays) /* 298 */ \
+ OP(DrawElements) /* 299 */ \
+ OP(Enable) /* 300 */ \
+ OP(EnableVertexAttribArray) /* 301 */ \
+ OP(Finish) /* 302 */ \
+ OP(Flush) /* 303 */ \
+ OP(FramebufferRenderbuffer) /* 304 */ \
+ OP(FramebufferTexture2D) /* 305 */ \
+ OP(FrontFace) /* 306 */ \
+ OP(GenBuffersImmediate) /* 307 */ \
+ OP(GenerateMipmap) /* 308 */ \
+ OP(GenFramebuffersImmediate) /* 309 */ \
+ OP(GenRenderbuffersImmediate) /* 310 */ \
+ OP(GenTexturesImmediate) /* 311 */ \
+ OP(GetActiveAttrib) /* 312 */ \
+ OP(GetActiveUniform) /* 313 */ \
+ OP(GetAttachedShaders) /* 314 */ \
+ OP(GetAttribLocation) /* 315 */ \
+ OP(GetBooleanv) /* 316 */ \
+ OP(GetBufferParameteriv) /* 317 */ \
+ OP(GetError) /* 318 */ \
+ OP(GetFloatv) /* 319 */ \
+ OP(GetFramebufferAttachmentParameteriv) /* 320 */ \
+ OP(GetIntegerv) /* 321 */ \
+ OP(GetProgramiv) /* 322 */ \
+ OP(GetProgramInfoLog) /* 323 */ \
+ OP(GetRenderbufferParameteriv) /* 324 */ \
+ OP(GetShaderiv) /* 325 */ \
+ OP(GetShaderInfoLog) /* 326 */ \
+ OP(GetShaderPrecisionFormat) /* 327 */ \
+ OP(GetShaderSource) /* 328 */ \
+ OP(GetString) /* 329 */ \
+ OP(GetTexParameterfv) /* 330 */ \
+ OP(GetTexParameteriv) /* 331 */ \
+ OP(GetUniformfv) /* 332 */ \
+ OP(GetUniformiv) /* 333 */ \
+ OP(GetUniformLocation) /* 334 */ \
+ OP(GetVertexAttribfv) /* 335 */ \
+ OP(GetVertexAttribiv) /* 336 */ \
+ OP(GetVertexAttribPointerv) /* 337 */ \
+ OP(Hint) /* 338 */ \
+ OP(IsBuffer) /* 339 */ \
+ OP(IsEnabled) /* 340 */ \
+ OP(IsFramebuffer) /* 341 */ \
+ OP(IsProgram) /* 342 */ \
+ OP(IsRenderbuffer) /* 343 */ \
+ OP(IsShader) /* 344 */ \
+ OP(IsTexture) /* 345 */ \
+ OP(LineWidth) /* 346 */ \
+ OP(LinkProgram) /* 347 */ \
+ OP(PixelStorei) /* 348 */ \
+ OP(PolygonOffset) /* 349 */ \
+ OP(ReadPixels) /* 350 */ \
+ OP(ReleaseShaderCompiler) /* 351 */ \
+ OP(RenderbufferStorage) /* 352 */ \
+ OP(SampleCoverage) /* 353 */ \
+ OP(Scissor) /* 354 */ \
+ OP(ShaderBinary) /* 355 */ \
+ OP(ShaderSourceBucket) /* 356 */ \
+ OP(StencilFunc) /* 357 */ \
+ OP(StencilFuncSeparate) /* 358 */ \
+ OP(StencilMask) /* 359 */ \
+ OP(StencilMaskSeparate) /* 360 */ \
+ OP(StencilOp) /* 361 */ \
+ OP(StencilOpSeparate) /* 362 */ \
+ OP(TexImage2D) /* 363 */ \
+ OP(TexParameterf) /* 364 */ \
+ OP(TexParameterfvImmediate) /* 365 */ \
+ OP(TexParameteri) /* 366 */ \
+ OP(TexParameterivImmediate) /* 367 */ \
+ OP(TexSubImage2D) /* 368 */ \
+ OP(Uniform1f) /* 369 */ \
+ OP(Uniform1fvImmediate) /* 370 */ \
+ OP(Uniform1i) /* 371 */ \
+ OP(Uniform1ivImmediate) /* 372 */ \
+ OP(Uniform2f) /* 373 */ \
+ OP(Uniform2fvImmediate) /* 374 */ \
+ OP(Uniform2i) /* 375 */ \
+ OP(Uniform2ivImmediate) /* 376 */ \
+ OP(Uniform3f) /* 377 */ \
+ OP(Uniform3fvImmediate) /* 378 */ \
+ OP(Uniform3i) /* 379 */ \
+ OP(Uniform3ivImmediate) /* 380 */ \
+ OP(Uniform4f) /* 381 */ \
+ OP(Uniform4fvImmediate) /* 382 */ \
+ OP(Uniform4i) /* 383 */ \
+ OP(Uniform4ivImmediate) /* 384 */ \
+ OP(UniformMatrix2fvImmediate) /* 385 */ \
+ OP(UniformMatrix3fvImmediate) /* 386 */ \
+ OP(UniformMatrix4fvImmediate) /* 387 */ \
+ OP(UseProgram) /* 388 */ \
+ OP(ValidateProgram) /* 389 */ \
+ OP(VertexAttrib1f) /* 390 */ \
+ OP(VertexAttrib1fvImmediate) /* 391 */ \
+ OP(VertexAttrib2f) /* 392 */ \
+ OP(VertexAttrib2fvImmediate) /* 393 */ \
+ OP(VertexAttrib3f) /* 394 */ \
+ OP(VertexAttrib3fvImmediate) /* 395 */ \
+ OP(VertexAttrib4f) /* 396 */ \
+ OP(VertexAttrib4fvImmediate) /* 397 */ \
+ OP(VertexAttribPointer) /* 398 */ \
+ OP(Viewport) /* 399 */ \
+ OP(BlitFramebufferCHROMIUM) /* 400 */ \
+ OP(RenderbufferStorageMultisampleCHROMIUM) /* 401 */ \
+ OP(RenderbufferStorageMultisampleEXT) /* 402 */ \
+ OP(FramebufferTexture2DMultisampleEXT) /* 403 */ \
+ OP(TexStorage2DEXT) /* 404 */ \
+ OP(GenQueriesEXTImmediate) /* 405 */ \
+ OP(DeleteQueriesEXTImmediate) /* 406 */ \
+ OP(BeginQueryEXT) /* 407 */ \
+ OP(EndQueryEXT) /* 408 */ \
+ OP(InsertEventMarkerEXT) /* 409 */ \
+ OP(PushGroupMarkerEXT) /* 410 */ \
+ OP(PopGroupMarkerEXT) /* 411 */ \
+ OP(GenVertexArraysOESImmediate) /* 412 */ \
+ OP(DeleteVertexArraysOESImmediate) /* 413 */ \
+ OP(IsVertexArrayOES) /* 414 */ \
+ OP(BindVertexArrayOES) /* 415 */ \
+ OP(SwapBuffers) /* 416 */ \
+ OP(GetMaxValueInBufferCHROMIUM) /* 417 */ \
+ OP(EnableFeatureCHROMIUM) /* 418 */ \
+ OP(ResizeCHROMIUM) /* 419 */ \
+ OP(GetRequestableExtensionsCHROMIUM) /* 420 */ \
+ OP(RequestExtensionCHROMIUM) /* 421 */ \
+ OP(GetMultipleIntegervCHROMIUM) /* 422 */ \
+ OP(GetProgramInfoCHROMIUM) /* 423 */ \
+ OP(GetTranslatedShaderSourceANGLE) /* 424 */ \
+ OP(PostSubBufferCHROMIUM) /* 425 */ \
+ OP(TexImageIOSurface2DCHROMIUM) /* 426 */ \
+ OP(CopyTextureCHROMIUM) /* 427 */ \
+ OP(DrawArraysInstancedANGLE) /* 428 */ \
+ OP(DrawElementsInstancedANGLE) /* 429 */ \
+ OP(VertexAttribDivisorANGLE) /* 430 */ \
+ OP(GenMailboxCHROMIUM) /* 431 */ \
+ OP(ProduceTextureCHROMIUMImmediate) /* 432 */ \
+ OP(ProduceTextureDirectCHROMIUMImmediate) /* 433 */ \
+ OP(ConsumeTextureCHROMIUMImmediate) /* 434 */ \
+ OP(CreateAndConsumeTextureCHROMIUMImmediate) /* 435 */ \
+ OP(BindUniformLocationCHROMIUMBucket) /* 436 */ \
+ OP(BindTexImage2DCHROMIUM) /* 437 */ \
+ OP(ReleaseTexImage2DCHROMIUM) /* 438 */ \
+ OP(TraceBeginCHROMIUM) /* 439 */ \
+ OP(TraceEndCHROMIUM) /* 440 */ \
+ OP(AsyncTexSubImage2DCHROMIUM) /* 441 */ \
+ OP(AsyncTexImage2DCHROMIUM) /* 442 */ \
+ OP(WaitAsyncTexImage2DCHROMIUM) /* 443 */ \
+ OP(WaitAllAsyncTexImage2DCHROMIUM) /* 444 */ \
+ OP(DiscardFramebufferEXTImmediate) /* 445 */ \
+ OP(LoseContextCHROMIUM) /* 446 */ \
+ OP(InsertSyncPointCHROMIUM) /* 447 */ \
+ OP(WaitSyncPointCHROMIUM) /* 448 */ \
+ OP(DrawBuffersEXTImmediate) /* 449 */ \
+ OP(DiscardBackbufferCHROMIUM) /* 450 */ \
+ OP(ScheduleOverlayPlaneCHROMIUM) /* 451 */ \
+ OP(MatrixLoadfCHROMIUMImmediate) /* 452 */ \
+ OP(MatrixLoadIdentityCHROMIUM) /* 453 */
+
+enum CommandId {
+ kStartPoint = cmd::kLastCommonId, // All GLES2 commands start after this.
+#define GLES2_CMD_OP(name) k##name,
+ GLES2_COMMAND_LIST(GLES2_CMD_OP)
+#undef GLES2_CMD_OP
+ kNumCommands
+};
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_IDS_AUTOGEN_H_
diff --git a/gpu/command_buffer/common/gles2_cmd_utils.cc b/gpu/command_buffer/common/gles2_cmd_utils.cc
new file mode 100644
index 0000000..3b5097c
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -0,0 +1,937 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is here so other GLES2 related files can have a common set of
+// includes where appropriate.
+
+#include <sstream>
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+
+namespace gpu {
+namespace gles2 {
+
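+
+// One bit per GL error code; GLErrorToErrorBit() and GLErrorBitToGLError()
+// below convert between these flags and the corresponding GLenum values.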
+namespace gl_error_bit {
+enum GLErrorBit {
+ kNoError = 0,
+ kInvalidEnum = (1 << 0),
+ kInvalidValue = (1 << 1),
+ kInvalidOperation = (1 << 2),
+ kOutOfMemory = (1 << 3),
+ kInvalidFrameBufferOperation = (1 << 4)
+};
+}
+
+int GLES2Util::GLGetNumValuesReturned(int id) const {
+ switch (id) {
+    // -- glGetBooleanv, glGetFloatv, glGetIntegerv
+ case GL_ACTIVE_TEXTURE:
+ return 1;
+ case GL_ALIASED_LINE_WIDTH_RANGE:
+ return 2;
+ case GL_ALIASED_POINT_SIZE_RANGE:
+ return 2;
+ case GL_ALPHA_BITS:
+ return 1;
+ case GL_ARRAY_BUFFER_BINDING:
+ return 1;
+ case GL_BLEND:
+ return 1;
+ case GL_BLEND_COLOR:
+ return 4;
+ case GL_BLEND_DST_ALPHA:
+ return 1;
+ case GL_BLEND_DST_RGB:
+ return 1;
+ case GL_BLEND_EQUATION_ALPHA:
+ return 1;
+ case GL_BLEND_EQUATION_RGB:
+ return 1;
+ case GL_BLEND_SRC_ALPHA:
+ return 1;
+ case GL_BLEND_SRC_RGB:
+ return 1;
+ case GL_BLUE_BITS:
+ return 1;
+ case GL_COLOR_CLEAR_VALUE:
+ return 4;
+ case GL_COLOR_WRITEMASK:
+ return 4;
+ case GL_COMPRESSED_TEXTURE_FORMATS:
+ return num_compressed_texture_formats_;
+ case GL_CULL_FACE:
+ return 1;
+ case GL_CULL_FACE_MODE:
+ return 1;
+ case GL_CURRENT_PROGRAM:
+ return 1;
+ case GL_DEPTH_BITS:
+ return 1;
+ case GL_DEPTH_CLEAR_VALUE:
+ return 1;
+ case GL_DEPTH_FUNC:
+ return 1;
+ case GL_DEPTH_RANGE:
+ return 2;
+ case GL_DEPTH_TEST:
+ return 1;
+ case GL_DEPTH_WRITEMASK:
+ return 1;
+ case GL_DITHER:
+ return 1;
+ case GL_ELEMENT_ARRAY_BUFFER_BINDING:
+ return 1;
+ case GL_FRAMEBUFFER_BINDING:
+ return 1;
+ case GL_FRONT_FACE:
+ return 1;
+ case GL_GENERATE_MIPMAP_HINT:
+ return 1;
+ case GL_GREEN_BITS:
+ return 1;
+ case GL_IMPLEMENTATION_COLOR_READ_FORMAT:
+ return 1;
+ case GL_IMPLEMENTATION_COLOR_READ_TYPE:
+ return 1;
+ case GL_LINE_WIDTH:
+ return 1;
+ case GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS:
+ return 1;
+ case GL_MAX_CUBE_MAP_TEXTURE_SIZE:
+ return 1;
+ case GL_MAX_FRAGMENT_UNIFORM_VECTORS:
+ return 1;
+ case GL_MAX_RENDERBUFFER_SIZE:
+ return 1;
+ case GL_MAX_TEXTURE_IMAGE_UNITS:
+ return 1;
+ case GL_MAX_TEXTURE_SIZE:
+ return 1;
+ case GL_MAX_VARYING_VECTORS:
+ return 1;
+ case GL_MAX_VERTEX_ATTRIBS:
+ return 1;
+ case GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS:
+ return 1;
+ case GL_MAX_VERTEX_UNIFORM_VECTORS:
+ return 1;
+ case GL_MAX_VIEWPORT_DIMS:
+ return 2;
+ case GL_NUM_COMPRESSED_TEXTURE_FORMATS:
+ return 1;
+ case GL_NUM_SHADER_BINARY_FORMATS:
+ return 1;
+ case GL_PACK_ALIGNMENT:
+ return 1;
+ case GL_PACK_REVERSE_ROW_ORDER_ANGLE:
+ return 1;
+ case GL_POLYGON_OFFSET_FACTOR:
+ return 1;
+ case GL_POLYGON_OFFSET_FILL:
+ return 1;
+ case GL_POLYGON_OFFSET_UNITS:
+ return 1;
+ case GL_RED_BITS:
+ return 1;
+ case GL_RENDERBUFFER_BINDING:
+ return 1;
+ case GL_SAMPLE_BUFFERS:
+ return 1;
+ case GL_SAMPLE_COVERAGE_INVERT:
+ return 1;
+ case GL_SAMPLE_COVERAGE_VALUE:
+ return 1;
+ case GL_SAMPLES:
+ return 1;
+ case GL_SCISSOR_BOX:
+ return 4;
+ case GL_SCISSOR_TEST:
+ return 1;
+ case GL_SHADER_BINARY_FORMATS:
+ return num_shader_binary_formats_;
+ case GL_SHADER_COMPILER:
+ return 1;
+ case GL_STENCIL_BACK_FAIL:
+ return 1;
+ case GL_STENCIL_BACK_FUNC:
+ return 1;
+ case GL_STENCIL_BACK_PASS_DEPTH_FAIL:
+ return 1;
+ case GL_STENCIL_BACK_PASS_DEPTH_PASS:
+ return 1;
+ case GL_STENCIL_BACK_REF:
+ return 1;
+ case GL_STENCIL_BACK_VALUE_MASK:
+ return 1;
+ case GL_STENCIL_BACK_WRITEMASK:
+ return 1;
+ case GL_STENCIL_BITS:
+ return 1;
+ case GL_STENCIL_CLEAR_VALUE:
+ return 1;
+ case GL_STENCIL_FAIL:
+ return 1;
+ case GL_STENCIL_FUNC:
+ return 1;
+ case GL_STENCIL_PASS_DEPTH_FAIL:
+ return 1;
+ case GL_STENCIL_PASS_DEPTH_PASS:
+ return 1;
+ case GL_STENCIL_REF:
+ return 1;
+ case GL_STENCIL_TEST:
+ return 1;
+ case GL_STENCIL_VALUE_MASK:
+ return 1;
+ case GL_STENCIL_WRITEMASK:
+ return 1;
+ case GL_SUBPIXEL_BITS:
+ return 1;
+ case GL_TEXTURE_BINDING_2D:
+ return 1;
+ case GL_TEXTURE_BINDING_CUBE_MAP:
+ return 1;
+ case GL_TEXTURE_BINDING_EXTERNAL_OES:
+ return 1;
+ case GL_TEXTURE_BINDING_RECTANGLE_ARB:
+ return 1;
+ case GL_TEXTURE_IMMUTABLE_FORMAT_EXT:
+ return 1;
+ case GL_UNPACK_ALIGNMENT:
+ return 1;
+ case GL_VIEWPORT:
+ return 4;
+    // -- glGetBooleanv, glGetFloatv, glGetIntegerv with
+ // GL_CHROMIUM_framebuffer_multisample
+ case GL_MAX_SAMPLES_EXT:
+ return 1;
+ case GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT:
+ return 1;
+
+ // -- glGetBufferParameteriv
+ case GL_BUFFER_SIZE:
+ return 1;
+ case GL_BUFFER_USAGE:
+ return 1;
+
+ // -- glGetFramebufferAttachmentParameteriv
+ case GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE:
+ return 1;
+ case GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME:
+ return 1;
+ case GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL:
+ return 1;
+ case GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE:
+ return 1;
+ // -- glGetFramebufferAttachmentParameteriv with
+ // GL_EXT_multisampled_render_to_texture
+ case GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT:
+ return 1;
+
+ // -- glGetProgramiv
+ case GL_DELETE_STATUS:
+ return 1;
+ case GL_LINK_STATUS:
+ return 1;
+ case GL_VALIDATE_STATUS:
+ return 1;
+ case GL_INFO_LOG_LENGTH:
+ return 1;
+ case GL_ATTACHED_SHADERS:
+ return 1;
+ case GL_ACTIVE_ATTRIBUTES:
+ return 1;
+ case GL_ACTIVE_ATTRIBUTE_MAX_LENGTH:
+ return 1;
+ case GL_ACTIVE_UNIFORMS:
+ return 1;
+ case GL_ACTIVE_UNIFORM_MAX_LENGTH:
+ return 1;
+
+    // -- glGetRenderbufferParameteriv
+ case GL_RENDERBUFFER_WIDTH:
+ return 1;
+ case GL_RENDERBUFFER_HEIGHT:
+ return 1;
+ case GL_RENDERBUFFER_INTERNAL_FORMAT:
+ return 1;
+ case GL_RENDERBUFFER_RED_SIZE:
+ return 1;
+ case GL_RENDERBUFFER_GREEN_SIZE:
+ return 1;
+ case GL_RENDERBUFFER_BLUE_SIZE:
+ return 1;
+ case GL_RENDERBUFFER_ALPHA_SIZE:
+ return 1;
+ case GL_RENDERBUFFER_DEPTH_SIZE:
+ return 1;
+ case GL_RENDERBUFFER_STENCIL_SIZE:
+ return 1;
+    // -- glGetRenderbufferParameteriv with
+ // GL_EXT_multisampled_render_to_texture
+ case GL_RENDERBUFFER_SAMPLES_EXT:
+ return 1;
+
+ // -- glGetShaderiv
+ case GL_SHADER_TYPE:
+ return 1;
+    // Already defined under glGetProgramiv.
+ // case GL_DELETE_STATUS:
+ // return 1;
+ case GL_COMPILE_STATUS:
+ return 1;
+    // Already defined under glGetProgramiv.
+ // case GL_INFO_LOG_LENGTH:
+ // return 1;
+ case GL_SHADER_SOURCE_LENGTH:
+ return 1;
+ case GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE:
+ return 1;
+
+ // -- glGetTexParameterfv, glGetTexParameteriv
+ case GL_TEXTURE_MAG_FILTER:
+ return 1;
+ case GL_TEXTURE_MIN_FILTER:
+ return 1;
+ case GL_TEXTURE_WRAP_S:
+ return 1;
+ case GL_TEXTURE_WRAP_T:
+ return 1;
+ case GL_TEXTURE_MAX_ANISOTROPY_EXT:
+ return 1;
+
+ // -- glGetVertexAttribfv, glGetVertexAttribiv
+ case GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING:
+ return 1;
+ case GL_VERTEX_ATTRIB_ARRAY_ENABLED:
+ return 1;
+ case GL_VERTEX_ATTRIB_ARRAY_SIZE:
+ return 1;
+ case GL_VERTEX_ATTRIB_ARRAY_STRIDE:
+ return 1;
+ case GL_VERTEX_ATTRIB_ARRAY_TYPE:
+ return 1;
+ case GL_VERTEX_ATTRIB_ARRAY_NORMALIZED:
+ return 1;
+ case GL_CURRENT_VERTEX_ATTRIB:
+ return 4;
+
+ // -- glHint with GL_OES_standard_derivatives
+ case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES:
+ return 1;
+
+ // Chromium internal bind_generates_resource query
+ case GL_BIND_GENERATES_RESOURCE_CHROMIUM:
+ return 1;
+
+ // bad enum
+ default:
+ return 0;
+ }
+}
+
+namespace {
+
+// Return the number of elements per group of a specified format.
+int ElementsPerGroup(int format, int type) {
+ switch (type) {
+ case GL_UNSIGNED_SHORT_5_6_5:
+ case GL_UNSIGNED_SHORT_4_4_4_4:
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ case GL_UNSIGNED_INT_24_8_OES:
+ return 1;
+ default:
+ break;
+ }
+
+ switch (format) {
+ case GL_RGB:
+ return 3;
+ case GL_LUMINANCE_ALPHA:
+ return 2;
+ case GL_RGBA:
+ case GL_BGRA_EXT:
+ return 4;
+ case GL_ALPHA:
+ case GL_LUMINANCE:
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT24_OES:
+ case GL_DEPTH_COMPONENT32_OES:
+ case GL_DEPTH_COMPONENT16:
+ case GL_DEPTH24_STENCIL8_OES:
+ case GL_DEPTH_STENCIL_OES:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+// Return the number of bytes per element, based on the element type.
+int BytesPerElement(int type) {
+ switch (type) {
+ case GL_FLOAT:
+ case GL_UNSIGNED_INT_24_8_OES:
+ case GL_UNSIGNED_INT:
+ return 4;
+ case GL_HALF_FLOAT_OES:
+ case GL_UNSIGNED_SHORT:
+ case GL_SHORT:
+ case GL_UNSIGNED_SHORT_5_6_5:
+ case GL_UNSIGNED_SHORT_4_4_4_4:
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ return 2;
+ case GL_UNSIGNED_BYTE:
+ case GL_BYTE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+} // anonymous namespace
+
+uint32 GLES2Util::ComputeImageGroupSize(int format, int type) {
+ return BytesPerElement(type) * ElementsPerGroup(format, type);
+}
+
+bool GLES2Util::ComputeImagePaddedRowSize(
+ int width, int format, int type, int unpack_alignment,
+ uint32* padded_row_size) {
+ uint32 bytes_per_group = ComputeImageGroupSize(format, type);
+ uint32 unpadded_row_size;
+ if (!SafeMultiplyUint32(width, bytes_per_group, &unpadded_row_size)) {
+ return false;
+ }
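+  // Round the unpadded row size up to the next multiple of unpack_alignment.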
+ uint32 temp;
+ if (!SafeAddUint32(unpadded_row_size, unpack_alignment - 1, &temp)) {
+ return false;
+ }
+ *padded_row_size = (temp / unpack_alignment) * unpack_alignment;
+ return true;
+}
+
+// Returns the amount of data glTexImage2D or glTexSubImage2D will access.
+bool GLES2Util::ComputeImageDataSizes(
+ int width, int height, int format, int type, int unpack_alignment,
+ uint32* size, uint32* ret_unpadded_row_size, uint32* ret_padded_row_size) {
+ uint32 bytes_per_group = ComputeImageGroupSize(format, type);
+ uint32 row_size;
+ if (!SafeMultiplyUint32(width, bytes_per_group, &row_size)) {
+ return false;
+ }
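+  // Rows other than the last are padded out to unpack_alignment; the final
+  // row contributes only its unpadded size.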
+ if (height > 1) {
+ uint32 temp;
+ if (!SafeAddUint32(row_size, unpack_alignment - 1, &temp)) {
+ return false;
+ }
+ uint32 padded_row_size = (temp / unpack_alignment) * unpack_alignment;
+ uint32 size_of_all_but_last_row;
+ if (!SafeMultiplyUint32((height - 1), padded_row_size,
+ &size_of_all_but_last_row)) {
+ return false;
+ }
+ if (!SafeAddUint32(size_of_all_but_last_row, row_size, size)) {
+ return false;
+ }
+ if (ret_padded_row_size) {
+ *ret_padded_row_size = padded_row_size;
+ }
+ } else {
+ if (!SafeMultiplyUint32(height, row_size, size)) {
+ return false;
+ }
+ if (ret_padded_row_size) {
+ *ret_padded_row_size = row_size;
+ }
+ }
+ if (ret_unpadded_row_size) {
+ *ret_unpadded_row_size = row_size;
+ }
+
+ return true;
+}
+
+size_t GLES2Util::RenderbufferBytesPerPixel(int format) {
+ switch (format) {
+ case GL_STENCIL_INDEX8:
+ return 1;
+ case GL_RGBA4:
+ case GL_RGB565:
+ case GL_RGB5_A1:
+ case GL_DEPTH_COMPONENT16:
+ return 2;
+ case GL_RGB:
+ case GL_RGBA:
+ case GL_DEPTH24_STENCIL8_OES:
+ case GL_RGB8_OES:
+ case GL_RGBA8_OES:
+ case GL_DEPTH_COMPONENT24_OES:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+uint32 GLES2Util::GetGLDataTypeSizeForUniforms(int type) {
+ switch (type) {
+ case GL_FLOAT:
+ return sizeof(GLfloat); // NOLINT
+ case GL_FLOAT_VEC2:
+ return sizeof(GLfloat) * 2; // NOLINT
+ case GL_FLOAT_VEC3:
+ return sizeof(GLfloat) * 3; // NOLINT
+ case GL_FLOAT_VEC4:
+ return sizeof(GLfloat) * 4; // NOLINT
+ case GL_INT:
+ return sizeof(GLint); // NOLINT
+ case GL_INT_VEC2:
+ return sizeof(GLint) * 2; // NOLINT
+ case GL_INT_VEC3:
+ return sizeof(GLint) * 3; // NOLINT
+ case GL_INT_VEC4:
+ return sizeof(GLint) * 4; // NOLINT
+ case GL_BOOL:
+ return sizeof(GLint); // NOLINT
+ case GL_BOOL_VEC2:
+ return sizeof(GLint) * 2; // NOLINT
+ case GL_BOOL_VEC3:
+ return sizeof(GLint) * 3; // NOLINT
+ case GL_BOOL_VEC4:
+ return sizeof(GLint) * 4; // NOLINT
+ case GL_FLOAT_MAT2:
+ return sizeof(GLfloat) * 2 * 2; // NOLINT
+ case GL_FLOAT_MAT3:
+ return sizeof(GLfloat) * 3 * 3; // NOLINT
+ case GL_FLOAT_MAT4:
+ return sizeof(GLfloat) * 4 * 4; // NOLINT
+ case GL_SAMPLER_2D:
+ return sizeof(GLint); // NOLINT
+ case GL_SAMPLER_2D_RECT_ARB:
+ return sizeof(GLint); // NOLINT
+ case GL_SAMPLER_CUBE:
+ return sizeof(GLint); // NOLINT
+ case GL_SAMPLER_EXTERNAL_OES:
+ return sizeof(GLint); // NOLINT
+ default:
+ return 0;
+ }
+}
+
+size_t GLES2Util::GetGLTypeSizeForTexturesAndBuffers(uint32 type) {
+ switch (type) {
+ case GL_BYTE:
+ return sizeof(GLbyte); // NOLINT
+ case GL_UNSIGNED_BYTE:
+ return sizeof(GLubyte); // NOLINT
+ case GL_SHORT:
+ return sizeof(GLshort); // NOLINT
+ case GL_UNSIGNED_SHORT:
+ return sizeof(GLushort); // NOLINT
+ case GL_INT:
+ return sizeof(GLint); // NOLINT
+ case GL_UNSIGNED_INT:
+ return sizeof(GLuint); // NOLINT
+ case GL_FLOAT:
+ return sizeof(GLfloat); // NOLINT
+ case GL_FIXED:
+ return sizeof(GLfixed); // NOLINT
+ default:
+ return 0;
+ }
+}
+
+uint32 GLES2Util::GLErrorToErrorBit(uint32 error) {
+ switch (error) {
+ case GL_INVALID_ENUM:
+ return gl_error_bit::kInvalidEnum;
+ case GL_INVALID_VALUE:
+ return gl_error_bit::kInvalidValue;
+ case GL_INVALID_OPERATION:
+ return gl_error_bit::kInvalidOperation;
+ case GL_OUT_OF_MEMORY:
+ return gl_error_bit::kOutOfMemory;
+ case GL_INVALID_FRAMEBUFFER_OPERATION:
+ return gl_error_bit::kInvalidFrameBufferOperation;
+ default:
+ NOTREACHED();
+ return gl_error_bit::kNoError;
+ }
+}
+
+uint32 GLES2Util::GLErrorBitToGLError(uint32 error_bit) {
+ switch (error_bit) {
+ case gl_error_bit::kInvalidEnum:
+ return GL_INVALID_ENUM;
+ case gl_error_bit::kInvalidValue:
+ return GL_INVALID_VALUE;
+ case gl_error_bit::kInvalidOperation:
+ return GL_INVALID_OPERATION;
+ case gl_error_bit::kOutOfMemory:
+ return GL_OUT_OF_MEMORY;
+ case gl_error_bit::kInvalidFrameBufferOperation:
+ return GL_INVALID_FRAMEBUFFER_OPERATION;
+ default:
+ NOTREACHED();
+ return GL_NO_ERROR;
+ }
+}
+
+uint32 GLES2Util::IndexToGLFaceTarget(int index) {
+ static uint32 faces[] = {
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ };
+ return faces[index];
+}
+
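+// Maps a texture target to a face index (the inverse of IndexToGLFaceTarget
+// for the cube-map faces); non-cube-map targets map to index 0.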
+size_t GLES2Util::GLTargetToFaceIndex(uint32 target) {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_EXTERNAL_OES:
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return 0;
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
+ return 0;
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
+ return 1;
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
+ return 2;
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
+ return 3;
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
+ return 4;
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
+ return 5;
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
+uint32 GLES2Util::GetPreferredGLReadPixelsFormat(uint32 internal_format) {
+ switch (internal_format) {
+ case GL_RGB16F_EXT:
+ case GL_RGB32F_EXT:
+ return GL_RGB;
+ case GL_RGBA16F_EXT:
+ case GL_RGBA32F_EXT:
+ return GL_RGBA;
+ default:
+ return GL_RGBA;
+ }
+}
+
+uint32 GLES2Util::GetPreferredGLReadPixelsType(
+ uint32 internal_format, uint32 texture_type) {
+ switch (internal_format) {
+ case GL_RGBA32F_EXT:
+ case GL_RGB32F_EXT:
+ return GL_FLOAT;
+ case GL_RGBA16F_EXT:
+ case GL_RGB16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_RGBA:
+ case GL_RGB:
+ // Unsized internal format, check the type
+ switch (texture_type) {
+ case GL_FLOAT:
+ case GL_HALF_FLOAT_OES:
+ return GL_FLOAT;
+ default:
+ return GL_UNSIGNED_BYTE;
+ }
+ default:
+ return GL_UNSIGNED_BYTE;
+ }
+}
+
+uint32 GLES2Util::GetChannelsForFormat(int format) {
+ switch (format) {
+ case GL_ALPHA:
+ case GL_ALPHA16F_EXT:
+ case GL_ALPHA32F_EXT:
+ return kAlpha;
+ case GL_LUMINANCE:
+ return kRGB;
+ case GL_LUMINANCE_ALPHA:
+ return kRGBA;
+ case GL_RGB:
+ case GL_RGB8_OES:
+ case GL_RGB565:
+ case GL_RGB16F_EXT:
+ case GL_RGB32F_EXT:
+ return kRGB;
+ case GL_BGRA_EXT:
+ case GL_BGRA8_EXT:
+ case GL_RGBA16F_EXT:
+ case GL_RGBA32F_EXT:
+ case GL_RGBA:
+ case GL_RGBA8_OES:
+ case GL_RGBA4:
+ case GL_RGB5_A1:
+ return kRGBA;
+ case GL_DEPTH_COMPONENT32_OES:
+ case GL_DEPTH_COMPONENT24_OES:
+ case GL_DEPTH_COMPONENT16:
+ case GL_DEPTH_COMPONENT:
+ return kDepth;
+ case GL_STENCIL_INDEX8:
+ return kStencil;
+ case GL_DEPTH_STENCIL_OES:
+ case GL_DEPTH24_STENCIL8_OES:
+ return kDepth | kStencil;
+ default:
+ return 0x0000;
+ }
+}
+
+uint32 GLES2Util::GetChannelsNeededForAttachmentType(
+ int type, uint32 max_color_attachments) {
+ switch (type) {
+ case GL_DEPTH_ATTACHMENT:
+ return kDepth;
+ case GL_STENCIL_ATTACHMENT:
+ return kStencil;
+ default:
+ if (type >= GL_COLOR_ATTACHMENT0 &&
+ type < static_cast<int>(
+ GL_COLOR_ATTACHMENT0 + max_color_attachments)) {
+ return kRGBA;
+ }
+ return 0x0000;
+ }
+}
+
+std::string GLES2Util::GetStringEnum(uint32 value) {
+ const EnumToString* entry = enum_to_string_table_;
+ const EnumToString* end = entry + enum_to_string_table_len_;
+  for (; entry < end; ++entry) {
+ if (value == entry->value) {
+ return entry->name;
+ }
+ }
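+  // Unknown enum: fall back to a zero-padded hexadecimal representation.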
+ std::stringstream ss;
+ ss.fill('0');
+ ss.width(value < 0x10000 ? 4 : 8);
+ ss << std::hex << value;
+ return "0x" + ss.str();
+}
+
+std::string GLES2Util::GetStringError(uint32 value) {
+ static EnumToString string_table[] = {
+ { GL_NONE, "GL_NONE" },
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringBool(uint32 value) {
+ return value ? "GL_TRUE" : "GL_FALSE";
+}
+
+std::string GLES2Util::GetQualifiedEnumString(
+ const EnumToString* table, size_t count, uint32 value) {
+ for (const EnumToString* end = table + count; table < end; ++table) {
+ if (table->value == value) {
+ return table->name;
+ }
+ }
+ return GetStringEnum(value);
+}
+
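+// Splits a uniform name of the form "name[index]" into the position of the
+// opening '[' and the decoded array index. Names without a subscript leave
+// *array_pos at std::string::npos and *element_index at 0.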
+bool GLES2Util::ParseUniformName(
+ const std::string& name,
+ size_t* array_pos,
+ int* element_index,
+ bool* getting_array) {
+ bool getting_array_location = false;
+ size_t open_pos = std::string::npos;
+ int index = 0;
+ if (name[name.size() - 1] == ']') {
+ if (name.size() < 3) {
+ return false;
+ }
+ open_pos = name.find_last_of('[');
+ if (open_pos == std::string::npos ||
+ open_pos >= name.size() - 2) {
+ return false;
+ }
+ size_t last = name.size() - 1;
+ for (size_t pos = open_pos + 1; pos < last; ++pos) {
+ int8 digit = name[pos] - '0';
+ if (digit < 0 || digit > 9) {
+ return false;
+ }
+ index = index * 10 + digit;
+ }
+ getting_array_location = true;
+ }
+ *getting_array = getting_array_location;
+ *element_index = index;
+ *array_pos = open_pos;
+ return true;
+}
+
+namespace {
+
+// WebGraphicsContext3DCommandBufferImpl configuration attributes. Those in
+// the 16-bit range are the same as used by EGL. Those outside the 16-bit range
+// are unique to Chromium. Attributes are matched using a closest fit algorithm.
+
+// From <EGL/egl.h>.
+const int32 kAlphaSize = 0x3021; // EGL_ALPHA_SIZE
+const int32 kBlueSize = 0x3022; // EGL_BLUE_SIZE
+const int32 kGreenSize = 0x3023; // EGL_GREEN_SIZE
+const int32 kRedSize = 0x3024; // EGL_RED_SIZE
+const int32 kDepthSize = 0x3025; // EGL_DEPTH_SIZE
+const int32 kStencilSize = 0x3026; // EGL_STENCIL_SIZE
+const int32 kSamples = 0x3031; // EGL_SAMPLES
+const int32 kSampleBuffers = 0x3032; // EGL_SAMPLE_BUFFERS
+const int32 kNone = 0x3038; // EGL_NONE
+const int32 kSwapBehavior = 0x3093; // EGL_SWAP_BEHAVIOR
+const int32 kBufferPreserved = 0x3094; // EGL_BUFFER_PRESERVED
+const int32 kBufferDestroyed = 0x3095; // EGL_BUFFER_DESTROYED
+
+// Chromium only.
+const int32 kBindGeneratesResource = 0x10000;
+const int32 kFailIfMajorPerfCaveat = 0x10001;
+const int32 kLoseContextWhenOutOfMemory = 0x10002;
+
+} // namespace
+
+ContextCreationAttribHelper::ContextCreationAttribHelper()
+ : alpha_size(-1),
+ blue_size(-1),
+ green_size(-1),
+ red_size(-1),
+ depth_size(-1),
+ stencil_size(-1),
+ samples(-1),
+ sample_buffers(-1),
+ buffer_preserved(true),
+ bind_generates_resource(true),
+ fail_if_major_perf_caveat(false),
+ lose_context_when_out_of_memory(false) {}
+
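+// Appends the attributes as a flat list of (key, value) pairs terminated by
+// kNone (EGL_NONE). Size and sample fields left at -1 are omitted.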
+void ContextCreationAttribHelper::Serialize(std::vector<int32>* attribs) const {
+ if (alpha_size != -1) {
+ attribs->push_back(kAlphaSize);
+ attribs->push_back(alpha_size);
+ }
+ if (blue_size != -1) {
+ attribs->push_back(kBlueSize);
+ attribs->push_back(blue_size);
+ }
+ if (green_size != -1) {
+ attribs->push_back(kGreenSize);
+ attribs->push_back(green_size);
+ }
+ if (red_size != -1) {
+ attribs->push_back(kRedSize);
+ attribs->push_back(red_size);
+ }
+ if (depth_size != -1) {
+ attribs->push_back(kDepthSize);
+ attribs->push_back(depth_size);
+ }
+ if (stencil_size != -1) {
+ attribs->push_back(kStencilSize);
+ attribs->push_back(stencil_size);
+ }
+ if (samples != -1) {
+ attribs->push_back(kSamples);
+ attribs->push_back(samples);
+ }
+ if (sample_buffers != -1) {
+ attribs->push_back(kSampleBuffers);
+ attribs->push_back(sample_buffers);
+ }
+ attribs->push_back(kSwapBehavior);
+ attribs->push_back(buffer_preserved ? kBufferPreserved : kBufferDestroyed);
+ attribs->push_back(kBindGeneratesResource);
+ attribs->push_back(bind_generates_resource ? 1 : 0);
+ attribs->push_back(kFailIfMajorPerfCaveat);
+ attribs->push_back(fail_if_major_perf_caveat ? 1 : 0);
+ attribs->push_back(kLoseContextWhenOutOfMemory);
+ attribs->push_back(lose_context_when_out_of_memory ? 1 : 0);
+ attribs->push_back(kNone);
+}
+
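+// Reads a (key, value) attribute list of the form produced by Serialize(),
+// stopping at kNone. Returns false on an unknown key or a key with no value.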
+bool ContextCreationAttribHelper::Parse(const std::vector<int32>& attribs) {
+ for (size_t i = 0; i < attribs.size(); i += 2) {
+ const int32 attrib = attribs[i];
+ if (i + 1 >= attribs.size()) {
+ if (attrib == kNone) {
+ return true;
+ }
+
+ DLOG(ERROR) << "Missing value after context creation attribute: "
+ << attrib;
+ return false;
+ }
+
+ const int32 value = attribs[i+1];
+ switch (attrib) {
+ case kAlphaSize:
+ alpha_size = value;
+ break;
+ case kBlueSize:
+ blue_size = value;
+ break;
+ case kGreenSize:
+ green_size = value;
+ break;
+ case kRedSize:
+ red_size = value;
+ break;
+ case kDepthSize:
+ depth_size = value;
+ break;
+ case kStencilSize:
+ stencil_size = value;
+ break;
+ case kSamples:
+ samples = value;
+ break;
+ case kSampleBuffers:
+ sample_buffers = value;
+ break;
+ case kSwapBehavior:
+ buffer_preserved = value == kBufferPreserved;
+ break;
+ case kBindGeneratesResource:
+ bind_generates_resource = value != 0;
+ break;
+ case kFailIfMajorPerfCaveat:
+ fail_if_major_perf_caveat = value != 0;
+ break;
+ case kLoseContextWhenOutOfMemory:
+ lose_context_when_out_of_memory = value != 0;
+ break;
+ case kNone:
+        // Terminates the list, even if more attributes follow.
+ return true;
+ default:
+ DLOG(ERROR) << "Invalid context creation attribute: " << attrib;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+#include "gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/common/gles2_cmd_utils.h b/gpu/command_buffer/common/gles2_cmd_utils.h
new file mode 100644
index 0000000..163ffc0
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_utils.h
@@ -0,0 +1,224 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is here so other GLES2 related files can have a common set of
+// includes where appropriate.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_UTILS_H_
+#define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_UTILS_H_
+
+#include <stdint.h>
+
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "gpu/command_buffer/common/gles2_utils_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+// Multiplies two 32-bit unsigned numbers, checking for overflow.
+// Returns true if there was no overflow.
+inline bool SafeMultiplyUint32(uint32_t a, uint32_t b, uint32_t* dst) {
+ if (b == 0) {
+ *dst = 0;
+ return true;
+ }
+ uint32_t v = a * b;
+ if (v / b != a) {
+ *dst = 0;
+ return false;
+ }
+ *dst = v;
+ return true;
+}
+
+// Adds two 32-bit unsigned numbers, checking for overflow.
+// Returns true if there was no overflow.
+inline bool SafeAddUint32(uint32_t a, uint32_t b, uint32_t* dst) {
+ if (a + b < a) {
+ *dst = 0;
+ return false;
+ }
+ *dst = a + b;
+ return true;
+}
+
+// Adds two 32-bit signed numbers, checking for overflow.
+// Returns true if there was no overflow.
+inline bool SafeAddInt32(int32_t a, int32_t b, int32_t* dst) {
+ int64_t sum64 = static_cast<int64_t>(a) + b;
+ int32_t sum32 = static_cast<int32_t>(sum64);
+ bool safe = sum64 == static_cast<int64_t>(sum32);
+ *dst = safe ? sum32 : 0;
+ return safe;
+}
+
+// Return false if |value| is more than a 32 bit integer can represent.
+template<typename T>
+inline bool FitInt32NonNegative(T value) {
+ const int32_t max = std::numeric_limits<int32_t>::max();
+ return (std::numeric_limits<T>::max() <= max ||
+ value <= static_cast<T>(max));
+}
+
+// Utilities for GLES2 support.
+class GLES2_UTILS_EXPORT GLES2Util {
+ public:
+ static const int kNumFaces = 6;
+
+ // Bits returned by GetChannelsForFormat
+ enum ChannelBits {
+ kRed = 0x1,
+ kGreen = 0x2,
+ kBlue = 0x4,
+ kAlpha = 0x8,
+ kDepth = 0x10000,
+ kStencil = 0x20000,
+
+ kRGB = kRed | kGreen | kBlue,
+ kRGBA = kRGB | kAlpha
+ };
+
+ struct EnumToString {
+ uint32_t value;
+ const char* name;
+ };
+
+ GLES2Util()
+ : num_compressed_texture_formats_(0),
+ num_shader_binary_formats_(0) {
+ }
+
+ int num_compressed_texture_formats() const {
+ return num_compressed_texture_formats_;
+ }
+
+ void set_num_compressed_texture_formats(int num_compressed_texture_formats) {
+ num_compressed_texture_formats_ = num_compressed_texture_formats;
+ }
+
+ int num_shader_binary_formats() const {
+ return num_shader_binary_formats_;
+ }
+
+ void set_num_shader_binary_formats(int num_shader_binary_formats) {
+ num_shader_binary_formats_ = num_shader_binary_formats;
+ }
+
+ // Gets the number of values a particular id will return when a glGet
+ // function is called. If 0 is returned the id is invalid.
+ int GLGetNumValuesReturned(int id) const;
+
+ // Computes the size of a single group of elements from a format and type pair
+ static uint32_t ComputeImageGroupSize(int format, int type);
+
+ // Computes the size of an image row including alignment padding
+ static bool ComputeImagePaddedRowSize(
+ int width, int format, int type, int unpack_alignment,
+ uint32_t* padded_row_size);
+
+ // Computes the size of image data for TexImage2D and TexSubImage2D.
+ // Optionally the unpadded and padded row sizes can be returned. If height < 2
+ // then the padded_row_size will be the same as the unpadded_row_size since
+ // padding is not necessary.
+ static bool ComputeImageDataSizes(
+ int width, int height, int format, int type, int unpack_alignment,
+ uint32_t* size, uint32_t* unpadded_row_size, uint32_t* padded_row_size);
+
+ static size_t RenderbufferBytesPerPixel(int format);
+
+ static uint32_t GetGLDataTypeSizeForUniforms(int type);
+
+ static size_t GetGLTypeSizeForTexturesAndBuffers(uint32_t type);
+
+ static uint32_t GLErrorToErrorBit(uint32_t gl_error);
+
+ static uint32_t GLErrorBitToGLError(uint32_t error_bit);
+
+ static uint32_t IndexToGLFaceTarget(int index);
+
+ static size_t GLTargetToFaceIndex(uint32_t target);
+
+ static uint32_t GetPreferredGLReadPixelsFormat(uint32_t internal_format);
+
+ static uint32_t GetPreferredGLReadPixelsType(
+ uint32_t internal_format, uint32_t texture_type);
+
+ // Returns a bitmask for the channels the given format supports.
+ // See ChannelBits.
+ static uint32_t GetChannelsForFormat(int format);
+
+ // Returns a bitmask for the channels the given attachment type needs.
+ static uint32_t GetChannelsNeededForAttachmentType(
+ int type, uint32_t max_color_attachments);
+
+ // Return true if value is neither a power of two nor zero.
+ static bool IsNPOT(uint32_t value) {
+ return (value & (value - 1)) != 0;
+ }
+
+ // Return true if value is a power of two or zero.
+ static bool IsPOT(uint32_t value) {
+ return (value & (value - 1)) == 0;
+ }
+
+ static std::string GetStringEnum(uint32_t value);
+ static std::string GetStringBool(uint32_t value);
+ static std::string GetStringError(uint32_t value);
+
+ // Parses a uniform name.
+ // array_pos: the position of the last '[' character in name.
+  // element_index: the index of the array element specified in the name.
+  // getting_array: true if the name refers to an array.
+  // Returns true if parsing was successful. Returning true does NOT mean
+  // it's a valid uniform name. On the other hand, returning false does mean
+  // it's an invalid uniform name.
+ static bool ParseUniformName(
+ const std::string& name,
+ size_t* array_pos,
+ int* element_index,
+ bool* getting_array);
+
+ #include "../common/gles2_cmd_utils_autogen.h"
+
+ private:
+ static std::string GetQualifiedEnumString(
+ const EnumToString* table, size_t count, uint32_t value);
+
+ static const EnumToString* const enum_to_string_table_;
+ static const size_t enum_to_string_table_len_;
+
+ int num_compressed_texture_formats_;
+ int num_shader_binary_formats_;
+};
+
+struct GLES2_UTILS_EXPORT ContextCreationAttribHelper {
+ ContextCreationAttribHelper();
+
+ void Serialize(std::vector<int32_t>* attribs) const;
+ bool Parse(const std::vector<int32_t>& attribs);
+
+ // -1 if invalid or unspecified.
+ int32_t alpha_size;
+ int32_t blue_size;
+ int32_t green_size;
+ int32_t red_size;
+ int32_t depth_size;
+ int32_t stencil_size;
+ int32_t samples;
+ int32_t sample_buffers;
+ bool buffer_preserved;
+ bool bind_generates_resource;
+ bool fail_if_major_perf_caveat;
+ bool lose_context_when_out_of_memory;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_UTILS_H_
+
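A small usage sketch of the overflow-checked helpers declared near the top of this header, which let size computations such as the ComputeImage* routines fail cleanly instead of wrapping around (the calling function and its name are hypothetical):

// Hypothetical caller: computes width * height * bytes_per_pixel in 32 bits,
// rejecting any intermediate overflow with the helpers declared above.
#include <stdint.h>
#include "gpu/command_buffer/common/gles2_cmd_utils.h"

bool ComputeTextureBytes(uint32_t width,
                         uint32_t height,
                         uint32_t bytes_per_pixel,
                         uint32_t* out_size) {
  uint32_t row_size = 0;
  if (!gpu::gles2::SafeMultiplyUint32(width, bytes_per_pixel, &row_size))
    return false;  // width * bytes_per_pixel overflowed uint32_t.
  if (!gpu::gles2::SafeMultiplyUint32(row_size, height, out_size))
    return false;  // row_size * height overflowed uint32_t.
  return true;
}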
diff --git a/gpu/command_buffer/common/gles2_cmd_utils_autogen.h b/gpu/command_buffer/common/gles2_cmd_utils_autogen.h
new file mode 100644
index 0000000..1871201
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_utils_autogen.h
@@ -0,0 +1,73 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_UTILS_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_UTILS_AUTOGEN_H_
+
+static std::string GetStringAttachment(uint32_t value);
+static std::string GetStringBackbufferAttachment(uint32_t value);
+static std::string GetStringBlitFilter(uint32_t value);
+static std::string GetStringBufferParameter(uint32_t value);
+static std::string GetStringBufferTarget(uint32_t value);
+static std::string GetStringBufferUsage(uint32_t value);
+static std::string GetStringCapability(uint32_t value);
+static std::string GetStringCmpFunction(uint32_t value);
+static std::string GetStringCompressedTextureFormat(uint32_t value);
+static std::string GetStringDrawMode(uint32_t value);
+static std::string GetStringDstBlendFactor(uint32_t value);
+static std::string GetStringEquation(uint32_t value);
+static std::string GetStringFaceMode(uint32_t value);
+static std::string GetStringFaceType(uint32_t value);
+static std::string GetStringFrameBufferParameter(uint32_t value);
+static std::string GetStringFrameBufferTarget(uint32_t value);
+static std::string GetStringGLState(uint32_t value);
+static std::string GetStringGetMaxIndexType(uint32_t value);
+static std::string GetStringGetTexParamTarget(uint32_t value);
+static std::string GetStringHintMode(uint32_t value);
+static std::string GetStringHintTarget(uint32_t value);
+static std::string GetStringImageInternalFormat(uint32_t value);
+static std::string GetStringImageUsage(uint32_t value);
+static std::string GetStringIndexType(uint32_t value);
+static std::string GetStringMatrixMode(uint32_t value);
+static std::string GetStringPixelStore(uint32_t value);
+static std::string GetStringPixelType(uint32_t value);
+static std::string GetStringProgramParameter(uint32_t value);
+static std::string GetStringQueryObjectParameter(uint32_t value);
+static std::string GetStringQueryParameter(uint32_t value);
+static std::string GetStringQueryTarget(uint32_t value);
+static std::string GetStringReadPixelFormat(uint32_t value);
+static std::string GetStringReadPixelType(uint32_t value);
+static std::string GetStringRenderBufferFormat(uint32_t value);
+static std::string GetStringRenderBufferParameter(uint32_t value);
+static std::string GetStringRenderBufferTarget(uint32_t value);
+static std::string GetStringResetStatus(uint32_t value);
+static std::string GetStringShaderBinaryFormat(uint32_t value);
+static std::string GetStringShaderParameter(uint32_t value);
+static std::string GetStringShaderPrecision(uint32_t value);
+static std::string GetStringShaderType(uint32_t value);
+static std::string GetStringSrcBlendFactor(uint32_t value);
+static std::string GetStringStencilOp(uint32_t value);
+static std::string GetStringStringType(uint32_t value);
+static std::string GetStringTextureBindTarget(uint32_t value);
+static std::string GetStringTextureFormat(uint32_t value);
+static std::string GetStringTextureInternalFormat(uint32_t value);
+static std::string GetStringTextureInternalFormatStorage(uint32_t value);
+static std::string GetStringTextureMagFilterMode(uint32_t value);
+static std::string GetStringTextureMinFilterMode(uint32_t value);
+static std::string GetStringTextureParameter(uint32_t value);
+static std::string GetStringTexturePool(uint32_t value);
+static std::string GetStringTextureTarget(uint32_t value);
+static std::string GetStringTextureUsage(uint32_t value);
+static std::string GetStringTextureWrapMode(uint32_t value);
+static std::string GetStringVertexAttribType(uint32_t value);
+static std::string GetStringVertexAttribute(uint32_t value);
+static std::string GetStringVertexPointer(uint32_t value);
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_UTILS_AUTOGEN_H_
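Each declaration above converts a raw GLenum value in one parameter category to its symbolic name; the definitions follow in the implementation header below. A minimal, illustrative use for log messages (the wrapper function is an assumption, not part of the source):

// Illustrative only: builds a readable log string for a glEnable call using
// the generated GLES2Util string helpers.
#include <stdint.h>
#include <string>
#include "gpu/command_buffer/common/gles2_cmd_utils.h"

std::string DescribeEnableCall(uint32_t capability) {
  return "glEnable(" +
         gpu::gles2::GLES2Util::GetStringCapability(capability) + ")";
}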
diff --git a/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
new file mode 100644
index 0000000..c5bbc54
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
@@ -0,0 +1,3893 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_UTILS_IMPLEMENTATION_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_UTILS_IMPLEMENTATION_AUTOGEN_H_
+
+static const GLES2Util::EnumToString enum_to_string_table[] = {
+ {
+ 0x78EC,
+ "GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM",
+ },
+ {
+ 0x8825,
+ "GL_DRAW_BUFFER0_EXT",
+ },
+ {
+ 0x0BC1,
+ "GL_ALPHA_TEST_FUNC_QCOM",
+ },
+ {
+ 0x884C,
+ "GL_TEXTURE_COMPARE_MODE_EXT",
+ },
+ {
+ 0x0BC2,
+ "GL_ALPHA_TEST_REF_QCOM",
+ },
+ {
+ 0x884D,
+ "GL_TEXTURE_COMPARE_FUNC_EXT",
+ },
+ {
+ 0x884E,
+ "GL_COMPARE_REF_TO_TEXTURE_EXT",
+ },
+ {
+ 0x93A1,
+ "GL_BGRA8_EXT",
+ },
+ {
+ 0,
+ "GL_FALSE",
+ },
+ {
+ 0x00400000,
+ "GL_STENCIL_BUFFER_BIT6_QCOM",
+ },
+ {
+ 0x9138,
+ "GL_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG",
+ },
+ {
+ 0x8FC4,
+ "GL_SHADER_BINARY_VIV",
+ },
+ {
+ 0x9130,
+ "GL_SGX_PROGRAM_BINARY_IMG",
+ },
+ {
+ 0x9133,
+ "GL_RENDERBUFFER_SAMPLES_IMG",
+ },
+ {
+ 0x82E0,
+ "GL_BUFFER_KHR",
+ },
+ {
+ 0x9135,
+ "GL_MAX_SAMPLES_IMG",
+ },
+ {
+ 0x9134,
+ "GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG",
+ },
+ {
+ 0x9137,
+ "GL_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG",
+ },
+ {
+ 0x9136,
+ "GL_TEXTURE_SAMPLES_IMG",
+ },
+ {
+ 0x00000020,
+ "GL_COLOR_BUFFER_BIT5_QCOM",
+ },
+ {
+ 0x0008,
+ "GL_MAP_INVALIDATE_BUFFER_BIT_EXT",
+ },
+ {
+ 0x0BC0,
+ "GL_ALPHA_TEST_QCOM",
+ },
+ {
+ 0x0006,
+ "GL_TRIANGLE_FAN",
+ },
+ {
+ 0x0004,
+ "GL_TRIANGLES",
+ },
+ {
+ 0x0005,
+ "GL_TRIANGLE_STRIP",
+ },
+ {
+ 0x0002,
+ "GL_LINE_LOOP",
+ },
+ {
+ 0x0003,
+ "GL_LINE_STRIP",
+ },
+ {
+ 0x0000,
+ "GL_POINTS",
+ },
+ {
+ 0x0001,
+ "GL_LINES",
+ },
+ {
+ 0x78F0,
+ "GL_IMAGE_ROWBYTES_CHROMIUM",
+ },
+ {
+ 0x88B8,
+ "GL_READ_ONLY",
+ },
+ {
+ 0x88B9,
+ "GL_WRITE_ONLY_OES",
+ },
+ {
+ 0x8211,
+ "GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE_EXT",
+ },
+ {
+ 0x8210,
+ "GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT",
+ },
+ {
+ 0x8741,
+ "GL_PROGRAM_BINARY_LENGTH_OES",
+ },
+ {
+ 0x8740,
+ "GL_Z400_BINARY_AMD",
+ },
+ {
+ 0x8192,
+ "GL_GENERATE_MIPMAP_HINT",
+ },
+ {
+ 0x8A54,
+ "GL_COMPRESSED_SRGB_PVRTC_2BPPV1_EXT",
+ },
+ {
+ 0x8A55,
+ "GL_COMPRESSED_SRGB_PVRTC_4BPPV1_EXT",
+ },
+ {
+ 0x8A56,
+ "GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1_EXT",
+ },
+ {
+ 0x8A57,
+ "GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1_EXT",
+ },
+ {
+ 0x8A51,
+ "GL_RGB_RAW_422_APPLE",
+ },
+ {
+ 0x87F9,
+ "GL_3DC_X_AMD",
+ },
+ {
+ 0x8A53,
+ "GL_SYNC_OBJECT_APPLE",
+ },
+ {
+ 0x8DF8,
+ "GL_SHADER_BINARY_FORMATS",
+ },
+ {
+ 0x8DF9,
+ "GL_NUM_SHADER_BINARY_FORMATS",
+ },
+ {
+ 0x826D,
+ "GL_DEBUG_GROUP_STACK_DEPTH_KHR",
+ },
+ {
+ 0x826B,
+ "GL_DEBUG_SEVERITY_NOTIFICATION_KHR",
+ },
+ {
+ 0x826C,
+ "GL_MAX_DEBUG_GROUP_STACK_DEPTH_KHR",
+ },
+ {
+ 0x8B59,
+ "GL_BOOL_VEC4",
+ },
+ {
+ 0x826A,
+ "GL_DEBUG_TYPE_POP_GROUP_KHR",
+ },
+ {
+ 0x8B57,
+ "GL_BOOL_VEC2",
+ },
+ {
+ 0x8DF1,
+ "GL_MEDIUM_FLOAT",
+ },
+ {
+ 0x8B55,
+ "GL_INT_VEC4",
+ },
+ {
+ 0x8B54,
+ "GL_INT_VEC3",
+ },
+ {
+ 0x8DF4,
+ "GL_MEDIUM_INT",
+ },
+ {
+ 0x8DF5,
+ "GL_HIGH_INT",
+ },
+ {
+ 0x8B51,
+ "GL_FLOAT_VEC3",
+ },
+ {
+ 0x8B50,
+ "GL_FLOAT_VEC2",
+ },
+ {
+ 0x806F,
+ "GL_TEXTURE_3D_OES",
+ },
+ {
+ 0x92E0,
+ "GL_DEBUG_OUTPUT_KHR",
+ },
+ {
+ 0x806A,
+ "GL_TEXTURE_BINDING_3D_OES",
+ },
+ {
+ 0x8CE3,
+ "GL_COLOR_ATTACHMENT3_EXT",
+ },
+ {
+ 0x1904,
+ "GL_GREEN_NV",
+ },
+ {
+ 0x928D,
+ "GL_DST_OUT_NV",
+ },
+ {
+ 0x8069,
+ "GL_TEXTURE_BINDING_2D",
+ },
+ {
+ 0x8261,
+ "GL_NO_RESET_NOTIFICATION_EXT",
+ },
+ {
+ 0x8DFA,
+ "GL_SHADER_COMPILER",
+ },
+ {
+ 0x8DFB,
+ "GL_MAX_VERTEX_UNIFORM_VECTORS",
+ },
+ {
+ 0x8DFC,
+ "GL_MAX_VARYING_VECTORS",
+ },
+ {
+ 0x8B5C,
+ "GL_FLOAT_MAT4",
+ },
+ {
+ 0x8B5B,
+ "GL_FLOAT_MAT3",
+ },
+ {
+ 0x8268,
+ "GL_DEBUG_TYPE_MARKER_KHR",
+ },
+ {
+ 0x8269,
+ "GL_DEBUG_TYPE_PUSH_GROUP_KHR",
+ },
+ {
+ 0x1905,
+ "GL_BLUE_NV",
+ },
+ {
+ 0x87FF,
+ "GL_PROGRAM_BINARY_FORMATS_OES",
+ },
+ {
+ 0x87FE,
+ "GL_NUM_PROGRAM_BINARY_FORMATS_OES",
+ },
+ {
+ 0x2600,
+ "GL_NEAREST",
+ },
+ {
+ 0x2601,
+ "GL_LINEAR",
+ },
+ {
+ 0x8C03,
+ "GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG",
+ },
+ {
+ 0x9242,
+ "GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM",
+ },
+ {
+ 0x88BB,
+ "GL_BUFFER_ACCESS_OES",
+ },
+ {
+ 0x88BC,
+ "GL_BUFFER_MAPPED_OES",
+ },
+ {
+ 0x88BD,
+ "GL_BUFFER_MAP_POINTER_OES",
+ },
+ {
+ 0x88BF,
+ "GL_TIME_ELAPSED_EXT",
+ },
+ {
+ 0x0C10,
+ "GL_SCISSOR_BOX",
+ },
+ {
+ 0x0C11,
+ "GL_SCISSOR_TEST",
+ },
+ {
+ 0x80000000,
+ "GL_MULTISAMPLE_BUFFER_BIT7_QCOM",
+ },
+ {
+ 0x8A48,
+ "GL_TEXTURE_SRGB_DECODE_EXT",
+ },
+ {
+ 0x300E,
+ "GL_CONTEXT_LOST",
+ },
+ {
+ 0x02000000,
+ "GL_MULTISAMPLE_BUFFER_BIT1_QCOM",
+ },
+ {
+ 0x8C2F,
+ "GL_ANY_SAMPLES_PASSED_EXT",
+ },
+ {
+ 0x8BD2,
+ "GL_TEXTURE_WIDTH_QCOM",
+ },
+ {
+ 0x8BD3,
+ "GL_TEXTURE_HEIGHT_QCOM",
+ },
+ {
+ 0x8BD4,
+ "GL_TEXTURE_DEPTH_QCOM",
+ },
+ {
+ 0x8BD5,
+ "GL_TEXTURE_INTERNAL_FORMAT_QCOM",
+ },
+ {
+ 0x8BD6,
+ "GL_TEXTURE_FORMAT_QCOM",
+ },
+ {
+ 0x8BD7,
+ "GL_TEXTURE_TYPE_QCOM",
+ },
+ {
+ 0x8B8D,
+ "GL_CURRENT_PROGRAM",
+ },
+ {
+ 0x8BD9,
+ "GL_TEXTURE_NUM_LEVELS_QCOM",
+ },
+ {
+ 0x00200000,
+ "GL_STENCIL_BUFFER_BIT5_QCOM",
+ },
+ {
+ 0x8B8A,
+ "GL_ACTIVE_ATTRIBUTE_MAX_LENGTH",
+ },
+ {
+ 0x8B8B,
+ "GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES",
+ },
+ {
+ 0x8B8C,
+ "GL_SHADING_LANGUAGE_VERSION",
+ },
+ {
+ 0x8BDA,
+ "GL_TEXTURE_TARGET_QCOM",
+ },
+ {
+ 0x8BDB,
+ "GL_TEXTURE_OBJECT_VALID_QCOM",
+ },
+ {
+ 0x8BDC,
+ "GL_STATE_RESTORE",
+ },
+ {
+ 0x8B88,
+ "GL_SHADER_SOURCE_LENGTH",
+ },
+ {
+ 0x8B89,
+ "GL_ACTIVE_ATTRIBUTES",
+ },
+ {
+ 0x93C9,
+ "GL_COMPRESSED_RGBA_ASTC_6x6x6_OES",
+ },
+ {
+ 0x93C8,
+ "GL_COMPRESSED_RGBA_ASTC_6x6x5_OES",
+ },
+ {
+ 0x8B84,
+ "GL_INFO_LOG_LENGTH",
+ },
+ {
+ 0x8B85,
+ "GL_ATTACHED_SHADERS",
+ },
+ {
+ 0x8B86,
+ "GL_ACTIVE_UNIFORMS",
+ },
+ {
+ 0x8B87,
+ "GL_ACTIVE_UNIFORM_MAX_LENGTH",
+ },
+ {
+ 0x8B80,
+ "GL_DELETE_STATUS",
+ },
+ {
+ 0x8B81,
+ "GL_COMPILE_STATUS",
+ },
+ {
+ 0x8B82,
+ "GL_LINK_STATUS",
+ },
+ {
+ 0x8B83,
+ "GL_VALIDATE_STATUS",
+ },
+ {
+ 0x8D48,
+ "GL_STENCIL_INDEX8",
+ },
+ {
+ 0x8D46,
+ "GL_STENCIL_INDEX1_OES",
+ },
+ {
+ 0x8D47,
+ "GL_STENCIL_INDEX4_OES",
+ },
+ {
+ 0x8D44,
+ "GL_RENDERBUFFER_INTERNAL_FORMAT",
+ },
+ {
+ 0x00000100,
+ "GL_DEPTH_BUFFER_BIT",
+ },
+ {
+ 0x8D42,
+ "GL_RENDERBUFFER_WIDTH",
+ },
+ {
+ 0x8D43,
+ "GL_RENDERBUFFER_HEIGHT",
+ },
+ {
+ 0x8D40,
+ "GL_FRAMEBUFFER",
+ },
+ {
+ 0x8D41,
+ "GL_RENDERBUFFER",
+ },
+ {
+ 0x0BD0,
+ "GL_DITHER",
+ },
+ {
+ 0x93D3,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR",
+ },
+ {
+ 0x1801,
+ "GL_DEPTH_EXT",
+ },
+ {
+ 0x1800,
+ "GL_COLOR_EXT",
+ },
+ {
+ 0x1802,
+ "GL_STENCIL_EXT",
+ },
+ {
+ 0x0B21,
+ "GL_LINE_WIDTH",
+ },
+ {
+ 0x81A5,
+ "GL_DEPTH_COMPONENT16",
+ },
+ {
+ 0x81A6,
+ "GL_DEPTH_COMPONENT24_OES",
+ },
+ {
+ 0x81A7,
+ "GL_DEPTH_COMPONENT32_OES",
+ },
+ {
+ 0x88FE,
+ "GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE",
+ },
+ {
+ 0x8B6A,
+ "GL_FLOAT_MAT4x3_NV",
+ },
+ {
+ 0x93D0,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR",
+ },
+ {
+ 0x9143,
+ "GL_MAX_DEBUG_MESSAGE_LENGTH_KHR",
+ },
+ {
+ 0x9144,
+ "GL_MAX_DEBUG_LOGGED_MESSAGES_KHR",
+ },
+ {
+ 0x9145,
+ "GL_DEBUG_LOGGED_MESSAGES_KHR",
+ },
+ {
+ 0x9146,
+ "GL_DEBUG_SEVERITY_HIGH_KHR",
+ },
+ {
+ 0x9147,
+ "GL_DEBUG_SEVERITY_MEDIUM_KHR",
+ },
+ {
+ 0x9148,
+ "GL_DEBUG_SEVERITY_LOW_KHR",
+ },
+ {
+ 0x9260,
+ "GL_GCCSO_SHADER_BINARY_FJ",
+ },
+ {
+ 0x8F60,
+ "GL_MALI_SHADER_BINARY_ARM",
+ },
+ {
+ 0x8F61,
+ "GL_MALI_PROGRAM_BINARY_ARM",
+ },
+ {
+ 0x87EE,
+ "GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD",
+ },
+ {
+ 0x822B,
+ "GL_RG8_EXT",
+ },
+ {
+ 0x822F,
+ "GL_RG16F_EXT",
+ },
+ {
+ 0x822D,
+ "GL_R16F_EXT",
+ },
+ {
+ 0x822E,
+ "GL_R32F_EXT",
+ },
+ {
+ 1,
+ "GL_ES_VERSION_2_0",
+ },
+ {
+ 0x84F9,
+ "GL_DEPTH_STENCIL_OES",
+ },
+ {
+ 0x8368,
+ "GL_UNSIGNED_INT_2_10_10_10_REV_EXT",
+ },
+ {
+ 0x8819,
+ "GL_LUMINANCE_ALPHA32F_EXT",
+ },
+ {
+ 0x8818,
+ "GL_LUMINANCE32F_EXT",
+ },
+ {
+ 0x8363,
+ "GL_UNSIGNED_SHORT_5_6_5",
+ },
+ {
+ 0x8814,
+ "GL_RGBA32F_EXT",
+ },
+ {
+ 0x84F2,
+ "GL_ALL_COMPLETED_NV",
+ },
+ {
+ 0x8816,
+ "GL_ALPHA32F_EXT",
+ },
+ {
+ 0x84F4,
+ "GL_FENCE_CONDITION_NV",
+ },
+ {
+ 0x8366,
+ "GL_UNSIGNED_SHORT_1_5_5_5_REV_EXT",
+ },
+ {
+ 0x8365,
+ "GL_UNSIGNED_SHORT_4_4_4_4_REV_EXT",
+ },
+ {
+ 0x84F7,
+ "GL_COMMANDS_COMPLETED_CHROMIUM",
+ },
+ {
+ 0x881E,
+ "GL_LUMINANCE16F_EXT",
+ },
+ {
+ 0x84FA,
+ "GL_UNSIGNED_INT_24_8_OES",
+ },
+ {
+ 0x881F,
+ "GL_LUMINANCE_ALPHA16F_EXT",
+ },
+ {
+ 0x881A,
+ "GL_RGBA16F_EXT",
+ },
+ {
+ 0x84FE,
+ "GL_TEXTURE_MAX_ANISOTROPY_EXT",
+ },
+ {
+ 0x0901,
+ "GL_CCW",
+ },
+ {
+ 0x0900,
+ "GL_CW",
+ },
+ {
+ 0x8229,
+ "GL_R8_EXT",
+ },
+ {
+ 0x9283,
+ "GL_DISJOINT_NV",
+ },
+ {
+ 0x8227,
+ "GL_RG_EXT",
+ },
+ {
+ 0x8B66,
+ "GL_FLOAT_MAT2x4_NV",
+ },
+ {
+ 0x8B67,
+ "GL_FLOAT_MAT3x2_NV",
+ },
+ {
+ 0x8B65,
+ "GL_FLOAT_MAT2x3_NV",
+ },
+ {
+ 0x8B62,
+ "GL_SAMPLER_2D_SHADOW_EXT",
+ },
+ {
+ 0x8B63,
+ "GL_SAMPLER_2D_RECT_ARB",
+ },
+ {
+ 0x8B60,
+ "GL_SAMPLER_CUBE",
+ },
+ {
+ 0x00001000,
+ "GL_DEPTH_BUFFER_BIT4_QCOM",
+ },
+ {
+ 0x8B68,
+ "GL_FLOAT_MAT3x4_NV",
+ },
+ {
+ 0x83F0,
+ "GL_COMPRESSED_RGB_S3TC_DXT1_EXT",
+ },
+ {
+ 0x00000080,
+ "GL_COLOR_BUFFER_BIT7_QCOM",
+ },
+ {
+ 0x88F0,
+ "GL_DEPTH24_STENCIL8_OES",
+ },
+ {
+ 0x80A0,
+ "GL_SAMPLE_COVERAGE",
+ },
+ {
+ 0x928F,
+ "GL_DST_ATOP_NV",
+ },
+ {
+ 0x80A9,
+ "GL_SAMPLES",
+ },
+ {
+ 0x80A8,
+ "GL_SAMPLE_BUFFERS",
+ },
+ {
+ 0x0D55,
+ "GL_ALPHA_BITS",
+ },
+ {
+ 0x0D54,
+ "GL_BLUE_BITS",
+ },
+ {
+ 0x0D57,
+ "GL_STENCIL_BITS",
+ },
+ {
+ 0x0D56,
+ "GL_DEPTH_BITS",
+ },
+ {
+ 0x8CD5,
+ "GL_FRAMEBUFFER_COMPLETE",
+ },
+ {
+ 0x0D50,
+ "GL_SUBPIXEL_BITS",
+ },
+ {
+ 0x0D53,
+ "GL_GREEN_BITS",
+ },
+ {
+ 0x0D52,
+ "GL_RED_BITS",
+ },
+ {
+ 0x8037,
+ "GL_POLYGON_OFFSET_FILL",
+ },
+ {
+ 0x928C,
+ "GL_SRC_OUT_NV",
+ },
+ {
+ 0x8034,
+ "GL_UNSIGNED_SHORT_5_5_5_1",
+ },
+ {
+ 0x8033,
+ "GL_UNSIGNED_SHORT_4_4_4_4",
+ },
+ {
+ 0x928B,
+ "GL_DST_IN_NV",
+ },
+ {
+ 0x0305,
+ "GL_ONE_MINUS_DST_ALPHA",
+ },
+ {
+ 0x0304,
+ "GL_DST_ALPHA",
+ },
+ {
+ 0x0307,
+ "GL_ONE_MINUS_DST_COLOR",
+ },
+ {
+ 0x0306,
+ "GL_DST_COLOR",
+ },
+ {
+ 0x0301,
+ "GL_ONE_MINUS_SRC_COLOR",
+ },
+ {
+ 0x0300,
+ "GL_SRC_COLOR",
+ },
+ {
+ 0x0303,
+ "GL_ONE_MINUS_SRC_ALPHA",
+ },
+ {
+ 0x0302,
+ "GL_SRC_ALPHA",
+ },
+ {
+ 0x0308,
+ "GL_SRC_ALPHA_SATURATE",
+ },
+ {
+ 0x2A00,
+ "GL_POLYGON_OFFSET_UNITS",
+ },
+ {
+ 0xFFFFFFFF,
+ "GL_ALL_SHADER_BITS_EXT",
+ },
+ {
+ 0x00800000,
+ "GL_STENCIL_BUFFER_BIT7_QCOM",
+ },
+ {
+ 0x8C4D,
+ "GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_NV",
+ },
+ {
+ 0x00020000,
+ "GL_STENCIL_BUFFER_BIT1_QCOM",
+ },
+ {
+ 0x8D00,
+ "GL_DEPTH_ATTACHMENT",
+ },
+ {
+ 0x8FA0,
+ "GL_PERFMON_GLOBAL_MODE_QCOM",
+ },
+ {
+ 0x8815,
+ "GL_RGB32F_EXT",
+ },
+ {
+ 0x813D,
+ "GL_TEXTURE_MAX_LEVEL_APPLE",
+ },
+ {
+ 0x8DFD,
+ "GL_MAX_FRAGMENT_UNIFORM_VECTORS",
+ },
+ {
+ 0x8CDD,
+ "GL_FRAMEBUFFER_UNSUPPORTED",
+ },
+ {
+ 0x8CDF,
+ "GL_MAX_COLOR_ATTACHMENTS_EXT",
+ },
+ {
+ 0x90F3,
+ "GL_CONTEXT_ROBUST_ACCESS_EXT",
+ },
+ {
+ 0x90F2,
+ "GL_MAX_MULTIVIEW_BUFFERS_EXT",
+ },
+ {
+ 0x90F1,
+ "GL_MULTIVIEW_EXT",
+ },
+ {
+ 0x90F0,
+ "GL_COLOR_ATTACHMENT_EXT",
+ },
+ {
+ 0x803C,
+ "GL_ALPHA8_OES",
+ },
+ {
+ 0x84F5,
+ "GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM",
+ },
+ {
+ 0x882A,
+ "GL_DRAW_BUFFER5_EXT",
+ },
+ {
+ 0x80AA,
+ "GL_SAMPLE_COVERAGE_VALUE",
+ },
+ {
+ 0x84F6,
+ "GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM",
+ },
+ {
+ 0x80AB,
+ "GL_SAMPLE_COVERAGE_INVERT",
+ },
+ {
+ 0x8C41,
+ "GL_SRGB8_NV",
+ },
+ {
+ 0x8C40,
+ "GL_SRGB_EXT",
+ },
+ {
+ 0x882B,
+ "GL_DRAW_BUFFER6_EXT",
+ },
+ {
+ 0x8C17,
+ "GL_UNSIGNED_NORMALIZED_EXT",
+ },
+ {
+ 0x8A4A,
+ "GL_SKIP_DECODE_EXT",
+ },
+ {
+ 0x8A4F,
+ "GL_PROGRAM_PIPELINE_OBJECT_EXT",
+ },
+ {
+ 0x882C,
+ "GL_DRAW_BUFFER7_EXT",
+ },
+ {
+ 0x0010,
+ "GL_MAP_FLUSH_EXPLICIT_BIT_EXT",
+ },
+ {
+ 0x882D,
+ "GL_DRAW_BUFFER8_EXT",
+ },
+ {
+ 0x0BA6,
+ "GL_PATH_MODELVIEW_MATRIX_CHROMIUM",
+ },
+ {
+ 0x8F37,
+ "GL_COPY_WRITE_BUFFER_NV",
+ },
+ {
+ 0x8F36,
+ "GL_COPY_READ_BUFFER_NV",
+ },
+ {
+ 0x84FF,
+ "GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT",
+ },
+ {
+ 0x6000,
+ "GL_TEXTURE_POOL_CHROMIUM",
+ },
+ {
+ 0x0B74,
+ "GL_DEPTH_FUNC",
+ },
+ {
+ 0x8A49,
+ "GL_DECODE_EXT",
+ },
+ {
+ 0x881B,
+ "GL_RGB16F_EXT",
+ },
+ {
+ 0x0B71,
+ "GL_DEPTH_TEST",
+ },
+ {
+ 0x0B70,
+ "GL_DEPTH_RANGE",
+ },
+ {
+ 0x0B73,
+ "GL_DEPTH_CLEAR_VALUE",
+ },
+ {
+ 0x0B72,
+ "GL_DEPTH_WRITEMASK",
+ },
+ {
+ 0x85BA,
+ "GL_UNSIGNED_SHORT_8_8_APPLE",
+ },
+ {
+ 0x882E,
+ "GL_DRAW_BUFFER9_EXT",
+ },
+ {
+ 0x6001,
+ "GL_TEXTURE_POOL_MANAGED_CHROMIUM",
+ },
+ {
+ 0x8073,
+ "GL_MAX_3D_TEXTURE_SIZE_OES",
+ },
+ {
+ 0x8072,
+ "GL_TEXTURE_WRAP_R_OES",
+ },
+ {
+ 0x9289,
+ "GL_DST_OVER_NV",
+ },
+ {
+ 0x882F,
+ "GL_DRAW_BUFFER10_EXT",
+ },
+ {
+ 0x8074,
+ "GL_VERTEX_ARRAY_KHR",
+ },
+ {
+ 0x80E1,
+ "GL_BGRA_EXT",
+ },
+ {
+ 0x8ED7,
+ "GL_COVERAGE_AUTOMATIC_NV",
+ },
+ {
+ 0x8ED6,
+ "GL_COVERAGE_EDGE_FRAGMENTS_NV",
+ },
+ {
+ 0x8ED5,
+ "GL_COVERAGE_ALL_FRAGMENTS_NV",
+ },
+ {
+ 0x8ED4,
+ "GL_COVERAGE_SAMPLES_NV",
+ },
+ {
+ 0x8ED3,
+ "GL_COVERAGE_BUFFERS_NV",
+ },
+ {
+ 0x8ED2,
+ "GL_COVERAGE_ATTACHMENT_NV",
+ },
+ {
+ 0x8ED1,
+ "GL_COVERAGE_COMPONENT4_NV",
+ },
+ {
+ 0x8ED0,
+ "GL_COVERAGE_COMPONENT_NV",
+ },
+ {
+ 0x9288,
+ "GL_SRC_OVER_NV",
+ },
+ {
+ 0x800B,
+ "GL_FUNC_REVERSE_SUBTRACT",
+ },
+ {
+ 0x00000400,
+ "GL_STENCIL_BUFFER_BIT",
+ },
+ {
+ 0x800A,
+ "GL_FUNC_SUBTRACT",
+ },
+ {
+ 0x8E2C,
+ "GL_DEPTH_COMPONENT16_NONLINEAR_NV",
+ },
+ {
+ 0x889F,
+ "GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING",
+ },
+ {
+ 0x8219,
+ "GL_FRAMEBUFFER_UNDEFINED_OES",
+ },
+ {
+ 0x8E22,
+ "GL_TRANSFORM_FEEDBACK",
+ },
+ {
+ 0x8E28,
+ "GL_TIMESTAMP_EXT",
+ },
+ {
+ 0x8006,
+ "GL_FUNC_ADD",
+ },
+ {
+ 0x8007,
+ "GL_MIN_EXT",
+ },
+ {
+ 0x8004,
+ "GL_ONE_MINUS_CONSTANT_ALPHA",
+ },
+ {
+ 0x8005,
+ "GL_BLEND_COLOR",
+ },
+ {
+ 0x8002,
+ "GL_ONE_MINUS_CONSTANT_COLOR",
+ },
+ {
+ 0x8003,
+ "GL_CONSTANT_ALPHA",
+ },
+ {
+ 0x8001,
+ "GL_CONSTANT_COLOR",
+ },
+ {
+ 0x0204,
+ "GL_GREATER",
+ },
+ {
+ 0x0205,
+ "GL_NOTEQUAL",
+ },
+ {
+ 0x0206,
+ "GL_GEQUAL",
+ },
+ {
+ 0x0207,
+ "GL_ALWAYS",
+ },
+ {
+ 0x0200,
+ "GL_NEVER",
+ },
+ {
+ 0x0201,
+ "GL_LESS",
+ },
+ {
+ 0x0202,
+ "GL_EQUAL",
+ },
+ {
+ 0x0203,
+ "GL_LEQUAL",
+ },
+ {
+ 0x2901,
+ "GL_REPEAT",
+ },
+ {
+ 0x92A0,
+ "GL_EXCLUSION_NV",
+ },
+ {
+ 0x93D8,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR",
+ },
+ {
+ 0x93D9,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR",
+ },
+ {
+ 0x8FB2,
+ "GL_GPU_OPTIMIZED_QCOM",
+ },
+ {
+ 0x190A,
+ "GL_LUMINANCE_ALPHA",
+ },
+ {
+ 0x8FB0,
+ "GL_BINNING_CONTROL_HINT_QCOM",
+ },
+ {
+ 0x92A1,
+ "GL_CONTRAST_NV",
+ },
+ {
+ 0x1E00,
+ "GL_KEEP",
+ },
+ {
+ 0x1E01,
+ "GL_REPLACE",
+ },
+ {
+ 0x1E02,
+ "GL_INCR",
+ },
+ {
+ 0x1E03,
+ "GL_DECR",
+ },
+ {
+ 0x93D6,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR",
+ },
+ {
+ 0x93D7,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR",
+ },
+ {
+ 0x93D4,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR",
+ },
+ {
+ 0x93D5,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR",
+ },
+ {
+ 0x0BE2,
+ "GL_BLEND",
+ },
+ {
+ 0x84CB,
+ "GL_TEXTURE11",
+ },
+ {
+ 0x8D55,
+ "GL_RENDERBUFFER_STENCIL_SIZE",
+ },
+ {
+ 0x8D54,
+ "GL_RENDERBUFFER_DEPTH_SIZE",
+ },
+ {
+ 0x8D57,
+ "GL_MAX_SAMPLES_ANGLE",
+ },
+ {
+ 0x8D56,
+ "GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_ANGLE",
+ },
+ {
+ 0x8D51,
+ "GL_RENDERBUFFER_GREEN_SIZE",
+ },
+ {
+ 0x8D50,
+ "GL_RENDERBUFFER_RED_SIZE",
+ },
+ {
+ 0x8D53,
+ "GL_RENDERBUFFER_ALPHA_SIZE",
+ },
+ {
+ 0x8D52,
+ "GL_RENDERBUFFER_BLUE_SIZE",
+ },
+ {
+ 0x92A6,
+ "GL_VIVIDLIGHT_NV",
+ },
+ {
+ 0x78F1,
+ "GL_IMAGE_MAP_CHROMIUM",
+ },
+ {
+ 0x00080000,
+ "GL_STENCIL_BUFFER_BIT3_QCOM",
+ },
+ {
+ 0x92A7,
+ "GL_LINEARLIGHT_NV",
+ },
+ {
+ 0x886A,
+ "GL_VERTEX_ATTRIB_ARRAY_NORMALIZED",
+ },
+ {
+ 0x0C01,
+ "GL_DRAW_BUFFER_EXT",
+ },
+ {
+ 0x78F2,
+ "GL_IMAGE_SCANOUT_CHROMIUM",
+ },
+ {
+ 0x93C7,
+ "GL_COMPRESSED_RGBA_ASTC_6x5x5_OES",
+ },
+ {
+ 0x8B5F,
+ "GL_SAMPLER_3D_OES",
+ },
+ {
+ 0x8B95,
+ "GL_PALETTE8_RGB8_OES",
+ },
+ {
+ 0x9250,
+ "GL_SHADER_BINARY_DMP",
+ },
+ {
+ 0x10000000,
+ "GL_MULTISAMPLE_BUFFER_BIT4_QCOM",
+ },
+ {
+ 0x8C92,
+ "GL_ATC_RGB_AMD",
+ },
+ {
+ 0x9154,
+ "GL_VERTEX_ARRAY_OBJECT_EXT",
+ },
+ {
+ 0x9153,
+ "GL_QUERY_OBJECT_EXT",
+ },
+ {
+ 0x8864,
+ "GL_QUERY_COUNTER_BITS_EXT",
+ },
+ {
+ 0x9151,
+ "GL_BUFFER_OBJECT_EXT",
+ },
+ {
+ 0x8C93,
+ "GL_ATC_RGBA_EXPLICIT_ALPHA_AMD",
+ },
+ {
+ 0x00000002,
+ "GL_CONTEXT_FLAG_DEBUG_BIT_KHR",
+ },
+ {
+ 0x00000001,
+ "GL_SYNC_FLUSH_COMMANDS_BIT_APPLE",
+ },
+ {
+ 0x9248,
+ "GL_OVERLAY_TRANSFORM_ROTATE_90_CHROMIUM",
+ },
+ {
+ 0x00000004,
+ "GL_COLOR_BUFFER_BIT2_QCOM",
+ },
+ {
+ 0x1702,
+ "GL_TEXTURE",
+ },
+ {
+ 0x00000008,
+ "GL_COLOR_BUFFER_BIT3_QCOM",
+ },
+ {
+ 0x8B58,
+ "GL_BOOL_VEC3",
+ },
+ {
+ 0x8828,
+ "GL_DRAW_BUFFER3_EXT",
+ },
+ {
+ 0x8DF0,
+ "GL_LOW_FLOAT",
+ },
+ {
+ 0x1906,
+ "GL_ALPHA",
+ },
+ {
+ 0x1907,
+ "GL_RGB",
+ },
+ {
+ 0x8FBB,
+ "GL_GPU_DISJOINT_EXT",
+ },
+ {
+ 0x1902,
+ "GL_DEPTH_COMPONENT",
+ },
+ {
+ 0x8B56,
+ "GL_BOOL",
+ },
+ {
+ 0x93DB,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR",
+ },
+ {
+ 0x8B9B,
+ "GL_IMPLEMENTATION_COLOR_READ_FORMAT",
+ },
+ {
+ 0x8B9A,
+ "GL_IMPLEMENTATION_COLOR_READ_TYPE",
+ },
+ {
+ 0x93DA,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR",
+ },
+ {
+ 0x1908,
+ "GL_RGBA",
+ },
+ {
+ 0x8DF2,
+ "GL_HIGH_FLOAT",
+ },
+ {
+ 0x93DD,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR",
+ },
+ {
+ 0x8827,
+ "GL_DRAW_BUFFER2_EXT",
+ },
+ {
+ 0x9243,
+ "GL_UNPACK_COLORSPACE_CONVERSION_CHROMIUM",
+ },
+ {
+ 0x8DF3,
+ "GL_LOW_INT",
+ },
+ {
+ 0x82E8,
+ "GL_MAX_LABEL_LENGTH_KHR",
+ },
+ {
+ 0x82E6,
+ "GL_SAMPLER_KHR",
+ },
+ {
+ 0x0C02,
+ "GL_READ_BUFFER_EXT",
+ },
+ {
+ 0x82E3,
+ "GL_QUERY_KHR",
+ },
+ {
+ 0x82E2,
+ "GL_PROGRAM_KHR",
+ },
+ {
+ 0x82E1,
+ "GL_SHADER_KHR",
+ },
+ {
+ 0x8B52,
+ "GL_FLOAT_VEC4",
+ },
+ {
+ 0x9240,
+ "GL_UNPACK_FLIP_Y_CHROMIUM",
+ },
+ {
+ 0x8DF6,
+ "GL_UNSIGNED_INT_10_10_10_2_OES",
+ },
+ {
+ 0x8230,
+ "GL_RG32F_EXT",
+ },
+ {
+ 0x8DF7,
+ "GL_INT_10_10_10_2_OES",
+ },
+ {
+ 0x9246,
+ "GL_OVERLAY_TRANSFORM_FLIP_HORIZONTAL_CHROMIUM",
+ },
+ {
+ 0x8B69,
+ "GL_FLOAT_MAT4x2_NV",
+ },
+ {
+ 0x812D,
+ "GL_CLAMP_TO_BORDER_NV",
+ },
+ {
+ 0x812F,
+ "GL_CLAMP_TO_EDGE",
+ },
+ {
+ 0x86A3,
+ "GL_COMPRESSED_TEXTURE_FORMATS",
+ },
+ {
+ 0x9244,
+ "GL_BIND_GENERATES_RESOURCE_CHROMIUM",
+ },
+ {
+ 0x86A2,
+ "GL_NUM_COMPRESSED_TEXTURE_FORMATS",
+ },
+ {
+ 0x0CF3,
+ "GL_UNPACK_SKIP_ROWS_EXT",
+ },
+ {
+ 0x0CF2,
+ "GL_UNPACK_ROW_LENGTH_EXT",
+ },
+ {
+ 0x140C,
+ "GL_FIXED",
+ },
+ {
+ 0x8008,
+ "GL_MAX_EXT",
+ },
+ {
+ 0x0CF5,
+ "GL_UNPACK_ALIGNMENT",
+ },
+ {
+ 0x0CF4,
+ "GL_UNPACK_SKIP_PIXELS_EXT",
+ },
+ {
+ 0x8009,
+ "GL_BLEND_EQUATION",
+ },
+ {
+ 0x1401,
+ "GL_UNSIGNED_BYTE",
+ },
+ {
+ 0x1400,
+ "GL_BYTE",
+ },
+ {
+ 0x1403,
+ "GL_UNSIGNED_SHORT",
+ },
+ {
+ 0x1402,
+ "GL_SHORT",
+ },
+ {
+ 0x1405,
+ "GL_UNSIGNED_INT",
+ },
+ {
+ 0x1404,
+ "GL_INT",
+ },
+ {
+ 0x1406,
+ "GL_FLOAT",
+ },
+ {
+ 0x8043,
+ "GL_LUMINANCE4_ALPHA4_OES",
+ },
+ {
+ 0x8040,
+ "GL_LUMINANCE8_OES",
+ },
+ {
+ 0x8045,
+ "GL_LUMINANCE8_ALPHA8_OES",
+ },
+ {
+ 0x8CD1,
+ "GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME",
+ },
+ {
+ 0x00040000,
+ "GL_STENCIL_BUFFER_BIT2_QCOM",
+ },
+ {
+ 0x8CD0,
+ "GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE",
+ },
+ {
+ 0x8CE4,
+ "GL_COLOR_ATTACHMENT4_EXT",
+ },
+ {
+ 0x8CD3,
+ "GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE",
+ },
+ {
+ 0x929E,
+ "GL_DIFFERENCE_NV",
+ },
+ {
+ 0x0B90,
+ "GL_STENCIL_TEST",
+ },
+ {
+ 0x8CD2,
+ "GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL",
+ },
+ {
+ 0x881C,
+ "GL_ALPHA16F_EXT",
+ },
+ {
+ 0x928E,
+ "GL_SRC_ATOP_NV",
+ },
+ {
+ 0x8CD4,
+ "GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_OES",
+ },
+ {
+ 0x9298,
+ "GL_LIGHTEN_NV",
+ },
+ {
+ 0x8CD7,
+ "GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT",
+ },
+ {
+ 0x9112,
+ "GL_OBJECT_TYPE_APPLE",
+ },
+ {
+ 0x8038,
+ "GL_POLYGON_OFFSET_FACTOR",
+ },
+ {
+ 0x851A,
+ "GL_TEXTURE_CUBE_MAP_NEGATIVE_Z",
+ },
+ {
+ 0x851C,
+ "GL_MAX_CUBE_MAP_TEXTURE_SIZE",
+ },
+ {
+ 0x8CD9,
+ "GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS",
+ },
+ {
+ 0x84CC,
+ "GL_TEXTURE12",
+ },
+ {
+ 0x0BA2,
+ "GL_VIEWPORT",
+ },
+ {
+ 0x84CA,
+ "GL_TEXTURE10",
+ },
+ {
+ 0x0BA7,
+ "GL_PATH_PROJECTION_MATRIX_CHROMIUM",
+ },
+ {
+ 0x84CF,
+ "GL_TEXTURE15",
+ },
+ {
+ 0x84CE,
+ "GL_TEXTURE14",
+ },
+ {
+ 0x84CD,
+ "GL_TEXTURE13",
+ },
+ {
+ 0x9115,
+ "GL_SYNC_FLAGS_APPLE",
+ },
+ {
+ 0x9286,
+ "GL_SRC_NV",
+ },
+ {
+ 0x83F3,
+ "GL_COMPRESSED_RGBA_S3TC_DXT5_ANGLE",
+ },
+ {
+ 0x83F2,
+ "GL_COMPRESSED_RGBA_S3TC_DXT3_ANGLE",
+ },
+ {
+ 0x83F1,
+ "GL_COMPRESSED_RGBA_S3TC_DXT1_EXT",
+ },
+ {
+ 0x9114,
+ "GL_SYNC_STATUS_APPLE",
+ },
+ {
+ 0x8C0A,
+ "GL_SGX_BINARY_IMG",
+ },
+ {
+ 0x9285,
+ "GL_BLEND_ADVANCED_COHERENT_NV",
+ },
+ {
+ 0x911C,
+ "GL_CONDITION_SATISFIED_APPLE",
+ },
+ {
+ 0x911B,
+ "GL_TIMEOUT_EXPIRED_APPLE",
+ },
+ {
+ 0x911A,
+ "GL_ALREADY_SIGNALED_APPLE",
+ },
+ {
+ 0x9284,
+ "GL_CONJOINT_NV",
+ },
+ {
+ 0x911D,
+ "GL_WAIT_FAILED_APPLE",
+ },
+ {
+ 0x929A,
+ "GL_COLORBURN_NV",
+ },
+ {
+ 0x929B,
+ "GL_HARDLIGHT_NV",
+ },
+ {
+ 0x929C,
+ "GL_SOFTLIGHT_NV",
+ },
+ {
+ 0x846D,
+ "GL_ALIASED_POINT_SIZE_RANGE",
+ },
+ {
+ 0x846E,
+ "GL_ALIASED_LINE_WIDTH_RANGE",
+ },
+ {
+ 0x929F,
+ "GL_MINUS_NV",
+ },
+ {
+ 0x9282,
+ "GL_UNCORRELATED_NV",
+ },
+ {
+ 0x9113,
+ "GL_SYNC_CONDITION_APPLE",
+ },
+ {
+ 0x93A4,
+ "GL_PACK_REVERSE_ROW_ORDER_ANGLE",
+ },
+ {
+ 0x9111,
+ "GL_MAX_SERVER_WAIT_TIMEOUT_APPLE",
+ },
+ {
+ 0x93A6,
+ "GL_PROGRAM_BINARY_ANGLE",
+ },
+ {
+ 0x9117,
+ "GL_SYNC_GPU_COMMANDS_COMPLETE_APPLE",
+ },
+ {
+ 0x93A0,
+ "GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE",
+ },
+ {
+ 0x93A3,
+ "GL_FRAMEBUFFER_ATTACHMENT_ANGLE",
+ },
+ {
+ 0x93A2,
+ "GL_TEXTURE_USAGE_ANGLE",
+ },
+ {
+ 0x8802,
+ "GL_STENCIL_BACK_PASS_DEPTH_FAIL",
+ },
+ {
+ 0x9119,
+ "GL_SIGNALED_APPLE",
+ },
+ {
+ 0x9118,
+ "GL_UNSIGNALED_APPLE",
+ },
+ {
+ 0x9294,
+ "GL_MULTIPLY_NV",
+ },
+ {
+ 0x9295,
+ "GL_SCREEN_NV",
+ },
+ {
+ 0x9296,
+ "GL_OVERLAY_NV",
+ },
+ {
+ 0x9297,
+ "GL_DARKEN_NV",
+ },
+ {
+ 0x0020,
+ "GL_MAP_UNSYNCHRONIZED_BIT_EXT",
+ },
+ {
+ 0x8C01,
+ "GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG",
+ },
+ {
+ 0x8C00,
+ "GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG",
+ },
+ {
+ 0x8A52,
+ "GL_FRAGMENT_SHADER_DISCARDS_SAMPLES_EXT",
+ },
+ {
+ 0x8C02,
+ "GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG",
+ },
+ {
+ 0x84C9,
+ "GL_TEXTURE9",
+ },
+ {
+ 0x84C8,
+ "GL_TEXTURE8",
+ },
+ {
+ 0x8869,
+ "GL_MAX_VERTEX_ATTRIBS",
+ },
+ {
+ 0x84C3,
+ "GL_TEXTURE3",
+ },
+ {
+ 0x84C2,
+ "GL_TEXTURE2",
+ },
+ {
+ 0x84C1,
+ "GL_TEXTURE1",
+ },
+ {
+ 0x84C0,
+ "GL_TEXTURE0",
+ },
+ {
+ 0x84C7,
+ "GL_TEXTURE7",
+ },
+ {
+ 0x84C6,
+ "GL_TEXTURE6",
+ },
+ {
+ 0x84C5,
+ "GL_TEXTURE5",
+ },
+ {
+ 0x8803,
+ "GL_STENCIL_BACK_PASS_DEPTH_PASS",
+ },
+ {
+ 0x928A,
+ "GL_SRC_IN_NV",
+ },
+ {
+ 0x8518,
+ "GL_TEXTURE_CUBE_MAP_NEGATIVE_Y",
+ },
+ {
+ 0x8519,
+ "GL_TEXTURE_CUBE_MAP_POSITIVE_Z",
+ },
+ {
+ 0x8514,
+ "GL_TEXTURE_BINDING_CUBE_MAP",
+ },
+ {
+ 0x8515,
+ "GL_TEXTURE_CUBE_MAP_POSITIVE_X",
+ },
+ {
+ 0x8516,
+ "GL_TEXTURE_CUBE_MAP_NEGATIVE_X",
+ },
+ {
+ 0x8517,
+ "GL_TEXTURE_CUBE_MAP_POSITIVE_Y",
+ },
+ {
+ 0x8513,
+ "GL_TEXTURE_CUBE_MAP",
+ },
+ {
+ 0x8626,
+ "GL_CURRENT_VERTEX_ATTRIB",
+ },
+ {
+ 0x92B1,
+ "GL_PLUS_CLAMPED_NV",
+ },
+ {
+ 0x92B0,
+ "GL_HSL_LUMINOSITY_NV",
+ },
+ {
+ 0x92B3,
+ "GL_MINUS_CLAMPED_NV",
+ },
+ {
+ 0x92B2,
+ "GL_PLUS_CLAMPED_ALPHA_NV",
+ },
+ {
+ 0x8765,
+ "GL_BUFFER_USAGE",
+ },
+ {
+ 0x8764,
+ "GL_BUFFER_SIZE",
+ },
+ {
+ 0x8B99,
+ "GL_PALETTE8_RGB5_A1_OES",
+ },
+ {
+ 0x0503,
+ "GL_STACK_OVERFLOW_KHR",
+ },
+ {
+ 0x0502,
+ "GL_INVALID_OPERATION",
+ },
+ {
+ 0x0501,
+ "GL_INVALID_VALUE",
+ },
+ {
+ 0x0500,
+ "GL_INVALID_ENUM",
+ },
+ {
+ 64,
+ "GL_MAILBOX_SIZE_CHROMIUM",
+ },
+ {
+ 0x0506,
+ "GL_INVALID_FRAMEBUFFER_OPERATION",
+ },
+ {
+ 0x0505,
+ "GL_OUT_OF_MEMORY",
+ },
+ {
+ 0x0504,
+ "GL_STACK_UNDERFLOW_KHR",
+ },
+ {
+ 0x0B44,
+ "GL_CULL_FACE",
+ },
+ {
+ 0x8B5E,
+ "GL_SAMPLER_2D",
+ },
+ {
+ 0x0B46,
+ "GL_FRONT_FACE",
+ },
+ {
+ 0x8FB3,
+ "GL_RENDER_DIRECT_TO_FRAMEBUFFER_QCOM",
+ },
+ {
+ 0x824A,
+ "GL_DEBUG_SOURCE_APPLICATION_KHR",
+ },
+ {
+ 0x824B,
+ "GL_DEBUG_SOURCE_OTHER_KHR",
+ },
+ {
+ 0x824C,
+ "GL_DEBUG_TYPE_ERROR_KHR",
+ },
+ {
+ 0x824D,
+ "GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_KHR",
+ },
+ {
+ 0x824E,
+ "GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_KHR",
+ },
+ {
+ 0x824F,
+ "GL_DEBUG_TYPE_PORTABILITY_KHR",
+ },
+ {
+ 0x8B31,
+ "GL_VERTEX_SHADER",
+ },
+ {
+ 0x8B30,
+ "GL_FRAGMENT_SHADER",
+ },
+ {
+ 0x8FB1,
+ "GL_CPU_OPTIMIZED_QCOM",
+ },
+ {
+ 0x93D2,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR",
+ },
+ {
+ 0x8B5A,
+ "GL_FLOAT_MAT2",
+ },
+ {
+ 0x84D8,
+ "GL_TEXTURE24",
+ },
+ {
+ 0x84D9,
+ "GL_TEXTURE25",
+ },
+ {
+ 0x84D6,
+ "GL_TEXTURE22",
+ },
+ {
+ 0x84D7,
+ "GL_TEXTURE23",
+ },
+ {
+ 0x84D4,
+ "GL_TEXTURE20",
+ },
+ {
+ 0x0D05,
+ "GL_PACK_ALIGNMENT",
+ },
+ {
+ 0x84D2,
+ "GL_TEXTURE18",
+ },
+ {
+ 0x84D3,
+ "GL_TEXTURE19",
+ },
+ {
+ 0x84D0,
+ "GL_TEXTURE16",
+ },
+ {
+ 0x84D1,
+ "GL_TEXTURE17",
+ },
+ {
+ 0x93D1,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR",
+ },
+ {
+ 0x84DF,
+ "GL_TEXTURE31",
+ },
+ {
+ 0x8B97,
+ "GL_PALETTE8_R5_G6_B5_OES",
+ },
+ {
+ 0x84DD,
+ "GL_TEXTURE29",
+ },
+ {
+ 0x84DE,
+ "GL_TEXTURE30",
+ },
+ {
+ 0x84DB,
+ "GL_TEXTURE27",
+ },
+ {
+ 0x84DC,
+ "GL_TEXTURE28",
+ },
+ {
+ 0x6002,
+ "GL_TEXTURE_POOL_UNMANAGED_CHROMIUM",
+ },
+ {
+ 0x84DA,
+ "GL_TEXTURE26",
+ },
+ {
+ 0x8242,
+ "GL_DEBUG_OUTPUT_SYNCHRONOUS_KHR",
+ },
+ {
+ 0x8243,
+ "GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_KHR",
+ },
+ {
+ 0x8244,
+ "GL_DEBUG_CALLBACK_FUNCTION_KHR",
+ },
+ {
+ 0x8245,
+ "GL_DEBUG_CALLBACK_USER_PARAM_KHR",
+ },
+ {
+ 0x8246,
+ "GL_DEBUG_SOURCE_API_KHR",
+ },
+ {
+ 0x8247,
+ "GL_DEBUG_SOURCE_WINDOW_SYSTEM_KHR",
+ },
+ {
+ 0x8248,
+ "GL_DEBUG_SOURCE_SHADER_COMPILER_KHR",
+ },
+ {
+ 0x8249,
+ "GL_DEBUG_SOURCE_THIRD_PARTY_KHR",
+ },
+ {
+ 0x8B94,
+ "GL_PALETTE4_RGB5_A1_OES",
+ },
+ {
+ 0x00000040,
+ "GL_COLOR_BUFFER_BIT6_QCOM",
+ },
+ {
+ 0x8645,
+ "GL_VERTEX_ATTRIB_ARRAY_POINTER",
+ },
+ {
+ 0x8865,
+ "GL_CURRENT_QUERY_EXT",
+ },
+ {
+ 0x8866,
+ "GL_QUERY_RESULT_EXT",
+ },
+ {
+ 0x8867,
+ "GL_QUERY_RESULT_AVAILABLE_EXT",
+ },
+ {
+ 0x08000000,
+ "GL_MULTISAMPLE_BUFFER_BIT3_QCOM",
+ },
+ {
+ 0x87FA,
+ "GL_3DC_XY_AMD",
+ },
+ {
+ 0x84C4,
+ "GL_TEXTURE4",
+ },
+ {
+ 0x85B5,
+ "GL_VERTEX_ARRAY_BINDING_OES",
+ },
+ {
+ 0x8D6A,
+ "GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT",
+ },
+ {
+ 0x8D6C,
+ "GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT",
+ },
+ {
+ 0x8252,
+ "GL_LOSE_CONTEXT_ON_RESET_EXT",
+ },
+ {
+ 0x8C4C,
+ "GL_COMPRESSED_SRGB_S3TC_DXT1_NV",
+ },
+ {
+ 0x8C4E,
+ "GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_NV",
+ },
+ {
+ 0x1102,
+ "GL_NICEST",
+ },
+ {
+ 0x8C4F,
+ "GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_NV",
+ },
+ {
+ 0x93E9,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x6_OES",
+ },
+ {
+ 0x93E8,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x5_OES",
+ },
+ {
+ 0x8C43,
+ "GL_SRGB8_ALPHA8_EXT",
+ },
+ {
+ 0x8C42,
+ "GL_SRGB_ALPHA_EXT",
+ },
+ {
+ 0x8C45,
+ "GL_SLUMINANCE8_ALPHA8_NV",
+ },
+ {
+ 0x8C44,
+ "GL_SLUMINANCE_ALPHA_NV",
+ },
+ {
+ 0x8C47,
+ "GL_SLUMINANCE8_NV",
+ },
+ {
+ 0x8C46,
+ "GL_SLUMINANCE_NV",
+ },
+ {
+ 0x93E1,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x3x3_OES",
+ },
+ {
+ 0x93E0,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_3x3x3_OES",
+ },
+ {
+ 0x93E3,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x4_OES",
+ },
+ {
+ 0x93E2,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x3_OES",
+ },
+ {
+ 0x93E5,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x4_OES",
+ },
+ {
+ 0x93E4,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4x4_OES",
+ },
+ {
+ 0x93E7,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5x5_OES",
+ },
+ {
+ 0x93E6,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x5_OES",
+ },
+ {
+ 0x8D68,
+ "GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES",
+ },
+ {
+ 0x1700,
+ "GL_PATH_MODELVIEW_CHROMIUM",
+ },
+ {
+ 0x85BB,
+ "GL_UNSIGNED_SHORT_8_8_REV_APPLE",
+ },
+ {
+ 0x8D61,
+ "GL_HALF_FLOAT_OES",
+ },
+ {
+ 0x8D62,
+ "GL_RGB565",
+ },
+ {
+ 0x8D64,
+ "GL_ETC1_RGB8_OES",
+ },
+ {
+ 0x8D65,
+ "GL_TEXTURE_EXTERNAL_OES",
+ },
+ {
+ 0x8D66,
+ "GL_SAMPLER_EXTERNAL_OES",
+ },
+ {
+ 0x8D67,
+ "GL_TEXTURE_BINDING_EXTERNAL_OES",
+ },
+ {
+ 0x04000000,
+ "GL_MULTISAMPLE_BUFFER_BIT2_QCOM",
+ },
+ {
+ 0x8CEE,
+ "GL_COLOR_ATTACHMENT14_EXT",
+ },
+ {
+ 0x1701,
+ "GL_PATH_PROJECTION_CHROMIUM",
+ },
+ {
+ 0x2800,
+ "GL_TEXTURE_MAG_FILTER",
+ },
+ {
+ 0x2801,
+ "GL_TEXTURE_MIN_FILTER",
+ },
+ {
+ 0x2802,
+ "GL_TEXTURE_WRAP_S",
+ },
+ {
+ 0x2803,
+ "GL_TEXTURE_WRAP_T",
+ },
+ {
+ 0x2703,
+ "GL_LINEAR_MIPMAP_LINEAR",
+ },
+ {
+ 0x8B98,
+ "GL_PALETTE8_RGBA4_OES",
+ },
+ {
+ 0x84F3,
+ "GL_FENCE_STATUS_NV",
+ },
+ {
+ 0x2702,
+ "GL_NEAREST_MIPMAP_LINEAR",
+ },
+ {
+ 0x1F03,
+ "GL_EXTENSIONS",
+ },
+ {
+ 0x1F02,
+ "GL_VERSION",
+ },
+ {
+ 0x1F01,
+ "GL_RENDERER",
+ },
+ {
+ 0x1F00,
+ "GL_VENDOR",
+ },
+ {
+ 0x9247,
+ "GL_OVERLAY_TRANSFORM_FLIP_VERTICAL_CHROMIUM",
+ },
+ {
+ 0x2701,
+ "GL_LINEAR_MIPMAP_NEAREST",
+ },
+ {
+ 0x9245,
+ "GL_OVERLAY_TRANSFORM_NONE_CHROMIUM",
+ },
+ {
+ 0x92B4,
+ "GL_INVERT_OVG_NV",
+ },
+ {
+ 0x9249,
+ "GL_OVERLAY_TRANSFORM_ROTATE_180_CHROMIUM",
+ },
+ {
+ 0x0B94,
+ "GL_STENCIL_FAIL",
+ },
+ {
+ 0x8B4C,
+ "GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS",
+ },
+ {
+ 0x8B4D,
+ "GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS",
+ },
+ {
+ 0x8B4F,
+ "GL_SHADER_TYPE",
+ },
+ {
+ 0x00004000,
+ "GL_COLOR_BUFFER_BIT",
+ },
+ {
+ 0x00000010,
+ "GL_COLOR_BUFFER_BIT4_QCOM",
+ },
+ {
+ 0x8834,
+ "GL_DRAW_BUFFER15_EXT",
+ },
+ {
+ 0x8833,
+ "GL_DRAW_BUFFER14_EXT",
+ },
+ {
+ 0x8832,
+ "GL_DRAW_BUFFER13_EXT",
+ },
+ {
+ 0x8831,
+ "GL_DRAW_BUFFER12_EXT",
+ },
+ {
+ 0x8830,
+ "GL_DRAW_BUFFER11_EXT",
+ },
+ {
+ 0x8DC5,
+ "GL_SAMPLER_CUBE_SHADOW_NV",
+ },
+ {
+ 0x93B8,
+ "GL_COMPRESSED_RGBA_ASTC_10x5_KHR",
+ },
+ {
+ 0x9241,
+ "GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM",
+ },
+ {
+ 0x00010000,
+ "GL_STENCIL_BUFFER_BIT0_QCOM",
+ },
+ {
+ 0x0B93,
+ "GL_STENCIL_VALUE_MASK",
+ },
+ {
+ 0x0B92,
+ "GL_STENCIL_FUNC",
+ },
+ {
+ 0x0B91,
+ "GL_STENCIL_CLEAR_VALUE",
+ },
+ {
+ 0x883D,
+ "GL_BLEND_EQUATION_ALPHA",
+ },
+ {
+ 0x0B97,
+ "GL_STENCIL_REF",
+ },
+ {
+ 0x0B96,
+ "GL_STENCIL_PASS_DEPTH_PASS",
+ },
+ {
+ 0x0B95,
+ "GL_STENCIL_PASS_DEPTH_FAIL",
+ },
+ {
+ 0x2700,
+ "GL_NEAREST_MIPMAP_NEAREST",
+ },
+ {
+ 0x0B98,
+ "GL_STENCIL_WRITEMASK",
+ },
+ {
+ 0x8B40,
+ "GL_PROGRAM_OBJECT_EXT",
+ },
+ {
+ 0x1004,
+ "GL_TEXTURE_BORDER_COLOR_NV",
+ },
+ {
+ 0x8B48,
+ "GL_SHADER_OBJECT_EXT",
+ },
+ {
+ 0x912F,
+ "GL_TEXTURE_IMMUTABLE_FORMAT_EXT",
+ },
+ {
+ 0x924A,
+ "GL_OVERLAY_TRANSFORM_ROTATE_270_CHROMIUM",
+ },
+ {
+ 0x20000000,
+ "GL_MULTISAMPLE_BUFFER_BIT5_QCOM",
+ },
+ {
+ 0x0DE1,
+ "GL_TEXTURE_2D",
+ },
+ {
+ 0x80C9,
+ "GL_BLEND_SRC_RGB",
+ },
+ {
+ 0x80C8,
+ "GL_BLEND_DST_RGB",
+ },
+ {
+ 0x8059,
+ "GL_RGB10_A2_EXT",
+ },
+ {
+ 0x8058,
+ "GL_RGBA8_OES",
+ },
+ {
+ 0x8B93,
+ "GL_PALETTE4_RGBA4_OES",
+ },
+ {
+ 0x00002000,
+ "GL_DEPTH_BUFFER_BIT5_QCOM",
+ },
+ {
+ 0x8051,
+ "GL_RGB8_OES",
+ },
+ {
+ 0x8052,
+ "GL_RGB10_EXT",
+ },
+ {
+ 0x8CAB,
+ "GL_RENDERBUFFER_SAMPLES_ANGLE",
+ },
+ {
+ 0x8057,
+ "GL_RGB5_A1",
+ },
+ {
+ 0x8056,
+ "GL_RGBA4",
+ },
+ {
+ 0x150A,
+ "GL_INVERT",
+ },
+ {
+ 0x01000000,
+ "GL_MULTISAMPLE_BUFFER_BIT0_QCOM",
+ },
+ {
+ 0x78ED,
+ "GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM",
+ },
+ {
+ 0x78EE,
+ "GL_PIXEL_PACK_TRANSFER_BUFFER_BINDING_CHROMIUM",
+ },
+ {
+ 0x78EF,
+ "GL_PIXEL_UNPACK_TRANSFER_BUFFER_BINDING_CHROMIUM",
+ },
+ {
+ 0x0B45,
+ "GL_CULL_FACE_MODE",
+ },
+ {
+ 0x8B92,
+ "GL_PALETTE4_R5_G6_B5_OES",
+ },
+ {
+ 0x00100000,
+ "GL_STENCIL_BUFFER_BIT4_QCOM",
+ },
+ {
+ 0x9299,
+ "GL_COLORDODGE_NV",
+ },
+ {
+ 0x8D20,
+ "GL_STENCIL_ATTACHMENT",
+ },
+ {
+ 0x8B91,
+ "GL_PALETTE4_RGBA8_OES",
+ },
+ {
+ 0x00000200,
+ "GL_DEPTH_BUFFER_BIT1_QCOM",
+ },
+ {
+ 0x00008000,
+ "GL_COVERAGE_BUFFER_BIT_NV",
+ },
+ {
+ 0x1506,
+ "GL_XOR_NV",
+ },
+ {
+ 0x8CA8,
+ "GL_READ_FRAMEBUFFER_ANGLE",
+ },
+ {
+ 0x8CA9,
+ "GL_DRAW_FRAMEBUFFER_ANGLE",
+ },
+ {
+ 0x8CA6,
+ "GL_FRAMEBUFFER_BINDING",
+ },
+ {
+ 0x8CA7,
+ "GL_RENDERBUFFER_BINDING",
+ },
+ {
+ 0x8CA4,
+ "GL_STENCIL_BACK_VALUE_MASK",
+ },
+ {
+ 0x8CA5,
+ "GL_STENCIL_BACK_WRITEMASK",
+ },
+ {
+ 0x8B90,
+ "GL_PALETTE4_RGB8_OES",
+ },
+ {
+ 0x8CA3,
+ "GL_STENCIL_BACK_REF",
+ },
+ {
+ 0x80CB,
+ "GL_BLEND_SRC_ALPHA",
+ },
+ {
+ 0x80CA,
+ "GL_BLEND_DST_ALPHA",
+ },
+ {
+ 0x8CE7,
+ "GL_COLOR_ATTACHMENT7_EXT",
+ },
+ {
+ 0x93B0,
+ "GL_COMPRESSED_RGBA_ASTC_4x4_KHR",
+ },
+ {
+ 0x93B1,
+ "GL_COMPRESSED_RGBA_ASTC_5x4_KHR",
+ },
+ {
+ 0x93B2,
+ "GL_COMPRESSED_RGBA_ASTC_5x5_KHR",
+ },
+ {
+ 0x93B3,
+ "GL_COMPRESSED_RGBA_ASTC_6x5_KHR",
+ },
+ {
+ 0x93B4,
+ "GL_COMPRESSED_RGBA_ASTC_6x6_KHR",
+ },
+ {
+ 0x93B5,
+ "GL_COMPRESSED_RGBA_ASTC_8x5_KHR",
+ },
+ {
+ 0x93B6,
+ "GL_COMPRESSED_RGBA_ASTC_8x6_KHR",
+ },
+ {
+ 0x93B7,
+ "GL_COMPRESSED_RGBA_ASTC_8x8_KHR",
+ },
+ {
+ 0x8CD6,
+ "GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT",
+ },
+ {
+ 0x93B9,
+ "GL_COMPRESSED_RGBA_ASTC_10x6_KHR",
+ },
+ {
+ 0x8253,
+ "GL_GUILTY_CONTEXT_RESET_EXT",
+ },
+ {
+ 0x8CE5,
+ "GL_COLOR_ATTACHMENT5_EXT",
+ },
+ {
+ 0x8CE9,
+ "GL_COLOR_ATTACHMENT9_EXT",
+ },
+ {
+ 0x8B96,
+ "GL_PALETTE8_RGBA8_OES",
+ },
+ {
+ 0x8872,
+ "GL_MAX_TEXTURE_IMAGE_UNITS",
+ },
+ {
+ 0x8508,
+ "GL_DECR_WRAP",
+ },
+ {
+ 0x92AD,
+ "GL_HSL_HUE_NV",
+ },
+ {
+ 0x92AE,
+ "GL_HSL_SATURATION_NV",
+ },
+ {
+ 0x92AF,
+ "GL_HSL_COLOR_NV",
+ },
+ {
+ 0x8DC4,
+ "GL_SAMPLER_2D_ARRAY_SHADOW_NV",
+ },
+ {
+ 0x8507,
+ "GL_INCR_WRAP",
+ },
+ {
+ 0x8895,
+ "GL_ELEMENT_ARRAY_BUFFER_BINDING",
+ },
+ {
+ 0x8894,
+ "GL_ARRAY_BUFFER_BINDING",
+ },
+ {
+ 0x92A3,
+ "GL_INVERT_RGB_NV",
+ },
+ {
+ 0x92A4,
+ "GL_LINEARDODGE_NV",
+ },
+ {
+ 0x92A5,
+ "GL_LINEARBURN_NV",
+ },
+ {
+ 0x8893,
+ "GL_ELEMENT_ARRAY_BUFFER",
+ },
+ {
+ 0x8892,
+ "GL_ARRAY_BUFFER",
+ },
+ {
+ 0x92A8,
+ "GL_PINLIGHT_NV",
+ },
+ {
+ 0x92A9,
+ "GL_HARDMIX_NV",
+ },
+ {
+ 0x8BD8,
+ "GL_TEXTURE_IMAGE_VALID_QCOM",
+ },
+ {
+ 0x84D5,
+ "GL_TEXTURE21",
+ },
+ {
+ 0x9287,
+ "GL_DST_NV",
+ },
+ {
+ 0x93BA,
+ "GL_COMPRESSED_RGBA_ASTC_10x8_KHR",
+ },
+ {
+ 0x93BB,
+ "GL_COMPRESSED_RGBA_ASTC_10x10_KHR",
+ },
+ {
+ 0x93BC,
+ "GL_COMPRESSED_RGBA_ASTC_12x10_KHR",
+ },
+ {
+ 0x93BD,
+ "GL_COMPRESSED_RGBA_ASTC_12x12_KHR",
+ },
+ {
+ 0x84E8,
+ "GL_MAX_RENDERBUFFER_SIZE",
+ },
+ {
+ 0x9281,
+ "GL_BLEND_OVERLAP_NV",
+ },
+ {
+ 0x9280,
+ "GL_BLEND_PREMULTIPLIED_SRC_NV",
+ },
+ {
+ 0x8370,
+ "GL_MIRRORED_REPEAT",
+ },
+ {
+ 0x84E0,
+ "GL_ACTIVE_TEXTURE",
+ },
+ {
+ 0x8800,
+ "GL_STENCIL_BACK_FUNC",
+ },
+ {
+ 0x8801,
+ "GL_STENCIL_BACK_FAIL",
+ },
+ {
+ 0x0D33,
+ "GL_MAX_TEXTURE_SIZE",
+ },
+ {
+ 0x8624,
+ "GL_VERTEX_ATTRIB_ARRAY_STRIDE",
+ },
+ {
+ 0x8625,
+ "GL_VERTEX_ATTRIB_ARRAY_TYPE",
+ },
+ {
+ 0x8622,
+ "GL_VERTEX_ATTRIB_ARRAY_ENABLED",
+ },
+ {
+ 0x8623,
+ "GL_VERTEX_ATTRIB_ARRAY_SIZE",
+ },
+ {
+ 0x8DB9,
+ "GL_FRAMEBUFFER_SRGB_EXT",
+ },
+ {
+ 0x8259,
+ "GL_ACTIVE_PROGRAM_EXT",
+ },
+ {
+ 0x8258,
+ "GL_PROGRAM_SEPARABLE_EXT",
+ },
+ {
+ 0x8256,
+ "GL_RESET_NOTIFICATION_STRATEGY_EXT",
+ },
+ {
+ 0x8255,
+ "GL_UNKNOWN_CONTEXT_RESET_EXT",
+ },
+ {
+ 0x8254,
+ "GL_INNOCENT_CONTEXT_RESET_EXT",
+ },
+ {
+ 0x1100,
+ "GL_DONT_CARE",
+ },
+ {
+ 0x1101,
+ "GL_FASTEST",
+ },
+ {
+ 0x8251,
+ "GL_DEBUG_TYPE_OTHER_KHR",
+ },
+ {
+ 0x8250,
+ "GL_DEBUG_TYPE_PERFORMANCE_KHR",
+ },
+ {
+ 0x8CEB,
+ "GL_COLOR_ATTACHMENT11_EXT",
+ },
+ {
+ 0x8CEC,
+ "GL_COLOR_ATTACHMENT12_EXT",
+ },
+ {
+ 0x0408,
+ "GL_FRONT_AND_BACK",
+ },
+ {
+ 0x8CEA,
+ "GL_COLOR_ATTACHMENT10_EXT",
+ },
+ {
+ 0x8CEF,
+ "GL_COLOR_ATTACHMENT15_EXT",
+ },
+ {
+ 0x8CED,
+ "GL_COLOR_ATTACHMENT13_EXT",
+ },
+ {
+ 0x8829,
+ "GL_DRAW_BUFFER4_EXT",
+ },
+ {
+ 0x0404,
+ "GL_FRONT",
+ },
+ {
+ 0x0405,
+ "GL_BACK",
+ },
+ {
+ 0x88E1,
+ "GL_STREAM_READ",
+ },
+ {
+ 0x88E0,
+ "GL_STREAM_DRAW",
+ },
+ {
+ 0x88E4,
+ "GL_STATIC_DRAW",
+ },
+ {
+ 0x93C6,
+ "GL_COMPRESSED_RGBA_ASTC_5x5x5_OES",
+ },
+ {
+ 0x88E8,
+ "GL_DYNAMIC_DRAW",
+ },
+ {
+ 0x9291,
+ "GL_PLUS_NV",
+ },
+ {
+ 0x8CAA,
+ "GL_READ_FRAMEBUFFER_BINDING_ANGLE",
+ },
+ {
+ 0x93C5,
+ "GL_COMPRESSED_RGBA_ASTC_5x5x4_OES",
+ },
+ {
+ 0x40000000,
+ "GL_MULTISAMPLE_BUFFER_BIT6_QCOM",
+ },
+ {
+ 0x9116,
+ "GL_SYNC_FENCE_APPLE",
+ },
+ {
+ 0x93C4,
+ "GL_COMPRESSED_RGBA_ASTC_5x4x4_OES",
+ },
+ {
+ 0x88EE,
+ "GL_ETC1_SRGB8_NV",
+ },
+ {
+ 0x93C3,
+ "GL_COMPRESSED_RGBA_ASTC_4x4x4_OES",
+ },
+ {
+ 0x00000800,
+ "GL_DEPTH_BUFFER_BIT3_QCOM",
+ },
+ {
+ 0x1903,
+ "GL_RED_EXT",
+ },
+ {
+ 0x93C2,
+ "GL_COMPRESSED_RGBA_ASTC_4x4x3_OES",
+ },
+ {
+ 0x8CE2,
+ "GL_COLOR_ATTACHMENT2_EXT",
+ },
+ {
+ 0x8BC1,
+ "GL_COUNTER_RANGE_AMD",
+ },
+ {
+ 0x8CE0,
+ "GL_COLOR_ATTACHMENT0",
+ },
+ {
+ 0x8CE1,
+ "GL_COLOR_ATTACHMENT1_EXT",
+ },
+ {
+ 0x8CE6,
+ "GL_COLOR_ATTACHMENT6_EXT",
+ },
+ {
+ 0x93C1,
+ "GL_COMPRESSED_RGBA_ASTC_4x3x3_OES",
+ },
+ {
+ 0x8A1F,
+ "GL_RGB_422_APPLE",
+ },
+ {
+ 0x93DC,
+ "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR",
+ },
+ {
+ 0x9292,
+ "GL_PLUS_DARKER_NV",
+ },
+ {
+ 0x8CE8,
+ "GL_COLOR_ATTACHMENT8_EXT",
+ },
+ {
+ 0x93C0,
+ "GL_COMPRESSED_RGBA_ASTC_3x3x3_OES",
+ },
+ {
+ 0x0C23,
+ "GL_COLOR_WRITEMASK",
+ },
+ {
+ 0x0C22,
+ "GL_COLOR_CLEAR_VALUE",
+ },
+ {
+ 0x8823,
+ "GL_WRITEONLY_RENDERING_QCOM",
+ },
+ {
+ 0x8824,
+ "GL_MAX_DRAW_BUFFERS_EXT",
+ },
+ {
+ 0x825A,
+ "GL_PROGRAM_PIPELINE_BINDING_EXT",
+ },
+ {
+ 0x1909,
+ "GL_LUMINANCE",
+ },
+ {
+ 0x0D3A,
+ "GL_MAX_VIEWPORT_DIMS",
+ },
+ {
+ 0x8B53,
+ "GL_INT_VEC2",
+ },
+ {
+ 0x8826,
+ "GL_DRAW_BUFFER1_EXT",
+ },
+ {
+ 0x809E,
+ "GL_SAMPLE_ALPHA_TO_COVERAGE",
+ },
+ {
+ 0x8BC0,
+ "GL_COUNTER_TYPE_AMD",
+ },
+ {
+ 0x8BC3,
+ "GL_PERCENTAGE_AMD",
+ },
+ {
+ 0x8BC2,
+ "GL_UNSIGNED_INT64_AMD",
+ },
+ {
+ 0x8BC5,
+ "GL_PERFMON_RESULT_SIZE_AMD",
+ },
+ {
+ 0x8BC4,
+ "GL_PERFMON_RESULT_AVAILABLE_AMD",
+ },
+ {
+ 0x8BC6,
+ "GL_PERFMON_RESULT_AMD",
+ },
+};
+
+const GLES2Util::EnumToString* const GLES2Util::enum_to_string_table_ =
+ enum_to_string_table;
+const size_t GLES2Util::enum_to_string_table_len_ =
+ sizeof(enum_to_string_table) / sizeof(enum_to_string_table[0]);
+
+std::string GLES2Util::GetStringAttachment(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_COLOR_ATTACHMENT0, "GL_COLOR_ATTACHMENT0"},
+ {GL_DEPTH_ATTACHMENT, "GL_DEPTH_ATTACHMENT"},
+ {GL_STENCIL_ATTACHMENT, "GL_STENCIL_ATTACHMENT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringBackbufferAttachment(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_COLOR_EXT, "GL_COLOR_EXT"},
+ {GL_DEPTH_EXT, "GL_DEPTH_EXT"},
+ {GL_STENCIL_EXT, "GL_STENCIL_EXT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringBlitFilter(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_NEAREST, "GL_NEAREST"}, {GL_LINEAR, "GL_LINEAR"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringBufferParameter(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_BUFFER_SIZE, "GL_BUFFER_SIZE"}, {GL_BUFFER_USAGE, "GL_BUFFER_USAGE"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringBufferTarget(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_ARRAY_BUFFER, "GL_ARRAY_BUFFER"},
+ {GL_ELEMENT_ARRAY_BUFFER, "GL_ELEMENT_ARRAY_BUFFER"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringBufferUsage(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_STREAM_DRAW, "GL_STREAM_DRAW"},
+ {GL_STATIC_DRAW, "GL_STATIC_DRAW"},
+ {GL_DYNAMIC_DRAW, "GL_DYNAMIC_DRAW"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringCapability(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_BLEND, "GL_BLEND"},
+ {GL_CULL_FACE, "GL_CULL_FACE"},
+ {GL_DEPTH_TEST, "GL_DEPTH_TEST"},
+ {GL_DITHER, "GL_DITHER"},
+ {GL_POLYGON_OFFSET_FILL, "GL_POLYGON_OFFSET_FILL"},
+ {GL_SAMPLE_ALPHA_TO_COVERAGE, "GL_SAMPLE_ALPHA_TO_COVERAGE"},
+ {GL_SAMPLE_COVERAGE, "GL_SAMPLE_COVERAGE"},
+ {GL_SCISSOR_TEST, "GL_SCISSOR_TEST"},
+ {GL_STENCIL_TEST, "GL_STENCIL_TEST"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringCmpFunction(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_NEVER, "GL_NEVER"},
+ {GL_LESS, "GL_LESS"},
+ {GL_EQUAL, "GL_EQUAL"},
+ {GL_LEQUAL, "GL_LEQUAL"},
+ {GL_GREATER, "GL_GREATER"},
+ {GL_NOTEQUAL, "GL_NOTEQUAL"},
+ {GL_GEQUAL, "GL_GEQUAL"},
+ {GL_ALWAYS, "GL_ALWAYS"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringCompressedTextureFormat(uint32_t value) {
+ return GLES2Util::GetQualifiedEnumString(NULL, 0, value);
+}
+
+std::string GLES2Util::GetStringDrawMode(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_POINTS, "GL_POINTS"},
+ {GL_LINE_STRIP, "GL_LINE_STRIP"},
+ {GL_LINE_LOOP, "GL_LINE_LOOP"},
+ {GL_LINES, "GL_LINES"},
+ {GL_TRIANGLE_STRIP, "GL_TRIANGLE_STRIP"},
+ {GL_TRIANGLE_FAN, "GL_TRIANGLE_FAN"},
+ {GL_TRIANGLES, "GL_TRIANGLES"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringDstBlendFactor(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_ZERO, "GL_ZERO"},
+ {GL_ONE, "GL_ONE"},
+ {GL_SRC_COLOR, "GL_SRC_COLOR"},
+ {GL_ONE_MINUS_SRC_COLOR, "GL_ONE_MINUS_SRC_COLOR"},
+ {GL_DST_COLOR, "GL_DST_COLOR"},
+ {GL_ONE_MINUS_DST_COLOR, "GL_ONE_MINUS_DST_COLOR"},
+ {GL_SRC_ALPHA, "GL_SRC_ALPHA"},
+ {GL_ONE_MINUS_SRC_ALPHA, "GL_ONE_MINUS_SRC_ALPHA"},
+ {GL_DST_ALPHA, "GL_DST_ALPHA"},
+ {GL_ONE_MINUS_DST_ALPHA, "GL_ONE_MINUS_DST_ALPHA"},
+ {GL_CONSTANT_COLOR, "GL_CONSTANT_COLOR"},
+ {GL_ONE_MINUS_CONSTANT_COLOR, "GL_ONE_MINUS_CONSTANT_COLOR"},
+ {GL_CONSTANT_ALPHA, "GL_CONSTANT_ALPHA"},
+ {GL_ONE_MINUS_CONSTANT_ALPHA, "GL_ONE_MINUS_CONSTANT_ALPHA"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringEquation(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_FUNC_ADD, "GL_FUNC_ADD"},
+ {GL_FUNC_SUBTRACT, "GL_FUNC_SUBTRACT"},
+ {GL_FUNC_REVERSE_SUBTRACT, "GL_FUNC_REVERSE_SUBTRACT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringFaceMode(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_CW, "GL_CW"}, {GL_CCW, "GL_CCW"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringFaceType(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_FRONT, "GL_FRONT"},
+ {GL_BACK, "GL_BACK"},
+ {GL_FRONT_AND_BACK, "GL_FRONT_AND_BACK"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringFrameBufferParameter(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ "GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE"},
+ {GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
+ "GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME"},
+ {GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL,
+ "GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL"},
+ {GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE,
+ "GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringFrameBufferTarget(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_FRAMEBUFFER, "GL_FRAMEBUFFER"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringGLState(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_ACTIVE_TEXTURE, "GL_ACTIVE_TEXTURE"},
+ {GL_ALIASED_LINE_WIDTH_RANGE, "GL_ALIASED_LINE_WIDTH_RANGE"},
+ {GL_ALIASED_POINT_SIZE_RANGE, "GL_ALIASED_POINT_SIZE_RANGE"},
+ {GL_ALPHA_BITS, "GL_ALPHA_BITS"},
+ {GL_ARRAY_BUFFER_BINDING, "GL_ARRAY_BUFFER_BINDING"},
+ {GL_BLUE_BITS, "GL_BLUE_BITS"},
+ {GL_COMPRESSED_TEXTURE_FORMATS, "GL_COMPRESSED_TEXTURE_FORMATS"},
+ {GL_CURRENT_PROGRAM, "GL_CURRENT_PROGRAM"},
+ {GL_DEPTH_BITS, "GL_DEPTH_BITS"},
+ {GL_DEPTH_RANGE, "GL_DEPTH_RANGE"},
+ {GL_ELEMENT_ARRAY_BUFFER_BINDING, "GL_ELEMENT_ARRAY_BUFFER_BINDING"},
+ {GL_FRAMEBUFFER_BINDING, "GL_FRAMEBUFFER_BINDING"},
+ {GL_GENERATE_MIPMAP_HINT, "GL_GENERATE_MIPMAP_HINT"},
+ {GL_GREEN_BITS, "GL_GREEN_BITS"},
+ {GL_IMPLEMENTATION_COLOR_READ_FORMAT,
+ "GL_IMPLEMENTATION_COLOR_READ_FORMAT"},
+ {GL_IMPLEMENTATION_COLOR_READ_TYPE, "GL_IMPLEMENTATION_COLOR_READ_TYPE"},
+ {GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS,
+ "GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS"},
+ {GL_MAX_CUBE_MAP_TEXTURE_SIZE, "GL_MAX_CUBE_MAP_TEXTURE_SIZE"},
+ {GL_MAX_FRAGMENT_UNIFORM_VECTORS, "GL_MAX_FRAGMENT_UNIFORM_VECTORS"},
+ {GL_MAX_RENDERBUFFER_SIZE, "GL_MAX_RENDERBUFFER_SIZE"},
+ {GL_MAX_TEXTURE_IMAGE_UNITS, "GL_MAX_TEXTURE_IMAGE_UNITS"},
+ {GL_MAX_TEXTURE_SIZE, "GL_MAX_TEXTURE_SIZE"},
+ {GL_MAX_VARYING_VECTORS, "GL_MAX_VARYING_VECTORS"},
+ {GL_MAX_VERTEX_ATTRIBS, "GL_MAX_VERTEX_ATTRIBS"},
+ {GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, "GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS"},
+ {GL_MAX_VERTEX_UNIFORM_VECTORS, "GL_MAX_VERTEX_UNIFORM_VECTORS"},
+ {GL_MAX_VIEWPORT_DIMS, "GL_MAX_VIEWPORT_DIMS"},
+ {GL_NUM_COMPRESSED_TEXTURE_FORMATS, "GL_NUM_COMPRESSED_TEXTURE_FORMATS"},
+ {GL_NUM_SHADER_BINARY_FORMATS, "GL_NUM_SHADER_BINARY_FORMATS"},
+ {GL_PACK_ALIGNMENT, "GL_PACK_ALIGNMENT"},
+ {GL_RED_BITS, "GL_RED_BITS"},
+ {GL_RENDERBUFFER_BINDING, "GL_RENDERBUFFER_BINDING"},
+ {GL_SAMPLE_BUFFERS, "GL_SAMPLE_BUFFERS"},
+ {GL_SAMPLE_COVERAGE_INVERT, "GL_SAMPLE_COVERAGE_INVERT"},
+ {GL_SAMPLE_COVERAGE_VALUE, "GL_SAMPLE_COVERAGE_VALUE"},
+ {GL_SAMPLES, "GL_SAMPLES"},
+ {GL_SCISSOR_BOX, "GL_SCISSOR_BOX"},
+ {GL_SHADER_BINARY_FORMATS, "GL_SHADER_BINARY_FORMATS"},
+ {GL_SHADER_COMPILER, "GL_SHADER_COMPILER"},
+ {GL_SUBPIXEL_BITS, "GL_SUBPIXEL_BITS"},
+ {GL_STENCIL_BITS, "GL_STENCIL_BITS"},
+ {GL_TEXTURE_BINDING_2D, "GL_TEXTURE_BINDING_2D"},
+ {GL_TEXTURE_BINDING_CUBE_MAP, "GL_TEXTURE_BINDING_CUBE_MAP"},
+ {GL_UNPACK_ALIGNMENT, "GL_UNPACK_ALIGNMENT"},
+ {GL_UNPACK_FLIP_Y_CHROMIUM, "GL_UNPACK_FLIP_Y_CHROMIUM"},
+ {GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM,
+ "GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM"},
+ {GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM,
+ "GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM"},
+ {GL_BIND_GENERATES_RESOURCE_CHROMIUM,
+ "GL_BIND_GENERATES_RESOURCE_CHROMIUM"},
+ {GL_VERTEX_ARRAY_BINDING_OES, "GL_VERTEX_ARRAY_BINDING_OES"},
+ {GL_VIEWPORT, "GL_VIEWPORT"},
+ {GL_BLEND_COLOR, "GL_BLEND_COLOR"},
+ {GL_BLEND_EQUATION_RGB, "GL_BLEND_EQUATION_RGB"},
+ {GL_BLEND_EQUATION_ALPHA, "GL_BLEND_EQUATION_ALPHA"},
+ {GL_BLEND_SRC_RGB, "GL_BLEND_SRC_RGB"},
+ {GL_BLEND_DST_RGB, "GL_BLEND_DST_RGB"},
+ {GL_BLEND_SRC_ALPHA, "GL_BLEND_SRC_ALPHA"},
+ {GL_BLEND_DST_ALPHA, "GL_BLEND_DST_ALPHA"},
+ {GL_COLOR_CLEAR_VALUE, "GL_COLOR_CLEAR_VALUE"},
+ {GL_DEPTH_CLEAR_VALUE, "GL_DEPTH_CLEAR_VALUE"},
+ {GL_STENCIL_CLEAR_VALUE, "GL_STENCIL_CLEAR_VALUE"},
+ {GL_COLOR_WRITEMASK, "GL_COLOR_WRITEMASK"},
+ {GL_CULL_FACE_MODE, "GL_CULL_FACE_MODE"},
+ {GL_DEPTH_FUNC, "GL_DEPTH_FUNC"},
+ {GL_DEPTH_WRITEMASK, "GL_DEPTH_WRITEMASK"},
+ {GL_FRONT_FACE, "GL_FRONT_FACE"},
+ {GL_LINE_WIDTH, "GL_LINE_WIDTH"},
+ {GL_POLYGON_OFFSET_FACTOR, "GL_POLYGON_OFFSET_FACTOR"},
+ {GL_POLYGON_OFFSET_UNITS, "GL_POLYGON_OFFSET_UNITS"},
+ {GL_STENCIL_FUNC, "GL_STENCIL_FUNC"},
+ {GL_STENCIL_REF, "GL_STENCIL_REF"},
+ {GL_STENCIL_VALUE_MASK, "GL_STENCIL_VALUE_MASK"},
+ {GL_STENCIL_BACK_FUNC, "GL_STENCIL_BACK_FUNC"},
+ {GL_STENCIL_BACK_REF, "GL_STENCIL_BACK_REF"},
+ {GL_STENCIL_BACK_VALUE_MASK, "GL_STENCIL_BACK_VALUE_MASK"},
+ {GL_STENCIL_WRITEMASK, "GL_STENCIL_WRITEMASK"},
+ {GL_STENCIL_BACK_WRITEMASK, "GL_STENCIL_BACK_WRITEMASK"},
+ {GL_STENCIL_FAIL, "GL_STENCIL_FAIL"},
+ {GL_STENCIL_PASS_DEPTH_FAIL, "GL_STENCIL_PASS_DEPTH_FAIL"},
+ {GL_STENCIL_PASS_DEPTH_PASS, "GL_STENCIL_PASS_DEPTH_PASS"},
+ {GL_STENCIL_BACK_FAIL, "GL_STENCIL_BACK_FAIL"},
+ {GL_STENCIL_BACK_PASS_DEPTH_FAIL, "GL_STENCIL_BACK_PASS_DEPTH_FAIL"},
+ {GL_STENCIL_BACK_PASS_DEPTH_PASS, "GL_STENCIL_BACK_PASS_DEPTH_PASS"},
+ {GL_BLEND, "GL_BLEND"},
+ {GL_CULL_FACE, "GL_CULL_FACE"},
+ {GL_DEPTH_TEST, "GL_DEPTH_TEST"},
+ {GL_DITHER, "GL_DITHER"},
+ {GL_POLYGON_OFFSET_FILL, "GL_POLYGON_OFFSET_FILL"},
+ {GL_SAMPLE_ALPHA_TO_COVERAGE, "GL_SAMPLE_ALPHA_TO_COVERAGE"},
+ {GL_SAMPLE_COVERAGE, "GL_SAMPLE_COVERAGE"},
+ {GL_SCISSOR_TEST, "GL_SCISSOR_TEST"},
+ {GL_STENCIL_TEST, "GL_STENCIL_TEST"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringGetMaxIndexType(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_UNSIGNED_BYTE, "GL_UNSIGNED_BYTE"},
+ {GL_UNSIGNED_SHORT, "GL_UNSIGNED_SHORT"},
+ {GL_UNSIGNED_INT, "GL_UNSIGNED_INT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringGetTexParamTarget(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_TEXTURE_2D, "GL_TEXTURE_2D"},
+ {GL_TEXTURE_CUBE_MAP, "GL_TEXTURE_CUBE_MAP"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringHintMode(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_FASTEST, "GL_FASTEST"},
+ {GL_NICEST, "GL_NICEST"},
+ {GL_DONT_CARE, "GL_DONT_CARE"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringHintTarget(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_GENERATE_MIPMAP_HINT, "GL_GENERATE_MIPMAP_HINT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringImageInternalFormat(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_RGB, "GL_RGB"}, {GL_RGBA, "GL_RGBA"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringImageUsage(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_MAP_CHROMIUM, "GL_MAP_CHROMIUM"},
+ {GL_SCANOUT_CHROMIUM, "GL_SCANOUT_CHROMIUM"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringIndexType(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_UNSIGNED_BYTE, "GL_UNSIGNED_BYTE"},
+ {GL_UNSIGNED_SHORT, "GL_UNSIGNED_SHORT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringMatrixMode(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_PATH_PROJECTION_CHROMIUM, "GL_PATH_PROJECTION_CHROMIUM"},
+ {GL_PATH_MODELVIEW_CHROMIUM, "GL_PATH_MODELVIEW_CHROMIUM"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringPixelStore(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_PACK_ALIGNMENT, "GL_PACK_ALIGNMENT"},
+ {GL_UNPACK_ALIGNMENT, "GL_UNPACK_ALIGNMENT"},
+ {GL_UNPACK_FLIP_Y_CHROMIUM, "GL_UNPACK_FLIP_Y_CHROMIUM"},
+ {GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM,
+ "GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM"},
+ {GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM,
+ "GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringPixelType(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_UNSIGNED_BYTE, "GL_UNSIGNED_BYTE"},
+ {GL_UNSIGNED_SHORT_5_6_5, "GL_UNSIGNED_SHORT_5_6_5"},
+ {GL_UNSIGNED_SHORT_4_4_4_4, "GL_UNSIGNED_SHORT_4_4_4_4"},
+ {GL_UNSIGNED_SHORT_5_5_5_1, "GL_UNSIGNED_SHORT_5_5_5_1"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringProgramParameter(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_DELETE_STATUS, "GL_DELETE_STATUS"},
+ {GL_LINK_STATUS, "GL_LINK_STATUS"},
+ {GL_VALIDATE_STATUS, "GL_VALIDATE_STATUS"},
+ {GL_INFO_LOG_LENGTH, "GL_INFO_LOG_LENGTH"},
+ {GL_ATTACHED_SHADERS, "GL_ATTACHED_SHADERS"},
+ {GL_ACTIVE_ATTRIBUTES, "GL_ACTIVE_ATTRIBUTES"},
+ {GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, "GL_ACTIVE_ATTRIBUTE_MAX_LENGTH"},
+ {GL_ACTIVE_UNIFORMS, "GL_ACTIVE_UNIFORMS"},
+ {GL_ACTIVE_UNIFORM_MAX_LENGTH, "GL_ACTIVE_UNIFORM_MAX_LENGTH"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringQueryObjectParameter(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_QUERY_RESULT_EXT, "GL_QUERY_RESULT_EXT"},
+ {GL_QUERY_RESULT_AVAILABLE_EXT, "GL_QUERY_RESULT_AVAILABLE_EXT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringQueryParameter(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_CURRENT_QUERY_EXT, "GL_CURRENT_QUERY_EXT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringQueryTarget(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_ANY_SAMPLES_PASSED_EXT, "GL_ANY_SAMPLES_PASSED_EXT"},
+ {GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT,
+ "GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT"},
+ {GL_COMMANDS_ISSUED_CHROMIUM, "GL_COMMANDS_ISSUED_CHROMIUM"},
+ {GL_LATENCY_QUERY_CHROMIUM, "GL_LATENCY_QUERY_CHROMIUM"},
+ {GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM,
+ "GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM"},
+ {GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM,
+ "GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM"},
+ {GL_COMMANDS_COMPLETED_CHROMIUM, "GL_COMMANDS_COMPLETED_CHROMIUM"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringReadPixelFormat(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_ALPHA, "GL_ALPHA"}, {GL_RGB, "GL_RGB"}, {GL_RGBA, "GL_RGBA"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringReadPixelType(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_UNSIGNED_BYTE, "GL_UNSIGNED_BYTE"},
+ {GL_UNSIGNED_SHORT_5_6_5, "GL_UNSIGNED_SHORT_5_6_5"},
+ {GL_UNSIGNED_SHORT_4_4_4_4, "GL_UNSIGNED_SHORT_4_4_4_4"},
+ {GL_UNSIGNED_SHORT_5_5_5_1, "GL_UNSIGNED_SHORT_5_5_5_1"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringRenderBufferFormat(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_RGBA4, "GL_RGBA4"},
+ {GL_RGB565, "GL_RGB565"},
+ {GL_RGB5_A1, "GL_RGB5_A1"},
+ {GL_DEPTH_COMPONENT16, "GL_DEPTH_COMPONENT16"},
+ {GL_STENCIL_INDEX8, "GL_STENCIL_INDEX8"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringRenderBufferParameter(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_RENDERBUFFER_RED_SIZE, "GL_RENDERBUFFER_RED_SIZE"},
+ {GL_RENDERBUFFER_GREEN_SIZE, "GL_RENDERBUFFER_GREEN_SIZE"},
+ {GL_RENDERBUFFER_BLUE_SIZE, "GL_RENDERBUFFER_BLUE_SIZE"},
+ {GL_RENDERBUFFER_ALPHA_SIZE, "GL_RENDERBUFFER_ALPHA_SIZE"},
+ {GL_RENDERBUFFER_DEPTH_SIZE, "GL_RENDERBUFFER_DEPTH_SIZE"},
+ {GL_RENDERBUFFER_STENCIL_SIZE, "GL_RENDERBUFFER_STENCIL_SIZE"},
+ {GL_RENDERBUFFER_WIDTH, "GL_RENDERBUFFER_WIDTH"},
+ {GL_RENDERBUFFER_HEIGHT, "GL_RENDERBUFFER_HEIGHT"},
+ {GL_RENDERBUFFER_INTERNAL_FORMAT, "GL_RENDERBUFFER_INTERNAL_FORMAT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringRenderBufferTarget(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_RENDERBUFFER, "GL_RENDERBUFFER"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringResetStatus(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_GUILTY_CONTEXT_RESET_ARB, "GL_GUILTY_CONTEXT_RESET_ARB"},
+ {GL_INNOCENT_CONTEXT_RESET_ARB, "GL_INNOCENT_CONTEXT_RESET_ARB"},
+ {GL_UNKNOWN_CONTEXT_RESET_ARB, "GL_UNKNOWN_CONTEXT_RESET_ARB"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringShaderBinaryFormat(uint32_t value) {
+ return GLES2Util::GetQualifiedEnumString(NULL, 0, value);
+}
+
+std::string GLES2Util::GetStringShaderParameter(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_SHADER_TYPE, "GL_SHADER_TYPE"},
+ {GL_DELETE_STATUS, "GL_DELETE_STATUS"},
+ {GL_COMPILE_STATUS, "GL_COMPILE_STATUS"},
+ {GL_INFO_LOG_LENGTH, "GL_INFO_LOG_LENGTH"},
+ {GL_SHADER_SOURCE_LENGTH, "GL_SHADER_SOURCE_LENGTH"},
+ {GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE,
+ "GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringShaderPrecision(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_LOW_FLOAT, "GL_LOW_FLOAT"},
+ {GL_MEDIUM_FLOAT, "GL_MEDIUM_FLOAT"},
+ {GL_HIGH_FLOAT, "GL_HIGH_FLOAT"},
+ {GL_LOW_INT, "GL_LOW_INT"},
+ {GL_MEDIUM_INT, "GL_MEDIUM_INT"},
+ {GL_HIGH_INT, "GL_HIGH_INT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringShaderType(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_VERTEX_SHADER, "GL_VERTEX_SHADER"},
+ {GL_FRAGMENT_SHADER, "GL_FRAGMENT_SHADER"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringSrcBlendFactor(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_ZERO, "GL_ZERO"},
+ {GL_ONE, "GL_ONE"},
+ {GL_SRC_COLOR, "GL_SRC_COLOR"},
+ {GL_ONE_MINUS_SRC_COLOR, "GL_ONE_MINUS_SRC_COLOR"},
+ {GL_DST_COLOR, "GL_DST_COLOR"},
+ {GL_ONE_MINUS_DST_COLOR, "GL_ONE_MINUS_DST_COLOR"},
+ {GL_SRC_ALPHA, "GL_SRC_ALPHA"},
+ {GL_ONE_MINUS_SRC_ALPHA, "GL_ONE_MINUS_SRC_ALPHA"},
+ {GL_DST_ALPHA, "GL_DST_ALPHA"},
+ {GL_ONE_MINUS_DST_ALPHA, "GL_ONE_MINUS_DST_ALPHA"},
+ {GL_CONSTANT_COLOR, "GL_CONSTANT_COLOR"},
+ {GL_ONE_MINUS_CONSTANT_COLOR, "GL_ONE_MINUS_CONSTANT_COLOR"},
+ {GL_CONSTANT_ALPHA, "GL_CONSTANT_ALPHA"},
+ {GL_ONE_MINUS_CONSTANT_ALPHA, "GL_ONE_MINUS_CONSTANT_ALPHA"},
+ {GL_SRC_ALPHA_SATURATE, "GL_SRC_ALPHA_SATURATE"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringStencilOp(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_KEEP, "GL_KEEP"},
+ {GL_ZERO, "GL_ZERO"},
+ {GL_REPLACE, "GL_REPLACE"},
+ {GL_INCR, "GL_INCR"},
+ {GL_INCR_WRAP, "GL_INCR_WRAP"},
+ {GL_DECR, "GL_DECR"},
+ {GL_DECR_WRAP, "GL_DECR_WRAP"},
+ {GL_INVERT, "GL_INVERT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringStringType(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_VENDOR, "GL_VENDOR"},
+ {GL_RENDERER, "GL_RENDERER"},
+ {GL_VERSION, "GL_VERSION"},
+ {GL_SHADING_LANGUAGE_VERSION, "GL_SHADING_LANGUAGE_VERSION"},
+ {GL_EXTENSIONS, "GL_EXTENSIONS"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTextureBindTarget(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_TEXTURE_2D, "GL_TEXTURE_2D"},
+ {GL_TEXTURE_CUBE_MAP, "GL_TEXTURE_CUBE_MAP"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTextureFormat(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_ALPHA, "GL_ALPHA"},
+ {GL_LUMINANCE, "GL_LUMINANCE"},
+ {GL_LUMINANCE_ALPHA, "GL_LUMINANCE_ALPHA"},
+ {GL_RGB, "GL_RGB"},
+ {GL_RGBA, "GL_RGBA"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTextureInternalFormat(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_ALPHA, "GL_ALPHA"},
+ {GL_LUMINANCE, "GL_LUMINANCE"},
+ {GL_LUMINANCE_ALPHA, "GL_LUMINANCE_ALPHA"},
+ {GL_RGB, "GL_RGB"},
+ {GL_RGBA, "GL_RGBA"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTextureInternalFormatStorage(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_RGB565, "GL_RGB565"},
+ {GL_RGBA4, "GL_RGBA4"},
+ {GL_RGB5_A1, "GL_RGB5_A1"},
+ {GL_ALPHA8_EXT, "GL_ALPHA8_EXT"},
+ {GL_LUMINANCE8_EXT, "GL_LUMINANCE8_EXT"},
+ {GL_LUMINANCE8_ALPHA8_EXT, "GL_LUMINANCE8_ALPHA8_EXT"},
+ {GL_RGB8_OES, "GL_RGB8_OES"},
+ {GL_RGBA8_OES, "GL_RGBA8_OES"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTextureMagFilterMode(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_NEAREST, "GL_NEAREST"}, {GL_LINEAR, "GL_LINEAR"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTextureMinFilterMode(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_NEAREST, "GL_NEAREST"},
+ {GL_LINEAR, "GL_LINEAR"},
+ {GL_NEAREST_MIPMAP_NEAREST, "GL_NEAREST_MIPMAP_NEAREST"},
+ {GL_LINEAR_MIPMAP_NEAREST, "GL_LINEAR_MIPMAP_NEAREST"},
+ {GL_NEAREST_MIPMAP_LINEAR, "GL_NEAREST_MIPMAP_LINEAR"},
+ {GL_LINEAR_MIPMAP_LINEAR, "GL_LINEAR_MIPMAP_LINEAR"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTextureParameter(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_TEXTURE_MAG_FILTER, "GL_TEXTURE_MAG_FILTER"},
+ {GL_TEXTURE_MIN_FILTER, "GL_TEXTURE_MIN_FILTER"},
+ {GL_TEXTURE_POOL_CHROMIUM, "GL_TEXTURE_POOL_CHROMIUM"},
+ {GL_TEXTURE_WRAP_S, "GL_TEXTURE_WRAP_S"},
+ {GL_TEXTURE_WRAP_T, "GL_TEXTURE_WRAP_T"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTexturePool(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_TEXTURE_POOL_MANAGED_CHROMIUM, "GL_TEXTURE_POOL_MANAGED_CHROMIUM"},
+ {GL_TEXTURE_POOL_UNMANAGED_CHROMIUM,
+ "GL_TEXTURE_POOL_UNMANAGED_CHROMIUM"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTextureTarget(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_TEXTURE_2D, "GL_TEXTURE_2D"},
+ {GL_TEXTURE_CUBE_MAP_POSITIVE_X, "GL_TEXTURE_CUBE_MAP_POSITIVE_X"},
+ {GL_TEXTURE_CUBE_MAP_NEGATIVE_X, "GL_TEXTURE_CUBE_MAP_NEGATIVE_X"},
+ {GL_TEXTURE_CUBE_MAP_POSITIVE_Y, "GL_TEXTURE_CUBE_MAP_POSITIVE_Y"},
+ {GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, "GL_TEXTURE_CUBE_MAP_NEGATIVE_Y"},
+ {GL_TEXTURE_CUBE_MAP_POSITIVE_Z, "GL_TEXTURE_CUBE_MAP_POSITIVE_Z"},
+ {GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, "GL_TEXTURE_CUBE_MAP_NEGATIVE_Z"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTextureUsage(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_NONE, "GL_NONE"},
+ {GL_FRAMEBUFFER_ATTACHMENT_ANGLE, "GL_FRAMEBUFFER_ATTACHMENT_ANGLE"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringTextureWrapMode(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_CLAMP_TO_EDGE, "GL_CLAMP_TO_EDGE"},
+ {GL_MIRRORED_REPEAT, "GL_MIRRORED_REPEAT"},
+ {GL_REPEAT, "GL_REPEAT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringVertexAttribType(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_BYTE, "GL_BYTE"},
+ {GL_UNSIGNED_BYTE, "GL_UNSIGNED_BYTE"},
+ {GL_SHORT, "GL_SHORT"},
+ {GL_UNSIGNED_SHORT, "GL_UNSIGNED_SHORT"},
+ {GL_FLOAT, "GL_FLOAT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringVertexAttribute(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, "GL_VERTEX_ATTRIB_ARRAY_NORMALIZED"},
+ {GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING,
+ "GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING"},
+ {GL_VERTEX_ATTRIB_ARRAY_ENABLED, "GL_VERTEX_ATTRIB_ARRAY_ENABLED"},
+ {GL_VERTEX_ATTRIB_ARRAY_SIZE, "GL_VERTEX_ATTRIB_ARRAY_SIZE"},
+ {GL_VERTEX_ATTRIB_ARRAY_STRIDE, "GL_VERTEX_ATTRIB_ARRAY_STRIDE"},
+ {GL_VERTEX_ATTRIB_ARRAY_TYPE, "GL_VERTEX_ATTRIB_ARRAY_TYPE"},
+ {GL_CURRENT_VERTEX_ATTRIB, "GL_CURRENT_VERTEX_ATTRIB"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+std::string GLES2Util::GetStringVertexPointer(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_VERTEX_ATTRIB_ARRAY_POINTER, "GL_VERTEX_ATTRIB_ARRAY_POINTER"},
+ };
+ return GLES2Util::GetQualifiedEnumString(
+ string_table, arraysize(string_table), value);
+}
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_UTILS_IMPLEMENTATION_AUTOGEN_H_
diff --git a/gpu/command_buffer/common/gles2_cmd_utils_unittest.cc b/gpu/command_buffer/common/gles2_cmd_utils_unittest.cc
new file mode 100644
index 0000000..d0e7e03
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_cmd_utils_unittest.cc
@@ -0,0 +1,306 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+
+#include <limits>
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2UtilTest : public testing::Test {
+ protected:
+ GLES2Util util_;
+};
+
+TEST_F(GLES2UtilTest, SafeMultiplyUint32) {
+ uint32_t result = 0;
+ EXPECT_TRUE(SafeMultiplyUint32(2u, 3u, &result));
+ EXPECT_EQ(6u, result);
+ EXPECT_FALSE(SafeMultiplyUint32(0x80000000u, 2u, &result));
+ EXPECT_EQ(0u, result);
+ EXPECT_TRUE(SafeMultiplyUint32(0x2u, 0x7FFFFFFFu, &result));
+ EXPECT_EQ(0xFFFFFFFEu, result);
+ EXPECT_FALSE(SafeMultiplyUint32(2u, 0x80000000u, &result));
+ EXPECT_EQ(0u, result);
+}
+
+TEST_F(GLES2UtilTest, SafeAddUint32) {
+ uint32_t result = 0;
+ EXPECT_TRUE(SafeAddUint32(2u, 3u, &result));
+ EXPECT_EQ(5u, result);
+ EXPECT_FALSE(SafeAddUint32(0x80000000u, 0x80000000u, &result));
+ EXPECT_EQ(0u, result);
+ EXPECT_TRUE(SafeAddUint32(0xFFFFFFFEu, 0x1u, &result));
+ EXPECT_EQ(0xFFFFFFFFu, result);
+ EXPECT_FALSE(SafeAddUint32(0xFFFFFFFEu, 0x2u, &result));
+ EXPECT_EQ(0u, result);
+ EXPECT_TRUE(SafeAddUint32(0x1u, 0xFFFFFFFEu, &result));
+ EXPECT_EQ(0xFFFFFFFFu, result);
+ EXPECT_FALSE(SafeAddUint32(0x2u, 0xFFFFFFFEu, &result));
+ EXPECT_EQ(0u, result);
+}
+
+TEST_F(GLES2UtilTest, SafeAddInt32) {
+ int32_t result = 0;
+ const int32_t kMax = std::numeric_limits<int32_t>::max();
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
+ EXPECT_TRUE(SafeAddInt32(2, 3, &result));
+ EXPECT_EQ(5, result);
+ EXPECT_FALSE(SafeAddInt32(kMax, 1, &result));
+ EXPECT_EQ(0, result);
+ EXPECT_TRUE(SafeAddInt32(kMin + 1, -1, &result));
+ EXPECT_EQ(kMin, result);
+ EXPECT_FALSE(SafeAddInt32(kMin, -1, &result));
+ EXPECT_EQ(0, result);
+ EXPECT_TRUE(SafeAddInt32(kMax - 1, 1, &result));
+ EXPECT_EQ(kMax, result);
+ EXPECT_FALSE(SafeAddInt32(1, kMax, &result));
+ EXPECT_EQ(0, result);
+ EXPECT_TRUE(SafeAddInt32(-1, kMin + 1, &result));
+ EXPECT_EQ(kMin, result);
+ EXPECT_FALSE(SafeAddInt32(-1, kMin, &result));
+ EXPECT_EQ(0, result);
+ EXPECT_TRUE(SafeAddInt32(1, kMax - 1, &result));
+ EXPECT_EQ(kMax, result);
+}
+
+TEST_F(GLES2UtilTest, GLGetNumValuesReturned) {
+ EXPECT_EQ(0, util_.GLGetNumValuesReturned(GL_COMPRESSED_TEXTURE_FORMATS));
+ EXPECT_EQ(0, util_.GLGetNumValuesReturned(GL_SHADER_BINARY_FORMATS));
+
+ EXPECT_EQ(0, util_.num_compressed_texture_formats());
+ EXPECT_EQ(0, util_.num_shader_binary_formats());
+
+ util_.set_num_compressed_texture_formats(1);
+ util_.set_num_shader_binary_formats(2);
+
+ EXPECT_EQ(1, util_.GLGetNumValuesReturned(GL_COMPRESSED_TEXTURE_FORMATS));
+ EXPECT_EQ(2, util_.GLGetNumValuesReturned(GL_SHADER_BINARY_FORMATS));
+
+ EXPECT_EQ(1, util_.num_compressed_texture_formats());
+ EXPECT_EQ(2, util_.num_shader_binary_formats());
+}
+
+TEST_F(GLES2UtilTest, ComputeImageDataSizesFormats) {
+ const uint32_t kWidth = 16;
+ const uint32_t kHeight = 12;
+ uint32_t size;
+ uint32_t unpadded_row_size;
+ uint32_t padded_row_size;
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, 1, &size, &unpadded_row_size,
+ &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 3, size);
+ EXPECT_EQ(kWidth * 3, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_RGBA, GL_UNSIGNED_BYTE, 1, &size, &unpadded_row_size,
+ &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 4, size);
+ EXPECT_EQ(kWidth * 4, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_LUMINANCE, GL_UNSIGNED_BYTE, 1, &size,
+ &unpadded_row_size, &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 1, size);
+ EXPECT_EQ(kWidth * 1, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, 1, &size,
+ &unpadded_row_size, &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 2, size);
+ EXPECT_EQ(kWidth * 2, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_BGRA_EXT, GL_UNSIGNED_BYTE, 1, &size,
+ &unpadded_row_size, &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 4, size);
+ EXPECT_EQ(kWidth * 4, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_ALPHA, GL_UNSIGNED_BYTE, 1, &size, &unpadded_row_size,
+ &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 1, size);
+ EXPECT_EQ(kWidth * 1, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, 1, &size,
+ &unpadded_row_size, &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 2, size);
+ EXPECT_EQ(kWidth * 2, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_DEPTH_STENCIL_OES, GL_UNSIGNED_INT_24_8_OES, 1,
+ &size, &unpadded_row_size,
+ &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 4, size);
+ EXPECT_EQ(kWidth * 4, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+}
+
+TEST_F(GLES2UtilTest, ComputeImageDataSizeTypes) {
+ const uint32_t kWidth = 16;
+ const uint32_t kHeight = 12;
+ uint32_t size;
+ uint32_t unpadded_row_size;
+ uint32_t padded_row_size;
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_RGBA, GL_UNSIGNED_BYTE, 1, &size, &unpadded_row_size,
+ &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 4, size);
+ EXPECT_EQ(kWidth * 4, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, 1, &size,
+ &unpadded_row_size, &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 2, size);
+ EXPECT_EQ(kWidth * 2, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_RGBA, GL_UNSIGNED_SHORT_5_5_5_1, 1, &size,
+ &unpadded_row_size, &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 2, size);
+ EXPECT_EQ(kWidth * 2, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, 1, &size,
+ &unpadded_row_size, &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 2, size);
+ EXPECT_EQ(kWidth * 2, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, 1, &size,
+ &unpadded_row_size, &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 4, size);
+ EXPECT_EQ(kWidth * 4, padded_row_size);
+ EXPECT_EQ(padded_row_size, unpadded_row_size);
+}
+
+TEST_F(GLES2UtilTest, ComputeImageDataSizesUnpackAlignment) {
+ const uint32_t kWidth = 19;
+ const uint32_t kHeight = 12;
+ uint32_t size;
+ uint32_t unpadded_row_size;
+ uint32_t padded_row_size;
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, 1, &size, &unpadded_row_size,
+ &padded_row_size));
+ EXPECT_EQ(kWidth * kHeight * 3, size);
+ EXPECT_EQ(kWidth * 3, unpadded_row_size);
+ EXPECT_EQ(kWidth * 3, padded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, 2, &size, &unpadded_row_size,
+ &padded_row_size));
+ EXPECT_EQ((kWidth * 3 + 1) * (kHeight - 1) +
+ kWidth * 3, size);
+ EXPECT_EQ(kWidth * 3, unpadded_row_size);
+ EXPECT_EQ(kWidth * 3 + 1, padded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, 4, &size, &unpadded_row_size,
+ &padded_row_size));
+ EXPECT_EQ((kWidth * 3 + 3) * (kHeight - 1) +
+ kWidth * 3, size);
+ EXPECT_EQ(kWidth * 3, unpadded_row_size);
+ EXPECT_EQ(kWidth * 3 + 3, padded_row_size);
+ EXPECT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, 8, &size, &unpadded_row_size,
+ &padded_row_size));
+ EXPECT_EQ((kWidth * 3 + 7) * (kHeight - 1) +
+ kWidth * 3, size);
+ EXPECT_EQ(kWidth * 3, unpadded_row_size);
+ EXPECT_EQ(kWidth * 3 + 7, padded_row_size);
+}
+
+TEST_F(GLES2UtilTest, RenderbufferBytesPerPixel) {
+ EXPECT_EQ(1u, GLES2Util::RenderbufferBytesPerPixel(GL_STENCIL_INDEX8));
+ EXPECT_EQ(2u, GLES2Util::RenderbufferBytesPerPixel(GL_RGBA4));
+ EXPECT_EQ(2u, GLES2Util::RenderbufferBytesPerPixel(GL_RGB565));
+ EXPECT_EQ(2u, GLES2Util::RenderbufferBytesPerPixel(GL_RGB5_A1));
+ EXPECT_EQ(2u, GLES2Util::RenderbufferBytesPerPixel(GL_DEPTH_COMPONENT16));
+ EXPECT_EQ(4u, GLES2Util::RenderbufferBytesPerPixel(GL_RGB));
+ EXPECT_EQ(4u, GLES2Util::RenderbufferBytesPerPixel(GL_RGBA));
+ EXPECT_EQ(
+ 4u, GLES2Util::RenderbufferBytesPerPixel(GL_DEPTH24_STENCIL8_OES));
+ EXPECT_EQ(4u, GLES2Util::RenderbufferBytesPerPixel(GL_RGB8_OES));
+ EXPECT_EQ(4u, GLES2Util::RenderbufferBytesPerPixel(GL_RGBA8_OES));
+ EXPECT_EQ(
+ 4u, GLES2Util::RenderbufferBytesPerPixel(GL_DEPTH_COMPONENT24_OES));
+ EXPECT_EQ(0u, GLES2Util::RenderbufferBytesPerPixel(-1));
+}
+
+TEST_F(GLES2UtilTest, GetChannelsForCompressedFormat) {
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(GL_ETC1_RGB8_OES));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(GL_ATC_RGB_AMD));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(
+ GL_ATC_RGBA_EXPLICIT_ALPHA_AMD));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(
+ GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(
+ GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(
+ GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(
+ GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG));
+ EXPECT_EQ(0u, GLES2Util::GetChannelsForFormat(
+ GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG));
+}
+
+namespace {
+
+void CheckParseUniformName(
+ const char* name,
+ bool expected_success,
+ size_t expected_array_pos,
+ int expected_index,
+ bool expected_getting_array) {
+ int index = 1234;
+ size_t array_pos = 1244;
+ bool getting_array = false;
+ bool success = GLES2Util::ParseUniformName(
+ name, &array_pos, &index, &getting_array);
+ EXPECT_EQ(expected_success, success);
+ if (success) {
+ EXPECT_EQ(expected_array_pos, array_pos);
+ EXPECT_EQ(expected_index, index);
+ EXPECT_EQ(expected_getting_array, getting_array);
+ }
+}
+
+} // anonymous namespace
+
+TEST_F(GLES2UtilTest, ParseUniformName) {
+ CheckParseUniformName("u_name", true, std::string::npos, 0, false);
+ CheckParseUniformName("u_name[]", false, std::string::npos, 0, false);
+ CheckParseUniformName("u_name]", false, std::string::npos, 0, false);
+ CheckParseUniformName("u_name[0a]", false, std::string::npos, 0, false);
+ CheckParseUniformName("u_name[a0]", false, std::string::npos, 0, false);
+ CheckParseUniformName("u_name[0a0]", false, std::string::npos, 0, false);
+ CheckParseUniformName("u_name[0]", true, 6u, 0, true);
+ CheckParseUniformName("u_name[2]", true, 6u, 2, true);
+ CheckParseUniformName("u_name[02]", true, 6u, 2, true);
+ CheckParseUniformName("u_name[20]", true, 6u, 20, true);
+ CheckParseUniformName("u_name[020]", true, 6u, 20, true);
+ CheckParseUniformName("u_name[0][0]", true, 9u, 0, true);
+ CheckParseUniformName("u_name[3][2]", true, 9u, 2, true);
+ CheckParseUniformName("u_name[03][02]", true, 10u, 2, true);
+ CheckParseUniformName("u_name[30][20]", true, 10u, 20, true);
+ CheckParseUniformName("u_name[030][020]", true, 11u, 20, true);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/common/gles2_utils_export.h b/gpu/command_buffer/common/gles2_utils_export.h
new file mode 100644
index 0000000..ec7da8a
--- /dev/null
+++ b/gpu/command_buffer/common/gles2_utils_export.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_UTILS_EXPORT_H_
+#define GPU_COMMAND_BUFFER_COMMON_GLES2_UTILS_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(GLES2_UTILS_IMPLEMENTATION)
+#define GLES2_UTILS_EXPORT __declspec(dllexport)
+#else
+#define GLES2_UTILS_EXPORT __declspec(dllimport)
+#endif // defined(GLES2_UTILS_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(GLES2_UTILS_IMPLEMENTATION)
+#define GLES2_UTILS_EXPORT __attribute__((visibility("default")))
+#else
+#define GLES2_UTILS_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define GLES2_UTILS_EXPORT
+#endif
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_UTILS_EXPORT_H_
diff --git a/gpu/command_buffer/common/gpu_memory_allocation.h b/gpu/command_buffer/common/gpu_memory_allocation.h
new file mode 100644
index 0000000..facbd7f
--- /dev/null
+++ b/gpu/command_buffer/common/gpu_memory_allocation.h
@@ -0,0 +1,54 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GPU_MEMORY_ALLOCATION_H_
+#define GPU_COMMAND_BUFFER_COMMON_GPU_MEMORY_ALLOCATION_H_
+
+#include "base/basictypes.h"
+
+namespace gpu {
+
+// These are per context memory allocation limits set by the GpuMemoryManager
+// and assigned to the browser and renderer context.
+// They will change over time, given memory availability and browser state.
+struct MemoryAllocation {
+ enum PriorityCutoff {
+ // Allow no allocations.
+ CUTOFF_ALLOW_NOTHING,
+ // Allow only allocations that are strictly required for correct rendering.
+ // For compositors, this is what is visible.
+ CUTOFF_ALLOW_REQUIRED_ONLY,
+ // Allow allocations that are not strictly needed for correct rendering, but
+ // are nice to have for performance. For compositors, this includes textures
+ // that are a few screens away from being visible.
+ CUTOFF_ALLOW_NICE_TO_HAVE,
+ // Allow all allocations.
+ CUTOFF_ALLOW_EVERYTHING,
+ CUTOFF_LAST = CUTOFF_ALLOW_EVERYTHING
+ };
+
+ // Limits when this renderer is visible.
+ uint64 bytes_limit_when_visible;
+ PriorityCutoff priority_cutoff_when_visible;
+
+ MemoryAllocation()
+ : bytes_limit_when_visible(0),
+ priority_cutoff_when_visible(CUTOFF_ALLOW_NOTHING) {
+ }
+
+ MemoryAllocation(uint64 bytes_limit_when_visible)
+ : bytes_limit_when_visible(bytes_limit_when_visible),
+ priority_cutoff_when_visible(CUTOFF_ALLOW_EVERYTHING) {
+ }
+
+ bool Equals(const MemoryAllocation& other) const {
+ return bytes_limit_when_visible ==
+ other.bytes_limit_when_visible &&
+ priority_cutoff_when_visible == other.priority_cutoff_when_visible;
+ }
+};
+
+}  // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GPU_MEMORY_ALLOCATION_H_
diff --git a/gpu/command_buffer/common/id_allocator.cc b/gpu/command_buffer/common/id_allocator.cc
new file mode 100644
index 0000000..507b14e
--- /dev/null
+++ b/gpu/command_buffer/common/id_allocator.cc
@@ -0,0 +1,89 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of IdAllocator.
+
+#include "gpu/command_buffer/common/id_allocator.h"
+
+#include "base/logging.h"
+
+namespace gpu {
+
+IdAllocator::IdAllocator() {}
+
+IdAllocator::~IdAllocator() {}
+
+ResourceId IdAllocator::AllocateID() {
+ ResourceId id;
+ ResourceIdSet::iterator iter = free_ids_.begin();
+ if (iter != free_ids_.end()) {
+ id = *iter;
+ } else {
+ id = LastUsedId() + 1;
+ if (!id) {
+ // We wrapped around to 0.
+ id = FindFirstUnusedId();
+ }
+ }
+ MarkAsUsed(id);
+ return id;
+}
+
+ResourceId IdAllocator::AllocateIDAtOrAbove(ResourceId desired_id) {
+ ResourceId id;
+ ResourceIdSet::iterator iter = free_ids_.lower_bound(desired_id);
+ if (iter != free_ids_.end()) {
+ id = *iter;
+ } else if (LastUsedId() < desired_id) {
+ id = desired_id;
+ } else {
+ id = LastUsedId() + 1;
+ if (!id) {
+ // We wrapped around to 0.
+ id = FindFirstUnusedId();
+ }
+ }
+ MarkAsUsed(id);
+ return id;
+}
+
+bool IdAllocator::MarkAsUsed(ResourceId id) {
+ DCHECK(id);
+ free_ids_.erase(id);
+ std::pair<ResourceIdSet::iterator, bool> result = used_ids_.insert(id);
+ return result.second;
+}
+
+void IdAllocator::FreeID(ResourceId id) {
+ if (id) {
+ used_ids_.erase(id);
+ free_ids_.insert(id);
+ }
+}
+
+bool IdAllocator::InUse(ResourceId id) const {
+ return id == kInvalidResource || used_ids_.find(id) != used_ids_.end();
+}
+
+ResourceId IdAllocator::LastUsedId() const {
+ if (used_ids_.empty()) {
+ return 0u;
+ } else {
+ return *used_ids_.rbegin();
+ }
+}
+
+ResourceId IdAllocator::FindFirstUnusedId() const {
+ ResourceId id = 1;
+ for (ResourceIdSet::const_iterator it = used_ids_.begin();
+ it != used_ids_.end(); ++it) {
+ if ((*it) != id) {
+ return id;
+ }
+ ++id;
+ }
+ return id;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/common/id_allocator.h b/gpu/command_buffer/common/id_allocator.h
new file mode 100644
index 0000000..b877083
--- /dev/null
+++ b/gpu/command_buffer/common/id_allocator.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the definition of the IdAllocator class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_ID_ALLOCATOR_H_
+#define GPU_COMMAND_BUFFER_CLIENT_ID_ALLOCATOR_H_
+
+#include <stdint.h>
+
+#include <set>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+// A resource ID, key to the resource maps.
+typedef uint32_t ResourceId;
+// Invalid resource ID.
+static const ResourceId kInvalidResource = 0u;
+
+// A class to manage the allocation of resource IDs.
+class GPU_EXPORT IdAllocator {
+ public:
+ IdAllocator();
+ ~IdAllocator();
+
+ // Allocates a new resource ID.
+ ResourceId AllocateID();
+
+ // Allocates an Id starting at or above desired_id.
+ // Note: may wrap if it starts near limit.
+ ResourceId AllocateIDAtOrAbove(ResourceId desired_id);
+
+ // Marks an id as used. Returns false if id was already used.
+ bool MarkAsUsed(ResourceId id);
+
+ // Frees a resource ID.
+ void FreeID(ResourceId id);
+
+ // Checks whether or not a resource ID is in use.
+ bool InUse(ResourceId id) const;
+
+ private:
+ // TODO(gman): This would work much better with ranges or a hash table.
+ typedef std::set<ResourceId> ResourceIdSet;
+
+ // The highest ID on the used list.
+ ResourceId LastUsedId() const;
+
+ // Lowest ID that isn't on the used list. This is slow, use as a last resort.
+ ResourceId FindFirstUnusedId() const;
+
+ ResourceIdSet used_ids_;
+ ResourceIdSet free_ids_;
+
+ DISALLOW_COPY_AND_ASSIGN(IdAllocator);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_ID_ALLOCATOR_H_
diff --git a/gpu/command_buffer/common/id_allocator_test.cc b/gpu/command_buffer/common/id_allocator_test.cc
new file mode 100644
index 0000000..5d32e40
--- /dev/null
+++ b/gpu/command_buffer/common/id_allocator_test.cc
@@ -0,0 +1,128 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file has the unit tests for the IdAllocator class.
+
+#include "gpu/command_buffer/common/id_allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class IdAllocatorTest : public testing::Test {
+ protected:
+ virtual void SetUp() {}
+ virtual void TearDown() {}
+
+ IdAllocator* id_allocator() { return &id_allocator_; }
+
+ private:
+ IdAllocator id_allocator_;
+};
+
+// Checks basic functionality: AllocateID, FreeID, InUse.
+TEST_F(IdAllocatorTest, TestBasic) {
+ IdAllocator *allocator = id_allocator();
+ // Check that resource 1 is not in use
+ EXPECT_FALSE(allocator->InUse(1));
+
+ // Allocate an ID, check that it's in use.
+ ResourceId id1 = allocator->AllocateID();
+ EXPECT_TRUE(allocator->InUse(id1));
+
+ // Allocate another ID, check that it's in use, and different from the first
+ // one.
+ ResourceId id2 = allocator->AllocateID();
+ EXPECT_TRUE(allocator->InUse(id2));
+ EXPECT_NE(id1, id2);
+
+ // Free one of the IDs, check that it's not in use any more.
+ allocator->FreeID(id1);
+ EXPECT_FALSE(allocator->InUse(id1));
+
+ // Frees the other ID, check that it's not in use any more.
+ allocator->FreeID(id2);
+ EXPECT_FALSE(allocator->InUse(id2));
+}
+
+// Checks that the resource IDs are re-used after being freed.
+TEST_F(IdAllocatorTest, TestAdvanced) {
+ IdAllocator *allocator = id_allocator();
+
+ // Allocate the highest possible ID, to make life awkward.
+ allocator->AllocateIDAtOrAbove(~static_cast<ResourceId>(0));
+
+ // Allocate a significant number of resources.
+ const unsigned int kNumResources = 100;
+ ResourceId ids[kNumResources];
+ for (unsigned int i = 0; i < kNumResources; ++i) {
+ ids[i] = allocator->AllocateID();
+ EXPECT_TRUE(allocator->InUse(ids[i]));
+ }
+
+ // Check that a new allocation re-uses the resource we just freed.
+ ResourceId id1 = ids[kNumResources / 2];
+ allocator->FreeID(id1);
+ EXPECT_FALSE(allocator->InUse(id1));
+ ResourceId id2 = allocator->AllocateID();
+ EXPECT_TRUE(allocator->InUse(id2));
+ EXPECT_EQ(id1, id2);
+}
+
+// Checks that we can choose our own ids and they won't be reused.
+TEST_F(IdAllocatorTest, MarkAsUsed) {
+ IdAllocator* allocator = id_allocator();
+ ResourceId id = allocator->AllocateID();
+ allocator->FreeID(id);
+ EXPECT_FALSE(allocator->InUse(id));
+ EXPECT_TRUE(allocator->MarkAsUsed(id));
+ EXPECT_TRUE(allocator->InUse(id));
+ ResourceId id2 = allocator->AllocateID();
+ EXPECT_NE(id, id2);
+ EXPECT_TRUE(allocator->MarkAsUsed(id2 + 1));
+ ResourceId id3 = allocator->AllocateID();
+ // Checks our algorithm. If the algorithm changes this check should be
+ // changed.
+ EXPECT_EQ(id3, id2 + 2);
+}
+
+// Checks AllocateIdAtOrAbove.
+TEST_F(IdAllocatorTest, AllocateIdAtOrAbove) {
+ const ResourceId kOffset = 123456;
+ IdAllocator* allocator = id_allocator();
+ ResourceId id1 = allocator->AllocateIDAtOrAbove(kOffset);
+ EXPECT_EQ(kOffset, id1);
+ ResourceId id2 = allocator->AllocateIDAtOrAbove(kOffset);
+ EXPECT_GT(id2, kOffset);
+ ResourceId id3 = allocator->AllocateIDAtOrAbove(kOffset);
+ EXPECT_GT(id3, kOffset);
+}
+
+// Checks that AllocateIdAtOrAbove wraps around at the maximum value.
+TEST_F(IdAllocatorTest, AllocateIdAtOrAboveWrapsAround) {
+ const ResourceId kMaxPossibleOffset = ~static_cast<ResourceId>(0);
+ IdAllocator* allocator = id_allocator();
+ ResourceId id1 = allocator->AllocateIDAtOrAbove(kMaxPossibleOffset);
+ EXPECT_EQ(kMaxPossibleOffset, id1);
+ ResourceId id2 = allocator->AllocateIDAtOrAbove(kMaxPossibleOffset);
+ EXPECT_EQ(1u, id2);
+ ResourceId id3 = allocator->AllocateIDAtOrAbove(kMaxPossibleOffset);
+ EXPECT_EQ(2u, id3);
+}
+
+TEST_F(IdAllocatorTest, RedundantFreeIsIgnored) {
+ IdAllocator* allocator = id_allocator();
+ ResourceId id1 = allocator->AllocateID();
+ allocator->FreeID(0);
+ allocator->FreeID(id1);
+ allocator->FreeID(id1);
+ allocator->FreeID(id1 + 1);
+
+ ResourceId id2 = allocator->AllocateID();
+ ResourceId id3 = allocator->AllocateID();
+ EXPECT_NE(id2, id3);
+ EXPECT_NE(kInvalidResource, id2);
+ EXPECT_NE(kInvalidResource, id3);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/common/mailbox.cc b/gpu/command_buffer/common/mailbox.cc
new file mode 100644
index 0000000..21602ab
--- /dev/null
+++ b/gpu/command_buffer/common/mailbox.cc
@@ -0,0 +1,59 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/mailbox.h"
+
+#include <string.h>
+
+#include "base/logging.h"
+#include "base/rand_util.h"
+
+namespace gpu {
+
+Mailbox::Mailbox() {
+ memset(name, 0, sizeof(name));
+}
+
+bool Mailbox::IsZero() const {
+ for (size_t i = 0; i < arraysize(name); ++i) {
+ if (name[i])
+ return false;
+ }
+ return true;
+}
+
+void Mailbox::SetZero() {
+ memset(name, 0, sizeof(name));
+}
+
+void Mailbox::SetName(const int8* n) {
+ DCHECK(IsZero() || !memcmp(name, n, sizeof(name)));
+ memcpy(name, n, sizeof(name));
+}
+
+Mailbox Mailbox::Generate() {
+ Mailbox result;
+ // Generates cryptographically-secure bytes.
+ base::RandBytes(result.name, sizeof(result.name));
+#if !defined(NDEBUG)
+ int8 value = 1;
+ for (size_t i = 1; i < sizeof(result.name); ++i)
+ value ^= result.name[i];
+ result.name[0] = value;
+#endif
+ return result;
+}
+
+bool Mailbox::Verify() const {
+#if !defined(NDEBUG)
+ int8 value = 1;
+ for (size_t i = 0; i < sizeof(name); ++i)
+ value ^= name[i];
+ return value == 0;
+#else
+ return true;
+#endif
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/common/mailbox.h b/gpu/command_buffer/common/mailbox.h
new file mode 100644
index 0000000..a45c91f
--- /dev/null
+++ b/gpu/command_buffer/common/mailbox.h
@@ -0,0 +1,49 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_MAILBOX_H_
+#define GPU_COMMAND_BUFFER_MAILBOX_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include "gpu/gpu_export.h"
+
+// From gl2/gl2ext.h.
+#ifndef GL_MAILBOX_SIZE_CHROMIUM
+#define GL_MAILBOX_SIZE_CHROMIUM 64
+#endif
+
+namespace gpu {
+
+struct GPU_EXPORT Mailbox {
+ Mailbox();
+ bool IsZero() const;
+ void SetZero();
+ void SetName(const int8_t* name);
+
+ // Generate a unique unguessable mailbox name.
+ static Mailbox Generate();
+
+ // Verify that the mailbox was created through Mailbox::Generate. This only
+ // works in Debug (always returns true in Release). This is not a secure
+ // check, only to catch bugs where clients forgot to call Mailbox::Generate.
+ bool Verify() const;
+
+ int8_t name[GL_MAILBOX_SIZE_CHROMIUM];
+ bool operator<(const Mailbox& other) const {
+ return memcmp(this, &other, sizeof other) < 0;
+ }
+ bool operator==(const Mailbox& other) const {
+ return memcmp(this, &other, sizeof other) == 0;
+ }
+ bool operator!=(const Mailbox& other) const {
+ return !operator==(other);
+ }
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_MAILBOX_H_
+
diff --git a/gpu/command_buffer/common/mailbox_holder.cc b/gpu/command_buffer/common/mailbox_holder.cc
new file mode 100644
index 0000000..87dec36
--- /dev/null
+++ b/gpu/command_buffer/common/mailbox_holder.cc
@@ -0,0 +1,18 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/mailbox_holder.h"
+
+namespace gpu {
+
+MailboxHolder::MailboxHolder() : texture_target(0), sync_point(0) {}
+
+MailboxHolder::MailboxHolder(const Mailbox& mailbox,
+ uint32_t texture_target,
+ uint32_t sync_point)
+ : mailbox(mailbox),
+ texture_target(texture_target),
+ sync_point(sync_point) {}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/common/mailbox_holder.h b/gpu/command_buffer/common/mailbox_holder.h
new file mode 100644
index 0000000..7c7a0e2
--- /dev/null
+++ b/gpu/command_buffer/common/mailbox_holder.h
@@ -0,0 +1,28 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_MAILBOX_HOLDER_H_
+#define GPU_COMMAND_BUFFER_MAILBOX_HOLDER_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+struct GPU_EXPORT MailboxHolder {
+ MailboxHolder();
+ MailboxHolder(const gpu::Mailbox& mailbox,
+ uint32_t texture_target,
+ uint32_t sync_point);
+ gpu::Mailbox mailbox;
+ uint32_t texture_target;
+ uint32_t sync_point;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_MAILBOX_HOLDER_H_
diff --git a/gpu/command_buffer/common/thread_local.h b/gpu/command_buffer/common/thread_local.h
new file mode 100644
index 0000000..bc9ff66
--- /dev/null
+++ b/gpu/command_buffer/common/thread_local.h
@@ -0,0 +1,59 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Functions for allocating and accessing thread local values via key.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_THREAD_LOCAL_H_
+#define GPU_COMMAND_BUFFER_COMMON_THREAD_LOCAL_H_
+
+#if defined(_WIN32)
+#include <windows.h>
+#else
+#include <pthread.h>
+#endif
+
+namespace gpu {
+
+#if defined(_WIN32)
+typedef DWORD ThreadLocalKey;
+#else
+typedef pthread_key_t ThreadLocalKey;
+#endif
+
+inline ThreadLocalKey ThreadLocalAlloc() {
+#if defined(_WIN32)
+ return TlsAlloc();
+#else
+ ThreadLocalKey key;
+ pthread_key_create(&key, NULL);
+ return key;
+#endif
+}
+
+inline void ThreadLocalFree(ThreadLocalKey key) {
+#if defined(_WIN32)
+ TlsFree(key);
+#else
+ pthread_key_delete(key);
+#endif
+}
+
+inline void ThreadLocalSetValue(ThreadLocalKey key, void* value) {
+#if defined(_WIN32)
+ TlsSetValue(key, value);
+#else
+ pthread_setspecific(key, value);
+#endif
+}
+
+inline void* ThreadLocalGetValue(ThreadLocalKey key) {
+#if defined(_WIN32)
+ return TlsGetValue(key);
+#else
+ return pthread_getspecific(key);
+#endif
+}
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_THREAD_LOCAL_H_
diff --git a/gpu/command_buffer/common/time.h b/gpu/command_buffer/common/time.h
new file mode 100644
index 0000000..e4eb942
--- /dev/null
+++ b/gpu/command_buffer/common/time.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_TIME_H_
+#define GPU_COMMAND_BUFFER_COMMON_TIME_H_
+
+#if !defined(__native_client__)
+
+#include "base/time/time.h"
+
+namespace gpu {
+
+inline uint64 MicrosecondsSinceOriginOfTime() {
+ return (base::TimeTicks::HighResNow() - base::TimeTicks()).InMicroseconds();
+}
+
+} // namespace gpu
+
+#else
+
+namespace gpu {
+
+inline uint64 MicrosecondsSinceOriginOfTime() {
+ return 0;
+}
+
+} // namespace gpu
+
+#endif // __native_client__
+
+#endif // GPU_COMMAND_BUFFER_COMMON_TIME_H_
diff --git a/gpu/command_buffer/common/trace_event.h b/gpu/command_buffer/common/trace_event.h
new file mode 100644
index 0000000..e63517e
--- /dev/null
+++ b/gpu/command_buffer/common/trace_event.h
@@ -0,0 +1,10 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_TRACE_EVENT_H_
+#define GPU_COMMAND_BUFFER_COMMON_TRACE_EVENT_H_
+
+#include "base/debug/trace_event.h"
+
+#endif // GPU_COMMAND_BUFFER_COMMON_TRACE_EVENT_H_
diff --git a/gpu/command_buffer/common/unittest_main.cc b/gpu/command_buffer/common/unittest_main.cc
new file mode 100644
index 0000000..2d6a2d1
--- /dev/null
+++ b/gpu/command_buffer/common/unittest_main.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/test_suite.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+class NoAtExitBaseTestSuite : public base::TestSuite {
+ public:
+ NoAtExitBaseTestSuite(int argc, char** argv)
+ : base::TestSuite(argc, argv, false) {
+ }
+};
+
+int RunTestSuite(int argc, char** argv) {
+ return NoAtExitBaseTestSuite(argc, argv).Run();
+}
+
+} // namespace
+
+int main(int argc, char** argv) {
+ // On Android, AtExitManager is created in
+ // testing/android/native_test_wrapper.cc before main() is called.
+ // The same thing is also done in base/test/test_suite.cc
+#if !defined(OS_ANDROID)
+ base::AtExitManager exit_manager;
+#endif
+ CommandLine::Init(argc, argv);
+ testing::InitGoogleMock(&argc, argv);
+ return base::LaunchUnitTests(argc,
+ argv,
+ base::Bind(&RunTestSuite, argc, argv));
+}
diff --git a/gpu/command_buffer/docs/gles2_cmd_format_docs.txt b/gpu/command_buffer/docs/gles2_cmd_format_docs.txt
new file mode 100644
index 0000000..67306ab
--- /dev/null
+++ b/gpu/command_buffer/docs/gles2_cmd_format_docs.txt
@@ -0,0 +1,2262 @@
+//! \file
+//!
+//! The public interface for 3D graphics is based on a command buffer.
+//!
+//! This was chosen because it provides an easy way to separate the process of
+//! writing commands from the process of reading those commands without
+//! requiring too much overhead to keep the two processes in sync.
+//!
+//! You can use this info to write commands yourself. Most developers will use
+//! the provided OpenGL ES 2.0 implementation that issues these commands for
+//! them.
+//!
+//! Each command starts with a header. The header is 32 bits: the first 21 bits
+//! give the number of 32-bit entries the command occupies, including the header
+//! itself, and the last 11 bits specify the command.
+//!
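+//! As a rough worked example (added for illustration, not from the original
+//! text): a command that occupies 3 32-bit entries and has command id 7 would
+//! carry a header whose size bits hold 3 and whose command bits hold 7;
+//! assuming the size bits are the low 21 bits, that is the raw 32-bit word
+//! (7 << 21) | 3 = 0x00E00003.
+//!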
+//! Commands that send a variable amount of data have 1 to 3 ways to send that
+//! data.
+//!
+//! Many commands can send their data in shared memory. The command will take
+//! an id of the shared memory and an offset into that shared memory of where
+//! the data lives. Commands are executed asynchronously, so the client
+//! program must be careful to leave the data available until the command has
+//! executed.
+//!
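+//! A rough sketch of that pattern (illustrative only; the command and argument
+//! names here are made up, and SetToken is described later in this file):
+//!
+//!   memcpy(shm_address + shm_offset, data, data_size);
+//!   SomeCommandTakingSharedMemory(..., shm_id, shm_offset, data_size);
+//!   SetToken(my_token);
+//!   // The shared memory must not be reused until my_token is observed to
+//!   // have passed in the command stream.
+//!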
+//! Some commands have an 'immediate' version where the data appears directly
+//! after the command in memory.
+//!
+//! A 3rd way of passing data is through Buckets. Buckets are identified by
+//! number. You create a bucket with the SetBucketSize command and can then
+//! fill the bucket with SetBucketData commands. Once you've sent all your
+//! data, you can issue a command that uses the bucket, passing the id of the
+//! bucket to use.
+//!
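+//! A rough sketch of the bucket sequence (illustrative only; argument lists
+//! are abbreviated and the consuming command is hypothetical):
+//!
+//!   SetBucketSize(bucket_id, total_size);
+//!   SetBucketData(bucket_id, 0, part1_size, shm_id, shm_offset);
+//!   SetBucketData(bucket_id, part1_size, part2_size, shm_id, shm_offset);
+//!   SomeCommandThatTakesABucket(bucket_id);
+//!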
+//! Receiving data works similarly. Some commands return their data to shared
+//! memory. Other commands return their data through buckets, which can then be
+//! queried with the GetBucketSize and GetBucketData commands. In either case,
+//! the data will not be available until the command executes.
+//!
+//! All commands and arguments are validated. If a command fails validation the
+//! service will stop processing commands. It is the responsibility of the
+//! client to never issue an invalid command.
+//!
+//! Examples of invalid commands:
+//! - A command's size does not match the command.
+//! - A command's size would address memory outside the command buffer
+//! - A shared memory id is invalid
+//! - A shared memory offset is out of range for the given shared memory
+//! - The size of the data a command would access in shared memory is out of
+//! range for the given shared memory buffer.
+//! - A result (in the transfer buffer) is not initialized to the
+//! failure case. For example, any command that returns a SizedResult
+//! will take a shared memory id and offset to where to store the result.
+//!   The size field of the result must be set to 0 before issuing the
+//!   command. That way, if the command buffer service fails, the
+//! client will see a 0 size.
+//!
+//! The docs are a little terse. For any command that corresponds to an OpenGL
+//! ES 2.0 function, the arguments should be clear from the OpenGL ES 2.0
+//! documentation, with the following minor caveats:
+//!
+//! - Client side arrays are not supported at the command buffer level,
+//!   so DrawArrays and VertexAttribPointer only take offsets into buffers.
+//! - The commands GenBuffers, GenTextures, CreateProgram, CreateShader, etc.
+//!   take client side ids and register them with the service. It's up to the
+//!   client to make up the ids.
+//! - For shared resources, it's still up to the client to make up ids,
+//!   but to help keep them in sync with other threads the commands
+//!   GenSharedIds, RegisterSharedIds and DeleteSharedIds can be used.
+//!
+
+//! The command header.
+struct CommandHeader {
+ Uint32 size:21;
+ Uint32 command:11;
+};
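+
+//! A minimal sketch of how a client might pack and unpack these fields,
+//! assuming the first-declared bit-field occupies the low-order bits, as is
+//! typical on little-endian platforms. The helper names below are
+//! illustrative only, not part of the command buffer interface:
+//!
+//!   uint32 PackHeader(uint32 command, uint32 size_in_entries) {
+//!     // size (in 32-bit entries, header included) goes in the low 21 bits,
+//!     // the command id in the high 11 bits.
+//!     return (size_in_entries & 0x1FFFFFu) | (command << 21);
+//!   }
+//!   uint32 HeaderSize(uint32 packed)    { return packed & 0x1FFFFFu; }
+//!   uint32 HeaderCommand(uint32 packed) { return packed >> 21; }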
+
+
+//! Used for some glGetXXX commands that return a result through a pointer. We
+//! need to know if the command succeeded or not and the size of the result. If
+//! the command failed, its result size will be 0. You must set the size to 0
+//! before issuing the command.
+//!
+//! To retrieve the data you might do something like this pseudo code:
+//!
+//! GetAttachedShaders::Result* result = address-of-shared-memory
+//! int num_results = result->size / sizeof(GLuint); // the type returned
+//! GLuint* results = &result->data;
+//! for (int ii = 0; ii < num_results; ++ii) {
+//! printf("%d\n", results[ii]);
+//! }
+//!
+template <typename T>
+struct SizedResult {
+ uint32 size; // in bytes.
+ T data; // this is just here to get an offset.
+};
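+
+//! Before issuing such a command the client must clear the size field to the
+//! failure value, then check it after the command has executed. Something
+//! like this pseudo code (the helper steps are illustrative only):
+//!
+//!   GetAttachedShaders::Result* result = address-of-shared-memory
+//!   result->size = 0;            // failure value; the service overwrites it
+//!   issue-GetAttachedShaders-command
+//!   wait-for-command-to-execute  // e.g. by waiting for a SetToken value
+//!   if (result->size == 0) {
+//!     // the command failed
+//!   }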
+
+
+//! A Noop command.
+struct Noop {
+ static const CommandId kCmdId = 0;
+
+ CommandHeader header;
+};
+
+//! The SetToken command puts a token in the command stream that you can
+//! later use to check whether the service has processed commands up to that
+//! point.
+struct SetToken {
+ static const CommandId kCmdId = 1;
+
+ CommandHeader header;
+ uint32 token;
+};
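+
+//! A sketch of the usual token pattern, used to find out when earlier
+//! commands (and therefore any shared memory they reference) are no longer
+//! in use. The helper names are illustrative only:
+//!
+//!   uint32 token = next_token_++;
+//!   WriteBufferDataCommand(shm_id, shm_offset, size);  // references shared memory
+//!   WriteSetTokenCommand(token);
+//!   // ... later, before overwriting that shared memory:
+//!   while (LastTokenProcessedByService() < token)
+//!     WaitABit();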
+
+//! The Jump command jumps to another place in the command buffer.
+struct Jump {
+ static const CommandId kCmdId = 3;
+
+ CommandHeader header;
+ uint32 offset;
+};
+
+//! The JumpRelative command jumps to another place in the command buffer
+//! relative to the end of this command. In other words, JumpRelative with an
+//! offset of zero is effectively a no-op.
+struct JumpRelative {
+ static const CommandId kCmdId = 4;
+
+ CommandHeader header;
+ int32 offset;
+};
+
+//! The Call command jumps to a subroutine which can be returned from with the
+//! Return command.
+struct Call {
+ static const CommandId kCmdId = 5;
+
+ CommandHeader header;
+ uint32 offset;
+};
+
+//! The CallRelative command jumps to a subroutine using a relative offset. The
+//! offset is relative to the end of this command.
+struct CallRelative {
+ static const CommandId kCmdId = 6;
+
+ CommandHeader header;
+ int32 offset;
+};
+
+//! Returns from a subroutine called by the Call or CallRelative commands.
+struct Return {
+ static const CommandId kCmdId = 7;
+
+ CommandHeader header;
+};
+
+//! Sets the size of a bucket for collecting data on the service side.
+//! This is a utility for gathering data on the service side so it can be used
+//! all at once when some service side API is called. It removes the need to
+//! add special commands just to support a particular API. For example, any API
+//! command that needs a string needs a way to send that string to the API over
+//! the command buffer. While you could require that the command buffer or
+//! transfer buffer be large enough to hold the largest string you can send,
+//! using this command removes that restriction by letting you send the data
+//! over in smaller pieces and build it up on the service side.
+//!
+//! You can clear a bucket on the service side and thereby free memory by
+//! sending a size of 0.
+struct SetBucketSize {
+ static const CommandId kCmdId = 8;
+
+ CommandHeader header;
+ uint32 bucket_id;
+ uint32 size;
+};
+
+//! Sets the contents of a portion of a bucket on the service side from data in
+//! shared memory.
+//! See SetBucketSize.
+struct SetBucketData {
+ static const CommandId kCmdId = 9;
+
+ CommandHeader header;
+ uint32 bucket_id;
+ uint32 offset;
+ uint32 size;
+ uint32 shared_memory_id;
+ uint32 shared_memory_offset;
+};
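+
+//! For example, a client whose transfer buffer is smaller than a shader
+//! string could upload the string to bucket 1 in pieces. The helper names
+//! are illustrative only:
+//!
+//!   WriteSetBucketSizeCommand(1, str_size);
+//!   for (uint32 offset = 0; offset < str_size; offset += chunk_size) {
+//!     uint32 part_size = std::min(chunk_size, str_size - offset);
+//!     memcpy(transfer_buffer_address, str + offset, part_size);
+//!     WriteSetBucketDataCommand(1, offset, part_size, shm_id, shm_offset);
+//!     WaitForTokenSoTransferBufferCanBeReused();
+//!   }
+//!   WriteShaderSourceBucketCommand(shader_id, 1);  // command that uses the bucket
+//!   WriteSetBucketSizeCommand(1, 0);                // free the bucket's memory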
+
+//! Sets the contents of a portion of a bucket on the service side from data in
+//! the command buffer.
+//! See SetBucketSize.
+struct SetBucketDataImmediate {
+ static const CommandId kCmdId = 10;
+
+ CommandHeader header;
+ uint32 bucket_id;
+ uint32 offset;
+ uint32 size;
+};
+
+//! Gets the size of a bucket the service has available. Sending a variable
+//! size result back to the client, for example any API that returns a string,
+//! is problematic since the largest thing you can send back at once is limited
+//! by the size of your shared memory. This command, along with GetBucketData,
+//! implements a generic way to retrieve a result one piece at a time.
+struct GetBucketSize {
+ static const CommandId kCmdId = 11;
+
+ typedef uint32 Result;
+
+ CommandHeader header;
+ uint32 bucket_id;
+ uint32 shared_memory_id;
+ uint32 shared_memory_offset;
+};
+
+//! Gets a piece of a result the service has available.
+//! See GetBucketSize.
+struct GetBucketData {
+ static const CommandId kCmdId = 12;
+
+ CommandHeader header;
+ uint32 bucket_id;
+ uint32 offset;
+ uint32 size;
+ uint32 shared_memory_id;
+ uint32 shared_memory_offset;
+};
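+
+//! For example, a client could read back a string returned through bucket 2
+//! (say by the GetString command below) one piece at a time. The helper
+//! names are illustrative only:
+//!
+//!   WriteGetStringCommand(GL_EXTENSIONS, 2);  // fills bucket 2
+//!   uint32* size_result = ...;                // in shared memory; set to 0 first
+//!   WriteGetBucketSizeCommand(2, shm_id, size_result_offset);
+//!   WaitForCommandToExecute();
+//!   std::string value(*size_result, '\0');
+//!   for (uint32 offset = 0; offset < *size_result; offset += chunk_size) {
+//!     uint32 part_size = std::min(chunk_size, *size_result - offset);
+//!     WriteGetBucketDataCommand(2, offset, part_size, shm_id, data_offset);
+//!     WaitForCommandToExecute();
+//!     memcpy(&value[offset], transfer_buffer_address, part_size);
+//!   }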
+
+// OpenGL ES 2.0 related commands.
+
+//! Command that corresponds to glActiveTexture.
+struct ActiveTexture {
+ static const CommandId kCmdId = 256;
+
+ CommandHeader header;
+ uint32 texture; //!< GLenum
+};
+
+//! Command that corresponds to glAttachShader.
+struct AttachShader {
+ static const CommandId kCmdId = 257;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 shader; //!< GLuint
+};
+
+//! Command that corresponds to glBindAttribLocation.
+struct BindAttribLocation {
+ static const CommandId kCmdId = 258;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 index; //!< GLuint
+ uint32 name_shm_id; //!< uint32
+ uint32 name_shm_offset; //!< uint32
+ uint32 data_size; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glBindAttribLocation.
+struct BindAttribLocationImmediate {
+ static const CommandId kCmdId = 259;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 index; //!< GLuint
+ uint32 data_size; //!< uint32
+};
+
+//! Bucket version of command that corresponds to glBindAttribLocation.
+struct BindAttribLocationBucket {
+ static const CommandId kCmdId = 432;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 index; //!< GLuint
+ uint32 name_bucket_id; //!< uint32
+};
+
+//! Command that corresponds to glBindBuffer.
+struct BindBuffer {
+ static const CommandId kCmdId = 260;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 buffer; //!< GLuint
+};
+
+//! Command that corresponds to glBindFramebuffer.
+struct BindFramebuffer {
+ static const CommandId kCmdId = 261;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 framebuffer; //!< GLuint
+};
+
+//! Command that corresponds to glBindRenderbuffer.
+struct BindRenderbuffer {
+ static const CommandId kCmdId = 262;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 renderbuffer; //!< GLuint
+};
+
+//! Command that corresponds to glBindTexture.
+struct BindTexture {
+ static const CommandId kCmdId = 263;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 texture; //!< GLuint
+};
+
+//! Command that corresponds to glBlendColor.
+struct BlendColor {
+ static const CommandId kCmdId = 264;
+
+ CommandHeader header;
+ float red; //!< GLclampf
+ float green; //!< GLclampf
+ float blue; //!< GLclampf
+ float alpha; //!< GLclampf
+};
+
+//! Command that corresponds to glBlendEquation.
+struct BlendEquation {
+ static const CommandId kCmdId = 265;
+
+ CommandHeader header;
+ uint32 mode; //!< GLenum
+};
+
+//! Command that corresponds to glBlendEquationSeparate.
+struct BlendEquationSeparate {
+ static const CommandId kCmdId = 266;
+
+ CommandHeader header;
+ uint32 modeRGB; //!< GLenum
+ uint32 modeAlpha; //!< GLenum
+};
+
+//! Command that corresponds to glBlendFunc.
+struct BlendFunc {
+ static const CommandId kCmdId = 267;
+
+ CommandHeader header;
+ uint32 sfactor; //!< GLenum
+ uint32 dfactor; //!< GLenum
+};
+
+//! Command that corresponds to glBlendFuncSeparate.
+struct BlendFuncSeparate {
+ static const CommandId kCmdId = 268;
+
+ CommandHeader header;
+ uint32 srcRGB; //!< GLenum
+ uint32 dstRGB; //!< GLenum
+ uint32 srcAlpha; //!< GLenum
+ uint32 dstAlpha; //!< GLenum
+};
+
+//! Command that corresponds to glBufferData.
+struct BufferData {
+ static const CommandId kCmdId = 269;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 size; //!< GLsizeiptr
+ uint32 data_shm_id; //!< uint32
+ uint32 data_shm_offset; //!< uint32
+ uint32 usage; //!< GLenum
+};
+
+//! Immediate version of command that corresponds to glBufferData.
+struct BufferDataImmediate {
+ static const CommandId kCmdId = 270;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 size; //!< GLsizeiptr
+ uint32 usage; //!< GLenum
+};
+
+//! Command that corresponds to glBufferSubData.
+struct BufferSubData {
+ static const CommandId kCmdId = 271;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 offset; //!< GLintptr
+ int32 size; //!< GLsizeiptr
+ uint32 data_shm_id; //!< uint32
+ uint32 data_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glBufferSubData.
+struct BufferSubDataImmediate {
+ static const CommandId kCmdId = 272;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 offset; //!< GLintptr
+ int32 size; //!< GLsizeiptr
+};
+
+//! Command that corresponds to glCheckFramebufferStatus.
+struct CheckFramebufferStatus {
+ static const CommandId kCmdId = 273;
+
+ typedef GLenum Result;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glClear.
+struct Clear {
+ static const CommandId kCmdId = 274;
+
+ CommandHeader header;
+ uint32 mask; //!< GLbitfield
+};
+
+//! Command that corresponds to glClearColor.
+struct ClearColor {
+ static const CommandId kCmdId = 275;
+
+ CommandHeader header;
+ float red; //!< GLclampf
+ float green; //!< GLclampf
+ float blue; //!< GLclampf
+ float alpha; //!< GLclampf
+};
+
+//! Command that corresponds to glClearDepthf.
+struct ClearDepthf {
+ static const CommandId kCmdId = 276;
+
+ CommandHeader header;
+ float depth; //!< GLclampf
+};
+
+//! Command that corresponds to glClearStencil.
+struct ClearStencil {
+ static const CommandId kCmdId = 277;
+
+ CommandHeader header;
+ int32 s; //!< GLint
+};
+
+//! Command that corresponds to glColorMask.
+struct ColorMask {
+ static const CommandId kCmdId = 278;
+
+ CommandHeader header;
+ uint32 red; //!< GLboolean
+ uint32 green; //!< GLboolean
+ uint32 blue; //!< GLboolean
+ uint32 alpha; //!< GLboolean
+};
+
+//! Command that corresponds to glCompileShader.
+struct CompileShader {
+ static const CommandId kCmdId = 279;
+
+ CommandHeader header;
+ uint32 shader; //!< GLuint
+};
+
+//! Command that corresponds to glCompressedTexImage2D.
+struct CompressedTexImage2D {
+ static const CommandId kCmdId = 280;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ uint32 internalformat; //!< GLenum
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ int32 border; //!< GLint
+ int32 imageSize; //!< GLsizei
+ uint32 data_shm_id; //!< uint32
+ uint32 data_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glCompressedTexImage2D.
+struct CompressedTexImage2DImmediate {
+ static const CommandId kCmdId = 281;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ uint32 internalformat; //!< GLenum
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ int32 border; //!< GLint
+ int32 imageSize; //!< GLsizei
+};
+
+//! Bucket version of command that corresponds to glCompressedTexImage2D.
+struct CompressedTexImage2DBucket {
+ static const CommandId kCmdId = 443;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ uint32 internalformat; //!< GLenum
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ int32 border; //!< GLint
+ uint32 bucket_id; //!< GLuint
+};
+
+//! Command that corresponds to glCompressedTexSubImage2D.
+struct CompressedTexSubImage2D {
+ static const CommandId kCmdId = 282;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ int32 xoffset; //!< GLint
+ int32 yoffset; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ uint32 format; //!< GLenum
+ int32 imageSize; //!< GLsizei
+ uint32 data_shm_id; //!< uint32
+ uint32 data_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glCompressedTexSubImage2D.
+struct CompressedTexSubImage2DImmediate {
+ static const CommandId kCmdId = 283;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ int32 xoffset; //!< GLint
+ int32 yoffset; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ uint32 format; //!< GLenum
+ int32 imageSize; //!< GLsizei
+};
+
+//! Bucket version of command that corresponds to glCompressedTexSubImage2D.
+struct CompressedTexSubImage2DBucket {
+ static const CommandId kCmdId = 444;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ int32 xoffset; //!< GLint
+ int32 yoffset; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ uint32 format; //!< GLenum
+ uint32 bucket_id; //!< GLuint
+};
+
+//! Command that corresponds to glCopyTexImage2D.
+struct CopyTexImage2D {
+ static const CommandId kCmdId = 284;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ uint32 internalformat; //!< GLenum
+ int32 x; //!< GLint
+ int32 y; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ int32 border; //!< GLint
+};
+
+//! Command that corresponds to glCopyTexSubImage2D.
+struct CopyTexSubImage2D {
+ static const CommandId kCmdId = 285;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ int32 xoffset; //!< GLint
+ int32 yoffset; //!< GLint
+ int32 x; //!< GLint
+ int32 y; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+};
+
+//! Command that corresponds to glCreateProgram.
+struct CreateProgram {
+ static const CommandId kCmdId = 286;
+
+ CommandHeader header;
+ uint32 client_id; //!< uint32
+};
+
+//! Command that corresponds to glCreateShader.
+struct CreateShader {
+ static const CommandId kCmdId = 287;
+
+ CommandHeader header;
+ uint32 type; //!< GLenum
+ uint32 client_id; //!< uint32
+};
+
+//! Command that corresponds to glCullFace.
+struct CullFace {
+ static const CommandId kCmdId = 288;
+
+ CommandHeader header;
+ uint32 mode; //!< GLenum
+};
+
+//! Command that corresponds to glDeleteBuffers.
+struct DeleteBuffers {
+ static const CommandId kCmdId = 289;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+ uint32 buffers_shm_id; //!< uint32
+ uint32 buffers_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glDeleteBuffers.
+struct DeleteBuffersImmediate {
+ static const CommandId kCmdId = 290;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+};
+
+//! Command that corresponds to glDeleteFramebuffers.
+struct DeleteFramebuffers {
+ static const CommandId kCmdId = 291;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+ uint32 framebuffers_shm_id; //!< uint32
+ uint32 framebuffers_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glDeleteFramebuffers.
+struct DeleteFramebuffersImmediate {
+ static const CommandId kCmdId = 292;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+};
+
+//! Command that corresponds to glDeleteProgram.
+struct DeleteProgram {
+ static const CommandId kCmdId = 293;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+};
+
+//! Command that corresponds to glDeleteRenderbuffers.
+struct DeleteRenderbuffers {
+ static const CommandId kCmdId = 294;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+ uint32 renderbuffers_shm_id; //!< uint32
+ uint32 renderbuffers_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glDeleteRenderbuffers.
+struct DeleteRenderbuffersImmediate {
+ static const CommandId kCmdId = 295;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+};
+
+//! Command that corresponds to glDeleteShader.
+struct DeleteShader {
+ static const CommandId kCmdId = 296;
+
+ CommandHeader header;
+ uint32 shader; //!< GLuint
+};
+
+//! Command that corresponds to glDeleteTextures.
+struct DeleteTextures {
+ static const CommandId kCmdId = 297;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+ uint32 textures_shm_id; //!< uint32
+ uint32 textures_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glDeleteTextures.
+struct DeleteTexturesImmediate {
+ static const CommandId kCmdId = 298;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+};
+
+//! Command that corresponds to glDepthFunc.
+struct DepthFunc {
+ static const CommandId kCmdId = 299;
+
+ CommandHeader header;
+ uint32 func; //!< GLenum
+};
+
+//! Command that corresponds to glDepthMask.
+struct DepthMask {
+ static const CommandId kCmdId = 300;
+
+ CommandHeader header;
+ uint32 flag; //!< GLboolean
+};
+
+//! Command that corresponds to glDepthRangef.
+struct DepthRangef {
+ static const CommandId kCmdId = 301;
+
+ CommandHeader header;
+ float zNear; //!< GLclampf
+ float zFar; //!< GLclampf
+};
+
+//! Command that corresponds to glDetachShader.
+struct DetachShader {
+ static const CommandId kCmdId = 302;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 shader; //!< GLuint
+};
+
+//! Command that corresponds to glDisable.
+struct Disable {
+ static const CommandId kCmdId = 303;
+
+ CommandHeader header;
+ uint32 cap; //!< GLenum
+};
+
+//! Command that corresponds to glDisableVertexAttribArray.
+struct DisableVertexAttribArray {
+ static const CommandId kCmdId = 304;
+
+ CommandHeader header;
+ uint32 index; //!< GLuint
+};
+
+//! Command that corresponds to glDrawArrays.
+struct DrawArrays {
+ static const CommandId kCmdId = 305;
+
+ CommandHeader header;
+ uint32 mode; //!< GLenum
+ int32 first; //!< GLint
+ int32 count; //!< GLsizei
+};
+
+//! Command that corresponds to glDrawElements.
+struct DrawElements {
+ static const CommandId kCmdId = 306;
+
+ CommandHeader header;
+ uint32 mode; //!< GLenum
+ int32 count; //!< GLsizei
+ uint32 type; //!< GLenum
+ uint32 index_offset; //!< GLuint
+};
+
+//! Command that corresponds to glEnable.
+struct Enable {
+ static const CommandId kCmdId = 307;
+
+ CommandHeader header;
+ uint32 cap; //!< GLenum
+};
+
+//! Command that corresponds to glEnableVertexAttribArray.
+struct EnableVertexAttribArray {
+ static const CommandId kCmdId = 308;
+
+ CommandHeader header;
+ uint32 index; //!< GLuint
+};
+
+//! Command that corresponds to glFinish.
+struct Finish {
+ static const CommandId kCmdId = 309;
+
+ CommandHeader header;
+};
+
+//! Command that corresponds to glFlush.
+struct Flush {
+ static const CommandId kCmdId = 310;
+
+ CommandHeader header;
+};
+
+//! Command that corresponds to glFramebufferRenderbuffer.
+struct FramebufferRenderbuffer {
+ static const CommandId kCmdId = 311;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 attachment; //!< GLenum
+ uint32 renderbuffertarget; //!< GLenum
+ uint32 renderbuffer; //!< GLuint
+};
+
+//! Command that corresponds to glFramebufferTexture2D.
+struct FramebufferTexture2D {
+ static const CommandId kCmdId = 312;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 attachment; //!< GLenum
+ uint32 textarget; //!< GLenum
+ uint32 texture; //!< GLuint
+ int32 level; //!< GLint
+};
+
+//! Command that corresponds to glFrontFace.
+struct FrontFace {
+ static const CommandId kCmdId = 313;
+
+ CommandHeader header;
+ uint32 mode; //!< GLenum
+};
+
+//! Command that corresponds to glGenBuffers.
+struct GenBuffers {
+ static const CommandId kCmdId = 314;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+ uint32 buffers_shm_id; //!< uint32
+ uint32 buffers_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glGenBuffers.
+struct GenBuffersImmediate {
+ static const CommandId kCmdId = 315;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+};
+
+//! Command that corresponds to glGenerateMipmap.
+struct GenerateMipmap {
+ static const CommandId kCmdId = 316;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+};
+
+//! Command that corresponds to glGenFramebuffers.
+struct GenFramebuffers {
+ static const CommandId kCmdId = 317;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+ uint32 framebuffers_shm_id; //!< uint32
+ uint32 framebuffers_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glGenFramebuffers.
+struct GenFramebuffersImmediate {
+ static const CommandId kCmdId = 318;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+};
+
+//! Command that corresponds to glGenRenderbuffers.
+struct GenRenderbuffers {
+ static const CommandId kCmdId = 319;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+ uint32 renderbuffers_shm_id; //!< uint32
+ uint32 renderbuffers_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glGenRenderbuffers.
+struct GenRenderbuffersImmediate {
+ static const CommandId kCmdId = 320;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+};
+
+//! Command that corresponds to glGenTextures.
+struct GenTextures {
+ static const CommandId kCmdId = 321;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+ uint32 textures_shm_id; //!< uint32
+ uint32 textures_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glGenTextures.
+struct GenTexturesImmediate {
+ static const CommandId kCmdId = 322;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+};
+
+//! Command that corresponds to glGetActiveAttrib.
+struct GetActiveAttrib {
+ static const CommandId kCmdId = 323;
+
+ struct Result {
+ int32 success;
+ int32 size;
+ uint32 type;
+ };
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 index; //!< GLuint
+ uint32 name_bucket_id; //!< uint32
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetActiveUniform.
+struct GetActiveUniform {
+ static const CommandId kCmdId = 324;
+
+ struct Result {
+ int32 success;
+ int32 size;
+ uint32 type;
+ };
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 index; //!< GLuint
+ uint32 name_bucket_id; //!< uint32
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetAttachedShaders.
+struct GetAttachedShaders {
+ static const CommandId kCmdId = 325;
+
+ typedef SizedResult<GLuint> Result;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+ uint32 result_size; //!< uint32
+};
+
+//! Command that corresponds to glGetAttribLocation.
+struct GetAttribLocation {
+ static const CommandId kCmdId = 326;
+
+ typedef GLint Result;
+
+ CommandHeader header;
+ uint32 program;
+ uint32 name_shm_id;
+ uint32 name_shm_offset;
+ uint32 location_shm_id;
+ uint32 location_shm_offset;
+ uint32 data_size;
+};
+
+//! Immediate version of command that corresponds to glGetAttribLocation.
+struct GetAttribLocationImmediate {
+ static const CommandId kCmdId = 327;
+
+ typedef GLint Result;
+
+ CommandHeader header;
+ uint32 program;
+ uint32 location_shm_id;
+ uint32 location_shm_offset;
+ uint32 data_size;
+};
+
+//! Bucket version of command that corresponds to glGetAttribLocation.
+struct GetAttribLocationBucket {
+ static const CommandId kCmdId = 434;
+
+ typedef GLint Result;
+
+ CommandHeader header;
+ uint32 program;
+ uint32 name_bucket_id;
+ uint32 location_shm_id;
+ uint32 location_shm_offset;
+};
+
+//! Command that corresponds to glGetBooleanv.
+struct GetBooleanv {
+ static const CommandId kCmdId = 328;
+
+ typedef SizedResult<GLboolean> Result;
+
+ CommandHeader header;
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetBufferParameteriv.
+struct GetBufferParameteriv {
+ static const CommandId kCmdId = 329;
+
+ typedef SizedResult<GLint> Result;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetError.
+struct GetError {
+ static const CommandId kCmdId = 330;
+
+ typedef GLenum Result;
+
+ CommandHeader header;
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetFloatv.
+struct GetFloatv {
+ static const CommandId kCmdId = 331;
+
+ typedef SizedResult<GLfloat> Result;
+
+ CommandHeader header;
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetFramebufferAttachmentParameteriv.
+struct GetFramebufferAttachmentParameteriv {
+ static const CommandId kCmdId = 332;
+
+ typedef SizedResult<GLint> Result;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 attachment; //!< GLenum
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetIntegerv.
+struct GetIntegerv {
+ static const CommandId kCmdId = 333;
+
+ typedef SizedResult<GLint> Result;
+
+ CommandHeader header;
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetProgramiv.
+struct GetProgramiv {
+ static const CommandId kCmdId = 334;
+
+ typedef SizedResult<GLint> Result;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetProgramInfoLog.
+struct GetProgramInfoLog {
+ static const CommandId kCmdId = 335;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 bucket_id; //!< uint32
+};
+
+//! Command that corresponds to glGetRenderbufferParameteriv.
+struct GetRenderbufferParameteriv {
+ static const CommandId kCmdId = 336;
+
+ typedef SizedResult<GLint> Result;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetShaderiv.
+struct GetShaderiv {
+ static const CommandId kCmdId = 337;
+
+ typedef SizedResult<GLint> Result;
+
+ CommandHeader header;
+ uint32 shader; //!< GLuint
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetShaderInfoLog.
+struct GetShaderInfoLog {
+ static const CommandId kCmdId = 338;
+
+ CommandHeader header;
+ uint32 shader; //!< GLuint
+ uint32 bucket_id; //!< uint32
+};
+
+//! Command that corresponds to glGetShaderPrecisionFormat.
+struct GetShaderPrecisionFormat {
+ static const CommandId kCmdId = 339;
+
+ struct Result {
+ int32 success;
+ int32 min_range;
+ int32 max_range;
+ int32 precision;
+ };
+
+ CommandHeader header;
+ uint32 shadertype; //!< GLenum
+ uint32 precisiontype; //!< GLenum
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetShaderSource.
+struct GetShaderSource {
+ static const CommandId kCmdId = 340;
+
+ CommandHeader header;
+ uint32 shader; //!< GLuint
+ uint32 bucket_id; //!< uint32
+};
+
+//! Command that corresponds to glGetString.
+struct GetString {
+ static const CommandId kCmdId = 341;
+
+ CommandHeader header;
+ uint32 name; //!< GLenum
+ uint32 bucket_id; //!< uint32
+};
+
+//! Command that corresponds to glGetTexParameterfv.
+struct GetTexParameterfv {
+ static const CommandId kCmdId = 342;
+
+ typedef SizedResult<GLfloat> Result;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetTexParameteriv.
+struct GetTexParameteriv {
+ static const CommandId kCmdId = 343;
+
+ typedef SizedResult<GLint> Result;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetUniformLocation.
+struct GetUniformLocation {
+ static const CommandId kCmdId = 346;
+
+ typedef GLint Result;
+
+ CommandHeader header;
+ uint32 program;
+ uint32 name_shm_id;
+ uint32 name_shm_offset;
+ uint32 location_shm_id;
+ uint32 location_shm_offset;
+ uint32 data_size;
+};
+
+//! Immediate version of command that corresponds to glGetUniformLocation.
+struct GetUniformLocationImmediate {
+ static const CommandId kCmdId = 347;
+
+ typedef GLint Result;
+
+ CommandHeader header;
+ uint32 program;
+ uint32 location_shm_id;
+ uint32 location_shm_offset;
+ uint32 data_size;
+};
+
+//! Bucket version of command that corresponds to glGetUniformLocation.
+struct GetUniformLocationBucket {
+ static const CommandId kCmdId = 433;
+
+ typedef GLint Result;
+
+ CommandHeader header;
+ uint32 program;
+ uint32 name_bucket_id;
+ uint32 location_shm_id;
+ uint32 location_shm_offset;
+};
+
+
+//! Command that corresponds to glGetUniformfv.
+struct GetUniformfv {
+ static const CommandId kCmdId = 344;
+
+ typedef SizedResult<GLfloat> Result;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ int32 location; //!< GLint
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetUniformiv.
+struct GetUniformiv {
+ static const CommandId kCmdId = 345;
+
+ typedef SizedResult<GLint> Result;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ int32 location; //!< GLint
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetVertexAttribfv.
+struct GetVertexAttribfv {
+ static const CommandId kCmdId = 348;
+
+ typedef SizedResult<GLfloat> Result;
+
+ CommandHeader header;
+ uint32 index; //!< GLuint
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetVertexAttribiv.
+struct GetVertexAttribiv {
+ static const CommandId kCmdId = 349;
+
+ typedef SizedResult<GLint> Result;
+
+ CommandHeader header;
+ uint32 index; //!< GLuint
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glGetVertexAttribPointerv.
+struct GetVertexAttribPointerv {
+ static const CommandId kCmdId = 350;
+
+ typedef SizedResult<GLuint> Result;
+
+ CommandHeader header;
+ uint32 index; //!< GLuint
+ uint32 pname; //!< GLenum
+ uint32 pointer_shm_id; //!< uint32
+ uint32 pointer_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glHint.
+struct Hint {
+ static const CommandId kCmdId = 351;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 mode; //!< GLenum
+};
+
+//! Command that corresponds to glIsBuffer.
+struct IsBuffer {
+ static const CommandId kCmdId = 352;
+
+ typedef uint32 Result;
+
+ CommandHeader header;
+ uint32 buffer; //!< GLuint
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glIsEnabled.
+struct IsEnabled {
+ static const CommandId kCmdId = 353;
+
+ typedef uint32 Result;
+
+ CommandHeader header;
+ uint32 cap; //!< GLenum
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glIsFramebuffer.
+struct IsFramebuffer {
+ static const CommandId kCmdId = 354;
+
+ typedef uint32 Result;
+
+ CommandHeader header;
+ uint32 framebuffer; //!< GLuint
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glIsProgram.
+struct IsProgram {
+ static const CommandId kCmdId = 355;
+
+ typedef uint32 Result;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glIsRenderbuffer.
+struct IsRenderbuffer {
+ static const CommandId kCmdId = 356;
+
+ typedef uint32 Result;
+
+ CommandHeader header;
+ uint32 renderbuffer; //!< GLuint
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glIsShader.
+struct IsShader {
+ static const CommandId kCmdId = 357;
+
+ typedef uint32 Result;
+
+ CommandHeader header;
+ uint32 shader; //!< GLuint
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glIsTexture.
+struct IsTexture {
+ static const CommandId kCmdId = 358;
+
+ typedef uint32 Result;
+
+ CommandHeader header;
+ uint32 texture; //!< GLuint
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glLineWidth.
+struct LineWidth {
+ static const CommandId kCmdId = 359;
+
+ CommandHeader header;
+ float width; //!< GLfloat
+};
+
+//! Command that corresponds to glLinkProgram.
+struct LinkProgram {
+ static const CommandId kCmdId = 360;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+};
+
+//! Command that corresponds to glPixelStorei.
+struct PixelStorei {
+ static const CommandId kCmdId = 361;
+
+ CommandHeader header;
+ uint32 pname; //!< GLenum
+ int32 param; //!< GLint
+};
+
+//! Command that corresponds to glPolygonOffset.
+struct PolygonOffset {
+ static const CommandId kCmdId = 362;
+
+ CommandHeader header;
+ float factor; //!< GLfloat
+ float units; //!< GLfloat
+};
+
+//! Command that corresponds to glReadPixels.
+//! ReadPixels has the result separated from the pixel buffer so that
+//! it is easier to specify the result going to some specific place
+//! that exactly fits the rectangle of pixels.
+struct ReadPixels {
+ static const CommandId kCmdId = 363;
+
+ typedef uint32 Result;
+
+ CommandHeader header;
+ int32 x; //!< GLint
+ int32 y; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ uint32 format; //!< GLenum
+ uint32 type; //!< GLenum
+ uint32 pixels_shm_id; //!< uint32
+ uint32 pixels_shm_offset; //!< uint32
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that corresponds to glReleaseShaderCompiler.
+struct ReleaseShaderCompiler {
+ static const CommandId kCmdId = 437;
+
+ CommandHeader header;
+};
+
+//! Command that corresponds to glRenderbufferStorage.
+struct RenderbufferStorage {
+ static const CommandId kCmdId = 364;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 internalformat; //!< GLenum
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+};
+
+//! Command that corresponds to glSampleCoverage.
+struct SampleCoverage {
+ static const CommandId kCmdId = 365;
+
+ CommandHeader header;
+ float value; //!< GLclampf
+ uint32 invert; //!< GLboolean
+};
+
+//! Command that corresponds to glScissor.
+struct Scissor {
+ static const CommandId kCmdId = 366;
+
+ CommandHeader header;
+ int32 x; //!< GLint
+ int32 y; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+};
+
+//! Command that corresponds to glShaderBinary.
+struct ShaderBinary {
+ static const CommandId kCmdId = 436;
+
+ CommandHeader header;
+ int32 n; //!< GLsizei
+ uint32 shaders_shm_id; //!< uint32
+ uint32 shaders_shm_offset; //!< uint32
+ uint32 binaryformat; //!< GLenum
+ uint32 binary_shm_id; //!< uint32
+ uint32 binary_shm_offset; //!< uint32
+ int32 length; //!< GLsizei
+};
+
+//! Command that corresponds to glShaderSource.
+struct ShaderSource {
+ static const CommandId kCmdId = 367;
+
+ CommandHeader header;
+ uint32 shader; //!< GLuint
+ uint32 data_shm_id; //!< uint32
+ uint32 data_shm_offset; //!< uint32
+ uint32 data_size; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glShaderSource.
+struct ShaderSourceImmediate {
+ static const CommandId kCmdId = 368;
+
+ CommandHeader header;
+ uint32 shader; //!< GLuint
+ uint32 data_size; //!< uint32
+};
+
+//! Bucket version of command that corresponds to glShaderSource.
+struct ShaderSourceBucket {
+ static const CommandId kCmdId = 435;
+
+ CommandHeader header;
+ uint32 shader; //!< GLuint
+ uint32 data_bucket_id; //!< uint32
+};
+
+//! Command that corresponds to glStencilFunc.
+struct StencilFunc {
+ static const CommandId kCmdId = 369;
+
+ CommandHeader header;
+ uint32 func; //!< GLenum
+ int32 ref; //!< GLint
+ uint32 mask; //!< GLuint
+};
+
+//! Command that corresponds to glStencilFuncSeparate.
+struct StencilFuncSeparate {
+ static const CommandId kCmdId = 370;
+
+ CommandHeader header;
+ uint32 face; //!< GLenum
+ uint32 func; //!< GLenum
+ int32 ref; //!< GLint
+ uint32 mask; //!< GLuint
+};
+
+//! Command that corresponds to glStencilMask.
+struct StencilMask {
+ static const CommandId kCmdId = 371;
+
+ CommandHeader header;
+ uint32 mask; //!< GLuint
+};
+
+//! Command that corresponds to glStencilMaskSeparate.
+struct StencilMaskSeparate {
+ static const CommandId kCmdId = 372;
+
+ CommandHeader header;
+ uint32 face; //!< GLenum
+ uint32 mask; //!< GLuint
+};
+
+//! Command that corresponds to glStencilOp.
+struct StencilOp {
+ static const CommandId kCmdId = 373;
+
+ CommandHeader header;
+ uint32 fail; //!< GLenum
+ uint32 zfail; //!< GLenum
+ uint32 zpass; //!< GLenum
+};
+
+//! Command that corresponds to glStencilOpSeparate.
+struct StencilOpSeparate {
+ static const CommandId kCmdId = 374;
+
+ CommandHeader header;
+ uint32 face; //!< GLenum
+ uint32 fail; //!< GLenum
+ uint32 zfail; //!< GLenum
+ uint32 zpass; //!< GLenum
+};
+
+//! Command that corresponds to glTexImage2D.
+struct TexImage2D {
+ static const CommandId kCmdId = 375;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ int32 internalformat; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ int32 border; //!< GLint
+ uint32 format; //!< GLenum
+ uint32 type; //!< GLenum
+ uint32 pixels_shm_id; //!< uint32
+ uint32 pixels_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glTexImage2D.
+struct TexImage2DImmediate {
+ static const CommandId kCmdId = 376;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ int32 internalformat; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ int32 border; //!< GLint
+ uint32 format; //!< GLenum
+ uint32 type; //!< GLenum
+};
+
+//! Command that corresponds to glTexParameterf.
+struct TexParameterf {
+ static const CommandId kCmdId = 377;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 pname; //!< GLenum
+ float param; //!< GLfloat
+};
+
+//! Command that corresponds to glTexParameterfv.
+struct TexParameterfv {
+ static const CommandId kCmdId = 378;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glTexParameterfv.
+struct TexParameterfvImmediate {
+ static const CommandId kCmdId = 379;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 pname; //!< GLenum
+};
+
+//! Command that corresponds to glTexParameteri.
+struct TexParameteri {
+ static const CommandId kCmdId = 380;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 pname; //!< GLenum
+ int32 param; //!< GLint
+};
+
+//! Command that corresponds to glTexParameteriv.
+struct TexParameteriv {
+ static const CommandId kCmdId = 381;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 pname; //!< GLenum
+ uint32 params_shm_id; //!< uint32
+ uint32 params_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glTexParameteriv.
+struct TexParameterivImmediate {
+ static const CommandId kCmdId = 382;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ uint32 pname; //!< GLenum
+};
+
+//! Command that corresponds to glTexSubImage2D.
+struct TexSubImage2D {
+ static const CommandId kCmdId = 383;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ int32 xoffset; //!< GLint
+ int32 yoffset; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ uint32 format; //!< GLenum
+ uint32 type; //!< GLenum
+ uint32 pixels_shm_id; //!< uint32
+ uint32 pixels_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glTexSubImage2D.
+struct TexSubImage2DImmediate {
+ static const CommandId kCmdId = 384;
+
+ CommandHeader header;
+ uint32 target; //!< GLenum
+ int32 level; //!< GLint
+ int32 xoffset; //!< GLint
+ int32 yoffset; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+ uint32 format; //!< GLenum
+ uint32 type; //!< GLenum
+};
+
+//! Command that corresponds to glUniform1f.
+struct Uniform1f {
+ static const CommandId kCmdId = 385;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ float x; //!< GLfloat
+};
+
+//! Command that corresponds to glUniform1fv.
+struct Uniform1fv {
+ static const CommandId kCmdId = 386;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 v_shm_id; //!< uint32
+ uint32 v_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniform1fv.
+struct Uniform1fvImmediate {
+ static const CommandId kCmdId = 387;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+};
+
+//! Command that corresponds to glUniform1i.
+struct Uniform1i {
+ static const CommandId kCmdId = 388;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 x; //!< GLint
+};
+
+//! Command that corresponds to glUniform1iv.
+struct Uniform1iv {
+ static const CommandId kCmdId = 389;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 v_shm_id; //!< uint32
+ uint32 v_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniform1iv.
+struct Uniform1ivImmediate {
+ static const CommandId kCmdId = 390;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+};
+
+//! Command that corresponds to glUniform2f.
+struct Uniform2f {
+ static const CommandId kCmdId = 391;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ float x; //!< GLfloat
+ float y; //!< GLfloat
+};
+
+//! Command that corresponds to glUniform2fv.
+struct Uniform2fv {
+ static const CommandId kCmdId = 392;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 v_shm_id; //!< uint32
+ uint32 v_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniform2fv.
+struct Uniform2fvImmediate {
+ static const CommandId kCmdId = 393;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+};
+
+//! Command that corresponds to glUniform2i.
+struct Uniform2i {
+ static const CommandId kCmdId = 394;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 x; //!< GLint
+ int32 y; //!< GLint
+};
+
+//! Command that corresponds to glUniform2iv.
+struct Uniform2iv {
+ static const CommandId kCmdId = 395;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 v_shm_id; //!< uint32
+ uint32 v_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniform2iv.
+struct Uniform2ivImmediate {
+ static const CommandId kCmdId = 396;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+};
+
+//! Command that corresponds to glUniform3f.
+struct Uniform3f {
+ static const CommandId kCmdId = 397;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ float x; //!< GLfloat
+ float y; //!< GLfloat
+ float z; //!< GLfloat
+};
+
+//! Command that corresponds to glUniform3fv.
+struct Uniform3fv {
+ static const CommandId kCmdId = 398;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 v_shm_id; //!< uint32
+ uint32 v_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniform3fv.
+struct Uniform3fvImmediate {
+ static const CommandId kCmdId = 399;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+};
+
+//! Command that corresponds to glUniform3i.
+struct Uniform3i {
+ static const CommandId kCmdId = 400;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 x; //!< GLint
+ int32 y; //!< GLint
+ int32 z; //!< GLint
+};
+
+//! Command that corresponds to glUniform3iv.
+struct Uniform3iv {
+ static const CommandId kCmdId = 401;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 v_shm_id; //!< uint32
+ uint32 v_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniform3iv.
+struct Uniform3ivImmediate {
+ static const CommandId kCmdId = 402;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+};
+
+//! Command that corresponds to glUniform4f.
+struct Uniform4f {
+ static const CommandId kCmdId = 403;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ float x; //!< GLfloat
+ float y; //!< GLfloat
+ float z; //!< GLfloat
+ float w; //!< GLfloat
+};
+
+//! Command that corresponds to glUniform4fv.
+struct Uniform4fv {
+ static const CommandId kCmdId = 404;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 v_shm_id; //!< uint32
+ uint32 v_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniform4fv.
+struct Uniform4fvImmediate {
+ static const CommandId kCmdId = 405;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+};
+
+//! Command that corresponds to glUniform4i.
+struct Uniform4i {
+ static const CommandId kCmdId = 406;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 x; //!< GLint
+ int32 y; //!< GLint
+ int32 z; //!< GLint
+ int32 w; //!< GLint
+};
+
+//! Command that corresponds to glUniform4iv.
+struct Uniform4iv {
+ static const CommandId kCmdId = 407;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 v_shm_id; //!< uint32
+ uint32 v_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniform4iv.
+struct Uniform4ivImmediate {
+ static const CommandId kCmdId = 408;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+};
+
+//! Command that corresponds to glUniformMatrix2fv.
+struct UniformMatrix2fv {
+ static const CommandId kCmdId = 409;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 transpose; //!< GLboolean
+ uint32 value_shm_id; //!< uint32
+ uint32 value_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniformMatrix2fv.
+struct UniformMatrix2fvImmediate {
+ static const CommandId kCmdId = 410;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 transpose; //!< GLboolean
+};
+
+//! Command that corresponds to glUniformMatrix3fv.
+struct UniformMatrix3fv {
+ static const CommandId kCmdId = 411;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 transpose; //!< GLboolean
+ uint32 value_shm_id; //!< uint32
+ uint32 value_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniformMatrix3fv.
+struct UniformMatrix3fvImmediate {
+ static const CommandId kCmdId = 412;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 transpose; //!< GLboolean
+};
+
+//! Command that corresponds to glUniformMatrix4fv.
+struct UniformMatrix4fv {
+ static const CommandId kCmdId = 413;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 transpose; //!< GLboolean
+ uint32 value_shm_id; //!< uint32
+ uint32 value_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glUniformMatrix4fv.
+struct UniformMatrix4fvImmediate {
+ static const CommandId kCmdId = 414;
+
+ CommandHeader header;
+ int32 location; //!< GLint
+ int32 count; //!< GLsizei
+ uint32 transpose; //!< GLboolean
+};
+
+//! Command that corresponds to glUseProgram.
+struct UseProgram {
+ static const CommandId kCmdId = 415;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+};
+
+//! Command that corresponds to glValidateProgram.
+struct ValidateProgram {
+ static const CommandId kCmdId = 416;
+
+ CommandHeader header;
+ uint32 program; //!< GLuint
+};
+
+//! Command that corresponds to glVertexAttrib1f.
+struct VertexAttrib1f {
+ static const CommandId kCmdId = 417;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+ float x; //!< GLfloat
+};
+
+//! Command that corresponds to glVertexAttrib1fv.
+struct VertexAttrib1fv {
+ static const CommandId kCmdId = 418;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+ uint32 values_shm_id; //!< uint32
+ uint32 values_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glVertexAttrib1fv.
+struct VertexAttrib1fvImmediate {
+ static const CommandId kCmdId = 419;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+};
+
+//! Command that corresponds to glVertexAttrib2f.
+struct VertexAttrib2f {
+ static const CommandId kCmdId = 420;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+ float x; //!< GLfloat
+ float y; //!< GLfloat
+};
+
+//! Command that corresponds to glVertexAttrib2fv.
+struct VertexAttrib2fv {
+ static const CommandId kCmdId = 421;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+ uint32 values_shm_id; //!< uint32
+ uint32 values_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glVertexAttrib2fv.
+struct VertexAttrib2fvImmediate {
+ static const CommandId kCmdId = 422;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+};
+
+//! Command that corresponds to glVertexAttrib3f.
+struct VertexAttrib3f {
+ static const CommandId kCmdId = 423;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+ float x; //!< GLfloat
+ float y; //!< GLfloat
+ float z; //!< GLfloat
+};
+
+//! Command that corresponds to glVertexAttrib3fv.
+struct VertexAttrib3fv {
+ static const CommandId kCmdId = 424;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+ uint32 values_shm_id; //!< uint32
+ uint32 values_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glVertexAttrib3fv.
+struct VertexAttrib3fvImmediate {
+ static const CommandId kCmdId = 425;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+};
+
+//! Command that corresponds to glVertexAttrib4f.
+struct VertexAttrib4f {
+ static const CommandId kCmdId = 426;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+ float x; //!< GLfloat
+ float y; //!< GLfloat
+ float z; //!< GLfloat
+ float w; //!< GLfloat
+};
+
+//! Command that corresponds to glVertexAttrib4fv.
+struct VertexAttrib4fv {
+ static const CommandId kCmdId = 427;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+ uint32 values_shm_id; //!< uint32
+ uint32 values_shm_offset; //!< uint32
+};
+
+//! Immediate version of command that corresponds to glVertexAttrib4fv.
+struct VertexAttrib4fvImmediate {
+ static const CommandId kCmdId = 428;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+};
+
+//! Command that corresponds to glVertexAttribPointer.
+struct VertexAttribPointer {
+ static const CommandId kCmdId = 429;
+
+ CommandHeader header;
+ uint32 indx; //!< GLuint
+ int32 size; //!< GLint
+ uint32 type; //!< GLenum
+ uint32 normalized; //!< GLboolean
+ int32 stride; //!< GLsizei
+ uint32 offset; //!< GLuint
+};
+
+//! Command that corresponds to glViewport.
+struct Viewport {
+ static const CommandId kCmdId = 430;
+
+ CommandHeader header;
+ int32 x; //!< GLint
+ int32 y; //!< GLint
+ int32 width; //!< GLsizei
+ int32 height; //!< GLsizei
+};
+
+//! Command that corresponds to SwapBuffers.
+struct SwapBuffers {
+ static const CommandId kCmdId = 431;
+
+ CommandHeader header;
+};
+
+//! Command that corresponds to GetMaxValueInBuffer.
+struct GetMaxValueInBuffer {
+ static const CommandId kCmdId = 436;
+
+ typedef GLuint Result;
+
+ CommandHeader header;
+ uint32 buffer_id; //!< GLuint
+ int32 count; //!< GLsizei
+ uint32 type; //!< GLenum
+ uint32 offset; //!< GLuint
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+//! Command that generates shared ids for contexts that share resources.
+struct GenSharedIds {
+ static const CommandId kCmdId = 439;
+
+ CommandHeader header;
+ uint32 namespace_id; //!< GLuint
+ uint32 id_offset; //!< GLuint
+ int32 n; //!< GLsizei
+ uint32 ids_shm_id; //!< uint32
+ uint32 ids_shm_offset; //!< uint32
+};
+
+//! Command that deletes shared ids.
+struct DeleteSharedIds {
+ static const CommandId kCmdId = 440;
+
+ CommandHeader header;
+ uint32 namespace_id; //!< GLuint
+ int32 n; //!< GLsizei
+ uint32 ids_shm_id; //!< uint32
+ uint32 ids_shm_offset; //!< uint32
+};
+
+//! Command that registers shared ids. It is an error to attempt
+//! to register an id that is already registered.
+struct RegisterSharedIds {
+ static const CommandId kCmdId = 441;
+
+ CommandHeader header;
+ uint32 namespace_id; //!< GLuint
+ int32 n; //!< GLsizei
+ uint32 ids_shm_id; //!< uint32
+ uint32 ids_shm_offset; //!< uint32
+};
+
+//! Command that enables features. The bucket should contain the feature string.
+struct CommandBufferEnable {
+ static const CommandId kCmdId = 442;
+
+ typedef GLint Result;
+
+ CommandHeader header;
+ uint32 bucket_id; //!< GLuint
+ uint32 result_shm_id; //!< uint32
+ uint32 result_shm_offset; //!< uint32
+};
+
+
diff --git a/gpu/command_buffer/service/BUILD.gn b/gpu/command_buffer/service/BUILD.gn
new file mode 100644
index 0000000..e6c58b6
--- /dev/null
+++ b/gpu/command_buffer/service/BUILD.gn
@@ -0,0 +1,159 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/ui.gni")
+import("//third_party/protobuf/proto_library.gni")
+
+source_set("service") {
+ sources = [
+ "async_pixel_transfer_delegate.cc",
+ "async_pixel_transfer_delegate.h",
+ "async_pixel_transfer_manager_android.cc",
+ "async_pixel_transfer_manager_idle.cc",
+ "async_pixel_transfer_manager_idle.h",
+ "async_pixel_transfer_manager_linux.cc",
+ "async_pixel_transfer_manager_mac.cc",
+ "async_pixel_transfer_manager_share_group.cc",
+ "async_pixel_transfer_manager_share_group.h",
+ "async_pixel_transfer_manager_stub.cc",
+ "async_pixel_transfer_manager_stub.h",
+ "async_pixel_transfer_manager_sync.cc",
+ "async_pixel_transfer_manager_sync.h",
+ "async_pixel_transfer_manager_win.cc",
+ "async_pixel_transfer_manager.cc",
+ "async_pixel_transfer_manager.h",
+ "buffer_manager.h",
+ "buffer_manager.cc",
+ "cmd_buffer_engine.h",
+ "cmd_parser.cc",
+ "cmd_parser.h",
+ "command_buffer_service.cc",
+ "command_buffer_service.h",
+ "common_decoder.cc",
+ "common_decoder.h",
+ "context_group.h",
+ "context_group.cc",
+ "context_state.h",
+ "context_state_autogen.h",
+ "context_state_impl_autogen.h",
+ "context_state.cc",
+ "error_state.cc",
+ "error_state.h",
+ "feature_info.h",
+ "feature_info.cc",
+ "framebuffer_manager.h",
+ "framebuffer_manager.cc",
+ "gles2_cmd_copy_texture_chromium.cc",
+ "gles2_cmd_copy_texture_chromium.h",
+ "gles2_cmd_decoder.h",
+ "gles2_cmd_decoder_autogen.h",
+ "gles2_cmd_decoder.cc",
+ "gles2_cmd_validation.h",
+ "gles2_cmd_validation.cc",
+ "gles2_cmd_validation_autogen.h",
+ "gles2_cmd_validation_implementation_autogen.h",
+ "gl_context_virtual.cc",
+ "gl_context_virtual.h",
+ "gl_state_restorer_impl.cc",
+ "gl_state_restorer_impl.h",
+ "gl_utils.h",
+ "gpu_scheduler.cc",
+ "gpu_scheduler.h",
+ "gpu_scheduler_mock.h",
+ "gpu_state_tracer.cc",
+ "gpu_state_tracer.h",
+ "gpu_switches.cc",
+ "gpu_switches.h",
+ "gpu_tracer.cc",
+ "gpu_tracer.h",
+ "id_manager.h",
+ "id_manager.cc",
+ "image_manager.cc",
+ "image_manager.h",
+ "in_process_command_buffer.cc",
+ "in_process_command_buffer.h",
+ "logger.cc",
+ "logger.h",
+ "mailbox_manager.cc",
+ "mailbox_manager.h",
+ "mailbox_synchronizer.cc",
+ "mailbox_synchronizer.h",
+ "memory_program_cache.h",
+ "memory_program_cache.cc",
+ "mocks.h",
+ "program_manager.h",
+ "program_manager.cc",
+ "query_manager.h",
+ "query_manager.cc",
+ "renderbuffer_manager.h",
+ "renderbuffer_manager.cc",
+ "program_cache.h",
+ "program_cache.cc",
+ "shader_manager.h",
+ "shader_manager.cc",
+ "shader_translator.h",
+ "shader_translator.cc",
+ "shader_translator_cache.h",
+ "shader_translator_cache.cc",
+ "stream_texture_manager_in_process_android.h",
+ "stream_texture_manager_in_process_android.cc",
+ "texture_definition.h",
+ "texture_definition.cc",
+ "texture_manager.h",
+ "texture_manager.cc",
+ "transfer_buffer_manager.cc",
+ "transfer_buffer_manager.h",
+ "vertex_array_manager.h",
+ "vertex_array_manager.cc",
+ "vertex_attrib_manager.h",
+ "vertex_attrib_manager.cc",
+ ]
+
+ defines = [ "GPU_IMPLEMENTATION" ]
+
+ configs += [
+ "//third_party/khronos:khronos_headers",
+ ]
+
+ # Prefer mesa GL headers to system headers, which cause problems on Win.
+ include_dirs = [ "//third_party/mesa/src/include" ]
+
+ public_deps = [
+ "//gpu/command_buffer/common",
+ ]
+ deps = [
+ ":disk_cache_proto",
+ "//base",
+ "//base/third_party/dynamic_annotations",
+ "//crypto",
+ "//third_party/angle:translator",
+ "//third_party/protobuf:protobuf_lite",
+ "//third_party/re2",
+ "//third_party/smhasher:cityhash",
+ "//ui/gfx",
+ "//ui/gfx/geometry",
+ "//ui/gl",
+ ]
+
+ if (ui_compositor_image_transport) {
+ include_dirs += [ "//third_party/khronos" ]
+ }
+
+ if (is_win || is_android || (is_linux && use_x11)) {
+ sources += [
+ "async_pixel_transfer_manager_egl.cc",
+ "async_pixel_transfer_manager_egl.h",
+ ]
+ }
+
+ if (is_android && !is_debug) {
+ # On Android optimize more since this component can be a bottleneck.
+ configs -= [ "//build/config/compiler:optimize" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+}
+
+proto_library("disk_cache_proto") {
+ sources = [ "disk_cache_proto.proto" ]
+}
diff --git a/gpu/command_buffer/service/async_pixel_transfer_delegate.cc b/gpu/command_buffer/service/async_pixel_transfer_delegate.cc
new file mode 100644
index 0000000..201026b
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_delegate.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+
+namespace gpu {
+
+AsyncMemoryParams::AsyncMemoryParams(scoped_refptr<Buffer> buffer,
+ uint32 data_offset,
+ uint32 data_size)
+ : buffer_(buffer), data_offset_(data_offset), data_size_(data_size) {
+ DCHECK(buffer_.get());
+ DCHECK(buffer_->memory());
+}
+
+AsyncMemoryParams::~AsyncMemoryParams() {
+}
+
+AsyncPixelTransferUploadStats::AsyncPixelTransferUploadStats()
+ : texture_upload_count_(0) {}
+
+AsyncPixelTransferUploadStats::~AsyncPixelTransferUploadStats() {}
+
+void AsyncPixelTransferUploadStats::AddUpload(base::TimeDelta transfer_time) {
+ base::AutoLock scoped_lock(lock_);
+ texture_upload_count_++;
+ total_texture_upload_time_ += transfer_time;
+}
+
+int AsyncPixelTransferUploadStats::GetStats(
+ base::TimeDelta* total_texture_upload_time) {
+ base::AutoLock scoped_lock(lock_);
+ if (total_texture_upload_time)
+ *total_texture_upload_time = total_texture_upload_time_;
+ return texture_upload_count_;
+}
+
+AsyncPixelTransferDelegate::AsyncPixelTransferDelegate() {}
+
+AsyncPixelTransferDelegate::~AsyncPixelTransferDelegate() {}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_delegate.h b/gpu/command_buffer/service/async_pixel_transfer_delegate.h
new file mode 100644
index 0000000..b41bcd5
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_delegate.h
@@ -0,0 +1,117 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/buffer.h"
+#include "gpu/gpu_export.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace base {
+class SharedMemory;
+}
+
+namespace gpu {
+
+struct AsyncTexImage2DParams {
+ GLenum target;
+ GLint level;
+ GLenum internal_format;
+ GLsizei width;
+ GLsizei height;
+ GLint border;
+ GLenum format;
+ GLenum type;
+};
+
+struct AsyncTexSubImage2DParams {
+ GLenum target;
+ GLint level;
+ GLint xoffset;
+ GLint yoffset;
+ GLsizei width;
+ GLsizei height;
+ GLenum format;
+ GLenum type;
+};
+
+class AsyncMemoryParams {
+ public:
+ AsyncMemoryParams(scoped_refptr<Buffer> buffer,
+ uint32 data_offset,
+ uint32 data_size);
+ ~AsyncMemoryParams();
+
+ scoped_refptr<Buffer> buffer() const { return buffer_; }
+ uint32 data_size() const { return data_size_; }
+ uint32 data_offset() const { return data_offset_; }
+ void* GetDataAddress() const {
+ return buffer_->GetDataAddress(data_offset_, data_size_);
+ }
+
+ private:
+ scoped_refptr<Buffer> buffer_;
+ uint32 data_offset_;
+ uint32 data_size_;
+};
+
+class AsyncPixelTransferUploadStats
+ : public base::RefCountedThreadSafe<AsyncPixelTransferUploadStats> {
+ public:
+ AsyncPixelTransferUploadStats();
+
+ void AddUpload(base::TimeDelta transfer_time);
+ int GetStats(base::TimeDelta* total_texture_upload_time);
+
+ private:
+ friend class base::RefCountedThreadSafe<AsyncPixelTransferUploadStats>;
+
+ ~AsyncPixelTransferUploadStats();
+
+ int texture_upload_count_;
+ base::TimeDelta total_texture_upload_time_;
+ base::Lock lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferUploadStats);
+};
+
+class GPU_EXPORT AsyncPixelTransferDelegate {
+ public:
+ virtual ~AsyncPixelTransferDelegate();
+
+ // The callback occurs on the caller thread, once the texture is
+ // safe/ready to be used.
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) = 0;
+
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) = 0;
+
+ // Returns true if there is a transfer in progress.
+ virtual bool TransferIsInProgress() = 0;
+
+ // Block until the specified transfer completes.
+ virtual void WaitForTransferCompletion() = 0;
+
+ protected:
+ AsyncPixelTransferDelegate();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegate);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_H_
+
diff --git a/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.cc b/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.cc
new file mode 100644
index 0000000..8e2d75f
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+
+namespace gpu {
+
+MockAsyncPixelTransferDelegate::MockAsyncPixelTransferDelegate() {
+}
+
+MockAsyncPixelTransferDelegate::~MockAsyncPixelTransferDelegate() {
+ Destroy();
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h b/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h
new file mode 100644
index 0000000..9d28730
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_MOCK
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_MOCK
+
+#include "base/basictypes.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+
+class MockAsyncPixelTransferDelegate : public AsyncPixelTransferDelegate {
+ public:
+ MockAsyncPixelTransferDelegate();
+ virtual ~MockAsyncPixelTransferDelegate();
+
+ // Called in ~MockAsyncPixelTransferDelegate.
+ MOCK_METHOD0(Destroy, void());
+
+ // Implement AsyncPixelTransferDelegate.
+ MOCK_METHOD3(AsyncTexImage2D,
+ void(const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback));
+ MOCK_METHOD2(AsyncTexSubImage2D,
+ void(const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params));
+ MOCK_METHOD0(TransferIsInProgress, bool());
+ MOCK_METHOD0(WaitForTransferCompletion, void());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAsyncPixelTransferDelegate);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_DELEGATE_MOCK
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager.cc b/gpu/command_buffer/service/async_pixel_transfer_manager.cc
new file mode 100644
index 0000000..efc893a
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager.cc
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+
+namespace gpu {
+
+AsyncPixelTransferCompletionObserver::AsyncPixelTransferCompletionObserver() {}
+
+AsyncPixelTransferCompletionObserver::~AsyncPixelTransferCompletionObserver() {}
+
+AsyncPixelTransferManager::AsyncPixelTransferManager() : manager_(NULL) {}
+
+AsyncPixelTransferManager::~AsyncPixelTransferManager() {
+ if (manager_)
+ manager_->RemoveObserver(this);
+
+ for (TextureToDelegateMap::iterator ref = delegate_map_.begin();
+ ref != delegate_map_.end();
+ ref++) {
+ ref->first->RemoveObserver();
+ }
+}
+
+void AsyncPixelTransferManager::Initialize(gles2::TextureManager* manager) {
+ manager_ = manager;
+ manager_->AddObserver(this);
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManager::CreatePixelTransferDelegate(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ DCHECK(!GetPixelTransferDelegate(ref));
+ AsyncPixelTransferDelegate* delegate =
+ CreatePixelTransferDelegateImpl(ref, define_params);
+ delegate_map_[ref] = make_linked_ptr(delegate);
+ ref->AddObserver();
+ return delegate;
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManager::GetPixelTransferDelegate(
+ gles2::TextureRef* ref) {
+ TextureToDelegateMap::iterator it = delegate_map_.find(ref);
+ if (it == delegate_map_.end()) {
+ return NULL;
+ } else {
+ return it->second.get();
+ }
+}
+
+void AsyncPixelTransferManager::ClearPixelTransferDelegateForTest(
+ gles2::TextureRef* ref) {
+ TextureToDelegateMap::iterator it = delegate_map_.find(ref);
+ if (it != delegate_map_.end()) {
+ delegate_map_.erase(it);
+ ref->RemoveObserver();
+ }
+}
+
+bool AsyncPixelTransferManager::AsyncTransferIsInProgress(
+ gles2::TextureRef* ref) {
+ AsyncPixelTransferDelegate* delegate = GetPixelTransferDelegate(ref);
+ return delegate && delegate->TransferIsInProgress();
+}
+
+void AsyncPixelTransferManager::OnTextureManagerDestroying(
+ gles2::TextureManager* manager) {
+ // TextureManager should outlive AsyncPixelTransferManager.
+ NOTREACHED();
+ manager_ = NULL;
+}
+
+void AsyncPixelTransferManager::OnTextureRefDestroying(
+ gles2::TextureRef* texture) {
+ TextureToDelegateMap::iterator it = delegate_map_.find(texture);
+ if (it != delegate_map_.end()) {
+ delegate_map_.erase(it);
+ texture->RemoveObserver();
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager.h b/gpu/command_buffer/service/async_pixel_transfer_manager.h
new file mode 100644
index 0000000..1a818f3
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager.h
@@ -0,0 +1,122 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_H_
+
+#include <set>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/gpu_export.h"
+
+#if defined(COMPILER_GCC)
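+// base::hash_map has no default hash for raw pointer keys under GCC, so
+// provide one that hashes the pointer value itself.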
+namespace BASE_HASH_NAMESPACE {
+template <>
+ struct hash<gpu::gles2::TextureRef*> {
+ size_t operator()(gpu::gles2::TextureRef* ptr) const {
+ return hash<size_t>()(reinterpret_cast<size_t>(ptr));
+ }
+};
+} // namespace BASE_HASH_NAMESPACE
+#endif // COMPILER_GCC
+
+namespace gfx {
+class GLContext;
+}
+
+namespace gpu {
+class AsyncPixelTransferDelegate;
+class AsyncMemoryParams;
+struct AsyncTexImage2DParams;
+
+class AsyncPixelTransferCompletionObserver
+ : public base::RefCountedThreadSafe<AsyncPixelTransferCompletionObserver> {
+ public:
+ AsyncPixelTransferCompletionObserver();
+
+ virtual void DidComplete(const AsyncMemoryParams& mem_params) = 0;
+
+ protected:
+ virtual ~AsyncPixelTransferCompletionObserver();
+
+ private:
+ friend class base::RefCountedThreadSafe<AsyncPixelTransferCompletionObserver>;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferCompletionObserver);
+};
+
+class GPU_EXPORT AsyncPixelTransferManager
+ : public gles2::TextureManager::DestructionObserver {
+ public:
+ static AsyncPixelTransferManager* Create(gfx::GLContext* context);
+
+ virtual ~AsyncPixelTransferManager();
+
+ void Initialize(gles2::TextureManager* texture_manager);
+
+ virtual void BindCompletedAsyncTransfers() = 0;
+
+  // There's no guarantee that the callback will run on the caller thread.
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) = 0;
+
+ virtual uint32 GetTextureUploadCount() = 0;
+ virtual base::TimeDelta GetTotalTextureUploadTime() = 0;
+
+ // ProcessMorePendingTransfers() will be called at a good time
+ // to process a small amount of pending transfer work while
+ // NeedsProcessMorePendingTransfers() returns true. Implementations
+ // that can't dispatch work to separate threads should use
+ // this to avoid blocking the caller thread inappropriately.
+ virtual void ProcessMorePendingTransfers() = 0;
+ virtual bool NeedsProcessMorePendingTransfers() = 0;
+
+ // Wait for all AsyncTex(Sub)Image2D uploads to finish before returning.
+ virtual void WaitAllAsyncTexImage2D() = 0;
+
+ AsyncPixelTransferDelegate* CreatePixelTransferDelegate(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params);
+
+ AsyncPixelTransferDelegate* GetPixelTransferDelegate(
+ gles2::TextureRef* ref);
+
+ void ClearPixelTransferDelegateForTest(gles2::TextureRef* ref);
+
+ bool AsyncTransferIsInProgress(gles2::TextureRef* ref);
+
+ // gles2::TextureRef::DestructionObserver implementation:
+ virtual void OnTextureManagerDestroying(gles2::TextureManager* manager)
+ OVERRIDE;
+ virtual void OnTextureRefDestroying(gles2::TextureRef* texture) OVERRIDE;
+
+ protected:
+ AsyncPixelTransferManager();
+
+ private:
+ gles2::TextureManager* manager_;
+
+ typedef base::hash_map<gles2::TextureRef*,
+ linked_ptr<AsyncPixelTransferDelegate> >
+ TextureToDelegateMap;
+ TextureToDelegateMap delegate_map_;
+
+  // A factory method called by CreatePixelTransferDelegate that is overridden
+ // by each implementation.
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManager);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_android.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_android.cc
new file mode 100644
index 0000000..eadc34f
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_android.cc
@@ -0,0 +1,104 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/debug/trace_event.h"
+#include "base/sys_info.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_egl.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_idle.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_stub.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_sync.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace {
+
+enum GpuType {
+ GPU_BROADCOM,
+ GPU_IMAGINATION,
+ GPU_NVIDIA_ES31,
+ GPU_ADRENO_420,
+ GPU_OTHER,
+};
+
+std::string MakeString(const char* s) {
+ return std::string(s ? s : "");
+}
+
+GpuType GetGpuType() {
+ const std::string vendor = MakeString(
+ reinterpret_cast<const char*>(glGetString(GL_VENDOR)));
+ const std::string renderer = MakeString(
+ reinterpret_cast<const char*>(glGetString(GL_RENDERER)));
+ const std::string version = MakeString(
+ reinterpret_cast<const char*>(glGetString(GL_VERSION)));
+
+ if (vendor.find("Broadcom") != std::string::npos)
+ return GPU_BROADCOM;
+
+ if (vendor.find("Imagination") != std::string::npos)
+ return GPU_IMAGINATION;
+
+ if (vendor.find("NVIDIA") != std::string::npos &&
+ version.find("OpenGL ES 3.1") != std::string::npos) {
+ return GPU_NVIDIA_ES31;
+ }
+
+ if (vendor.find("Qualcomm") != std::string::npos &&
+ renderer.find("Adreno (TM) 420") != std::string::npos) {
+ return GPU_ADRENO_420;
+ }
+
+ return GPU_OTHER;
+}
+
+bool AllowTransferThreadForGpu() {
+ GpuType gpu = GetGpuType();
+ return gpu != GPU_BROADCOM && gpu != GPU_IMAGINATION &&
+ gpu != GPU_NVIDIA_ES31 && gpu != GPU_ADRENO_420;
+}
+
+}  // namespace
+
+// We only use threaded uploads when we can:
+// - Create EGLImages out of OpenGL textures (EGL_KHR_gl_texture_2D_image)
+// - Bind EGLImages to OpenGL textures (GL_OES_EGL_image)
+// - Use fences (to test for upload completion).
+// - The heap size is large enough.
+// TODO(kaanb|epenner): Remove the IsImagination() check pending the
+// resolution of crbug.com/249147
+// TODO(kaanb|epenner): Remove the IsLowEndDevice() check pending the
+// resolution of crbug.com/271929
+AsyncPixelTransferManager* AsyncPixelTransferManager::Create(
+ gfx::GLContext* context) {
+ DCHECK(context->IsCurrent(NULL));
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationEGLGLES2:
+ DCHECK(context);
+ if (!base::SysInfo::IsLowEndDevice() &&
+ context->HasExtension("EGL_KHR_fence_sync") &&
+ context->HasExtension("EGL_KHR_image") &&
+ context->HasExtension("EGL_KHR_image_base") &&
+ context->HasExtension("EGL_KHR_gl_texture_2D_image") &&
+ context->HasExtension("GL_OES_EGL_image") &&
+ AllowTransferThreadForGpu()) {
+ TRACE_EVENT0("gpu", "AsyncPixelTransferManager_CreateWithThread");
+ return new AsyncPixelTransferManagerEGL;
+ }
+ return new AsyncPixelTransferManagerIdle;
+ case gfx::kGLImplementationOSMesaGL: {
+ TRACE_EVENT0("gpu", "AsyncPixelTransferManager_CreateIdle");
+ return new AsyncPixelTransferManagerIdle;
+ }
+ case gfx::kGLImplementationMockGL:
+ return new AsyncPixelTransferManagerStub;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc
new file mode 100644
index 0000000..e153617
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc
@@ -0,0 +1,752 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_egl.h"
+
+#include <list>
+#include <string>
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_synthetic_delay.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface_egl.h"
+#include "ui/gl/scoped_binders.h"
+
+namespace gpu {
+
+namespace {
+
+bool CheckErrors(const char* file, int line) {
+ EGLint eglerror;
+ GLenum glerror;
+ bool success = true;
+ while ((eglerror = eglGetError()) != EGL_SUCCESS) {
+ LOG(ERROR) << "Async transfer EGL error at "
+ << file << ":" << line << " " << eglerror;
+ success = false;
+ }
+ while ((glerror = glGetError()) != GL_NO_ERROR) {
+ LOG(ERROR) << "Async transfer OpenGL error at "
+ << file << ":" << line << " " << glerror;
+ success = false;
+ }
+ return success;
+}
+#define CHECK_GL() CheckErrors(__FILE__, __LINE__)
+
+const char kAsyncTransferThreadName[] = "AsyncTransferThread";
+
+// Regular glTexImage2D call.
+void DoTexImage2D(const AsyncTexImage2DParams& tex_params, void* data) {
+ glTexImage2D(
+ GL_TEXTURE_2D, tex_params.level, tex_params.internal_format,
+ tex_params.width, tex_params.height,
+ tex_params.border, tex_params.format, tex_params.type, data);
+}
+
+// Regular glTexSubImage2D call.
+void DoTexSubImage2D(const AsyncTexSubImage2DParams& tex_params, void* data) {
+ glTexSubImage2D(
+ GL_TEXTURE_2D, tex_params.level,
+ tex_params.xoffset, tex_params.yoffset,
+ tex_params.width, tex_params.height,
+ tex_params.format, tex_params.type, data);
+}
+
+// Full glTexSubImage2D call, from glTexImage2D params.
+void DoFullTexSubImage2D(const AsyncTexImage2DParams& tex_params, void* data) {
+ glTexSubImage2D(
+ GL_TEXTURE_2D, tex_params.level,
+ 0, 0, tex_params.width, tex_params.height,
+ tex_params.format, tex_params.type, data);
+}
+
+void SetGlParametersForEglImageTexture() {
+ // These params are needed for EGLImage creation to succeed on several
+ // Android devices. I couldn't find this requirement in the EGLImage
+ // extension spec, but several devices fail without it.
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+}
+
+void PerformNotifyCompletion(
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
+ TRACE_EVENT0("gpu", "PerformNotifyCompletion");
+ observer->DidComplete(mem_params);
+}
+
+class TransferThread : public base::Thread {
+ public:
+ TransferThread() : base::Thread(kAsyncTransferThreadName) {
+ Start();
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+ SetPriority(base::kThreadPriority_Background);
+#endif
+ }
+ virtual ~TransferThread() {
+ Stop();
+ }
+
+ virtual void Init() OVERRIDE {
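+    // The upload thread gets its own small pbuffer surface and GL context.
+    // No share group is used; textures are shared with the decoder's context
+    // through EGLImages instead.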
+ gfx::GLShareGroup* share_group = NULL;
+ surface_ = new gfx::PbufferGLSurfaceEGL(gfx::Size(1, 1));
+ surface_->Initialize();
+ context_ = gfx::GLContext::CreateGLContext(
+ share_group, surface_.get(), gfx::PreferDiscreteGpu);
+ bool is_current = context_->MakeCurrent(surface_.get());
+ DCHECK(is_current);
+ }
+
+ virtual void CleanUp() OVERRIDE {
+ surface_ = NULL;
+ context_->ReleaseCurrent(surface_.get());
+ context_ = NULL;
+ }
+
+ private:
+ scoped_refptr<gfx::GLContext> context_;
+ scoped_refptr<gfx::GLSurface> surface_;
+
+ DISALLOW_COPY_AND_ASSIGN(TransferThread);
+};
+
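+// The upload thread is created lazily on first use and is shared by all
+// EGL transfer delegates in the process.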
+base::LazyInstance<TransferThread>
+ g_transfer_thread = LAZY_INSTANCE_INITIALIZER;
+
+base::MessageLoopProxy* transfer_message_loop_proxy() {
+ return g_transfer_thread.Pointer()->message_loop_proxy().get();
+}
+
+// Class which holds async pixel transfers state (EGLImage).
+// The EGLImage is accessed by either thread, but everything
+// else accessed only on the main thread.
+class TransferStateInternal
+ : public base::RefCountedThreadSafe<TransferStateInternal> {
+ public:
+ TransferStateInternal(GLuint texture_id,
+ const AsyncTexImage2DParams& define_params,
+ bool wait_for_uploads,
+ bool wait_for_creation,
+ bool use_image_preserved)
+ : texture_id_(texture_id),
+ thread_texture_id_(0),
+ transfer_completion_(true, true),
+ egl_image_(EGL_NO_IMAGE_KHR),
+ wait_for_uploads_(wait_for_uploads),
+ wait_for_creation_(wait_for_creation),
+ use_image_preserved_(use_image_preserved) {
+ define_params_ = define_params;
+ }
+
+ bool TransferIsInProgress() {
+ return !transfer_completion_.IsSignaled();
+ }
+
+ void BindTransfer() {
+ TRACE_EVENT2("gpu", "BindAsyncTransfer glEGLImageTargetTexture2DOES",
+ "width", define_params_.width,
+ "height", define_params_.height);
+ DCHECK(texture_id_);
+ if (EGL_NO_IMAGE_KHR == egl_image_)
+ return;
+
+ glBindTexture(GL_TEXTURE_2D, texture_id_);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image_);
+ bind_callback_.Run();
+
+ DCHECK(CHECK_GL());
+ }
+
+ void CreateEglImage(GLuint texture_id) {
+ TRACE_EVENT0("gpu", "eglCreateImageKHR");
+ DCHECK(texture_id);
+ DCHECK_EQ(egl_image_, EGL_NO_IMAGE_KHR);
+
+ EGLDisplay egl_display = eglGetCurrentDisplay();
+ EGLContext egl_context = eglGetCurrentContext();
+ EGLenum egl_target = EGL_GL_TEXTURE_2D_KHR;
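+    // Per EGL_KHR_gl_texture_2D_image, the client buffer for a GL texture
+    // target is the texture name cast to EGLClientBuffer.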
+ EGLClientBuffer egl_buffer =
+ reinterpret_cast<EGLClientBuffer>(texture_id);
+
+ EGLint image_preserved = use_image_preserved_ ? EGL_TRUE : EGL_FALSE;
+ EGLint egl_attrib_list[] = {
+ EGL_GL_TEXTURE_LEVEL_KHR, 0, // mip-level.
+ EGL_IMAGE_PRESERVED_KHR, image_preserved,
+ EGL_NONE
+ };
+ egl_image_ = eglCreateImageKHR(
+ egl_display,
+ egl_context,
+ egl_target,
+ egl_buffer,
+ egl_attrib_list);
+
+ DLOG_IF(ERROR, EGL_NO_IMAGE_KHR == egl_image_)
+ << "eglCreateImageKHR failed";
+ }
+
+ void CreateEglImageOnUploadThread() {
+ CreateEglImage(thread_texture_id_);
+ }
+
+ void CreateEglImageOnMainThreadIfNeeded() {
+ if (egl_image_ == EGL_NO_IMAGE_KHR) {
+ CreateEglImage(texture_id_);
+ if (wait_for_creation_) {
+ TRACE_EVENT0("gpu", "glFinish creation");
+ glFinish();
+ }
+ }
+ }
+
+ void WaitForLastUpload() {
+    // This glFinish is just a safeguard in case uploads have some
+    // GPU action that needs to occur. We could use fences and try
+    // to do this less often. However, on older drivers fences are
+    // not always reliable (e.g. Mali-400 just blocks forever).
+ if (wait_for_uploads_) {
+ TRACE_EVENT0("gpu", "glFinish");
+ glFinish();
+ }
+ }
+
+ void MarkAsTransferIsInProgress() {
+ TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
+ transfer_completion_.Reset();
+ }
+
+ void MarkAsCompleted() {
+ TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
+ transfer_completion_.Signal();
+ }
+
+ void WaitForTransferCompletion() {
+ TRACE_EVENT0("gpu", "WaitForTransferCompletion");
+ // TODO(backer): Deschedule the channel rather than blocking the main GPU
+ // thread (crbug.com/240265).
+ transfer_completion_.Wait();
+ }
+
+ void PerformAsyncTexImage2D(
+ AsyncTexImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
+ TRACE_EVENT2("gpu",
+ "PerformAsyncTexImage",
+ "width",
+ tex_params.width,
+ "height",
+ tex_params.height);
+ DCHECK(!thread_texture_id_);
+ DCHECK_EQ(0, tex_params.level);
+ if (EGL_NO_IMAGE_KHR != egl_image_) {
+ MarkAsCompleted();
+ return;
+ }
+
+ void* data = mem_params.GetDataAddress();
+
+ base::TimeTicks begin_time;
+ if (texture_upload_stats.get())
+ begin_time = base::TimeTicks::HighResNow();
+
+ {
+ TRACE_EVENT0("gpu", "glTexImage2D no data");
+ glGenTextures(1, &thread_texture_id_);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, thread_texture_id_);
+
+ SetGlParametersForEglImageTexture();
+
+ // If we need to use image_preserved, we pass the data with
+ // the allocation. Otherwise we use a NULL allocation to
+ // try to avoid any costs associated with creating the EGLImage.
+ if (use_image_preserved_)
+ DoTexImage2D(tex_params, data);
+ else
+ DoTexImage2D(tex_params, NULL);
+ }
+
+ CreateEglImageOnUploadThread();
+
+ {
+ TRACE_EVENT0("gpu", "glTexSubImage2D with data");
+
+ // If we didn't use image_preserved, we haven't uploaded
+ // the data yet, so we do this with a full texSubImage.
+ if (!use_image_preserved_)
+ DoFullTexSubImage2D(tex_params, data);
+ }
+
+ WaitForLastUpload();
+ MarkAsCompleted();
+
+ DCHECK(CHECK_GL());
+ if (texture_upload_stats.get()) {
+ texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
+ begin_time);
+ }
+ }
+
+ void PerformAsyncTexSubImage2D(
+ AsyncTexSubImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
+ TRACE_EVENT2("gpu",
+ "PerformAsyncTexSubImage2D",
+ "width",
+ tex_params.width,
+ "height",
+ tex_params.height);
+
+ DCHECK_NE(EGL_NO_IMAGE_KHR, egl_image_);
+ DCHECK_EQ(0, tex_params.level);
+
+ void* data = mem_params.GetDataAddress();
+
+ base::TimeTicks begin_time;
+ if (texture_upload_stats.get())
+ begin_time = base::TimeTicks::HighResNow();
+
+ if (!thread_texture_id_) {
+ TRACE_EVENT0("gpu", "glEGLImageTargetTexture2DOES");
+ glGenTextures(1, &thread_texture_id_);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, thread_texture_id_);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image_);
+ } else {
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, thread_texture_id_);
+ }
+ {
+ TRACE_EVENT0("gpu", "glTexSubImage2D");
+ DoTexSubImage2D(tex_params, data);
+ }
+ WaitForLastUpload();
+ MarkAsCompleted();
+
+ DCHECK(CHECK_GL());
+ if (texture_upload_stats.get()) {
+ texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
+ begin_time);
+ }
+ }
+
+ protected:
+ friend class base::RefCountedThreadSafe<TransferStateInternal>;
+ friend class gpu::AsyncPixelTransferDelegateEGL;
+
+ static void DeleteTexture(GLuint id) {
+ glDeleteTextures(1, &id);
+ }
+
+ virtual ~TransferStateInternal() {
+ if (egl_image_ != EGL_NO_IMAGE_KHR) {
+ EGLDisplay display = eglGetCurrentDisplay();
+ eglDestroyImageKHR(display, egl_image_);
+ }
+ if (thread_texture_id_) {
+ transfer_message_loop_proxy()->PostTask(FROM_HERE,
+ base::Bind(&DeleteTexture, thread_texture_id_));
+ }
+ }
+
+ // The 'real' texture.
+ GLuint texture_id_;
+
+ // The EGLImage sibling on the upload thread.
+ GLuint thread_texture_id_;
+
+ // Definition params for texture that needs binding.
+ AsyncTexImage2DParams define_params_;
+
+ // Indicates that an async transfer is in progress.
+ base::WaitableEvent transfer_completion_;
+
+ // It would be nice if we could just create a new EGLImage for
+ // every upload, but I found that didn't work, so this stores
+ // one for the lifetime of the texture.
+ EGLImageKHR egl_image_;
+
+ // Callback to invoke when AsyncTexImage2D is complete
+ // and the client can safely use the texture. This occurs
+ // during BindCompletedAsyncTransfers().
+ base::Closure bind_callback_;
+
+ // Customize when we block on fences (these are work-arounds).
+ bool wait_for_uploads_;
+ bool wait_for_creation_;
+ bool use_image_preserved_;
+};
+
+} // namespace
+
+// Class which handles async pixel transfers using EGLImageKHR and a separate
+// upload thread.
+class AsyncPixelTransferDelegateEGL
+ : public AsyncPixelTransferDelegate,
+ public base::SupportsWeakPtr<AsyncPixelTransferDelegateEGL> {
+ public:
+ AsyncPixelTransferDelegateEGL(
+ AsyncPixelTransferManagerEGL::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params);
+ virtual ~AsyncPixelTransferDelegateEGL();
+
+ void BindTransfer() { state_->BindTransfer(); }
+
+ // Implement AsyncPixelTransferDelegate:
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) OVERRIDE;
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) OVERRIDE;
+ virtual bool TransferIsInProgress() OVERRIDE;
+ virtual void WaitForTransferCompletion() OVERRIDE;
+
+ private:
+ // Returns true if a work-around was used.
+ bool WorkAroundAsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback);
+ bool WorkAroundAsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params);
+
+ // A raw pointer is safe because the SharedState is owned by the Manager,
+ // which owns this Delegate.
+ AsyncPixelTransferManagerEGL::SharedState* shared_state_;
+ scoped_refptr<TransferStateInternal> state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateEGL);
+};
+
+AsyncPixelTransferDelegateEGL::AsyncPixelTransferDelegateEGL(
+ AsyncPixelTransferManagerEGL::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params)
+ : shared_state_(shared_state) {
+  // We can't wait on uploads on Imagination (it can take 200ms+).
+ // In practice, they are complete when the CPU glTexSubImage2D completes.
+ bool wait_for_uploads = !shared_state_->is_imagination;
+
+ // Qualcomm runs into texture corruption problems if the same texture is
+ // uploaded to with both async and normal uploads. Synchronize after EGLImage
+ // creation on the main thread as a work-around.
+ bool wait_for_creation = shared_state_->is_qualcomm;
+
+ // Qualcomm has a race when using image_preserved=FALSE,
+ // which can result in black textures even after the first upload.
+  // Since using FALSE is mainly for performance (to avoid layout changes),
+  // and Qualcomm itself doesn't seem to get any performance benefit,
+  // we just use image_preserved=TRUE on Qualcomm as a work-around.
+ bool use_image_preserved =
+ shared_state_->is_qualcomm || shared_state_->is_imagination;
+
+ state_ = new TransferStateInternal(texture_id,
+ define_params,
+ wait_for_uploads,
+ wait_for_creation,
+ use_image_preserved);
+}
+
+AsyncPixelTransferDelegateEGL::~AsyncPixelTransferDelegateEGL() {}
+
+bool AsyncPixelTransferDelegateEGL::TransferIsInProgress() {
+ return state_->TransferIsInProgress();
+}
+
+void AsyncPixelTransferDelegateEGL::WaitForTransferCompletion() {
+ if (state_->TransferIsInProgress()) {
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+ g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Display);
+#endif
+
+ state_->WaitForTransferCompletion();
+ DCHECK(!state_->TransferIsInProgress());
+
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+ g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Background);
+#endif
+ }
+}
+
+void AsyncPixelTransferDelegateEGL::AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+ if (WorkAroundAsyncTexImage2D(tex_params, mem_params, bind_callback))
+ return;
+
+ DCHECK(!state_->TransferIsInProgress());
+ DCHECK_EQ(state_->egl_image_, EGL_NO_IMAGE_KHR);
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+ DCHECK_EQ(tex_params.level, 0);
+
+ // Mark the transfer in progress and save the late bind
+ // callback, so we can notify the client when it is bound.
+ shared_state_->pending_allocations.push_back(AsWeakPtr());
+ state_->bind_callback_ = bind_callback;
+
+ // Mark the transfer in progress.
+ state_->MarkAsTransferIsInProgress();
+
+ // Duplicate the shared memory so there is no way we can get
+ // a use-after-free of the raw pixels.
+ transfer_message_loop_proxy()->PostTask(FROM_HERE,
+ base::Bind(
+ &TransferStateInternal::PerformAsyncTexImage2D,
+ state_,
+ tex_params,
+ mem_params,
+ shared_state_->texture_upload_stats));
+
+ DCHECK(CHECK_GL());
+}
+
+void AsyncPixelTransferDelegateEGL::AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+ TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
+ "width", tex_params.width,
+ "height", tex_params.height);
+ if (WorkAroundAsyncTexSubImage2D(tex_params, mem_params))
+ return;
+ DCHECK(!state_->TransferIsInProgress());
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+ DCHECK_EQ(tex_params.level, 0);
+
+ // Mark the transfer in progress.
+ state_->MarkAsTransferIsInProgress();
+
+ // If this wasn't async allocated, we don't have an EGLImage yet.
+ // Create the EGLImage if it hasn't already been created.
+ state_->CreateEglImageOnMainThreadIfNeeded();
+
+  // Duplicate the shared memory so there is no way we can get
+ // a use-after-free of the raw pixels.
+ transfer_message_loop_proxy()->PostTask(FROM_HERE,
+ base::Bind(
+ &TransferStateInternal::PerformAsyncTexSubImage2D,
+ state_,
+ tex_params,
+ mem_params,
+ shared_state_->texture_upload_stats));
+
+ DCHECK(CHECK_GL());
+}
+
+namespace {
+bool IsPowerOfTwo(unsigned int x) {
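+  // A power of two has exactly one bit set, so clearing the lowest set bit
+  // with x & (x - 1) yields zero only for (non-zero) powers of two.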
+ return ((x != 0) && !(x & (x - 1)));
+}
+
+bool IsMultipleOfEight(unsigned int x) {
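+  // Multiples of eight have their three low-order bits clear.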
+ return (x & 7) == 0;
+}
+
+bool DimensionsSupportImgFastPath(int width, int height) {
+ // Multiple of eight, but not a power of two.
+ return IsMultipleOfEight(width) &&
+ IsMultipleOfEight(height) &&
+ !(IsPowerOfTwo(width) &&
+ IsPowerOfTwo(height));
+}
+} // namespace
+
+// It is very difficult to stream uploads on Imagination GPUs:
+// - glTexImage2D defers a swizzle/stall until draw-time
+// - glTexSubImage2D will sleep for 16ms on a good day, and 100ms
+// or longer if OpenGL is in heavy use by another thread.
+// The one combination that avoids these problems requires:
+// a.) Allocations/Uploads must occur on different threads/contexts.
+// b.) Texture size must be non-power-of-two.
+// When using a+b, uploads will be incorrect/corrupt unless:
+// c.) Texture size must be a multiple-of-eight.
+//
+// To achieve a.) we allocate synchronously on the main thread followed
+// by uploading on the upload thread. When b/c are not true we fall back
+// on purely synchronous allocation/upload on the main thread.
+
+bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+ if (!shared_state_->is_imagination)
+ return false;
+
+  // On Imagination we allocate synchronously all the time, even
+  // if the dimensions support fast uploads. This is for part a.)
+  // above, so allocations occur on a different thread/context than uploads.
+ void* data = mem_params.GetDataAddress();
+ SetGlParametersForEglImageTexture();
+
+ {
+ TRACE_EVENT0("gpu", "glTexImage2D with data");
+ DoTexImage2D(tex_params, data);
+ }
+
+  // The allocation has already occurred, so mark it as finished
+ // and ready for binding.
+ CHECK(!state_->TransferIsInProgress());
+
+ // If the dimensions support fast async uploads, create the
+ // EGLImage for future uploads. The late bind should not
+ // be needed since the EGLImage was created from the main thread
+ // texture, but this is required to prevent an imagination driver crash.
+ if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height)) {
+ state_->CreateEglImageOnMainThreadIfNeeded();
+ shared_state_->pending_allocations.push_back(AsWeakPtr());
+ state_->bind_callback_ = bind_callback;
+ }
+
+ DCHECK(CHECK_GL());
+ return true;
+}
+
+bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+ if (!shared_state_->is_imagination)
+ return false;
+
+ // If the dimensions support fast async uploads, we can use the
+ // normal async upload path for uploads.
+ if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height))
+ return false;
+
+ // Fall back on a synchronous stub as we don't have a known fast path.
+ // Also, older ICS drivers crash when we do any glTexSubImage2D on the
+ // same thread. To work around this we do glTexImage2D instead. Since
+ // we didn't create an EGLImage for this texture (see above), this is
+ // okay, but it limits this API to full updates for now.
+ DCHECK(!state_->egl_image_);
+ DCHECK_EQ(tex_params.xoffset, 0);
+ DCHECK_EQ(tex_params.yoffset, 0);
+ DCHECK_EQ(state_->define_params_.width, tex_params.width);
+ DCHECK_EQ(state_->define_params_.height, tex_params.height);
+ DCHECK_EQ(state_->define_params_.level, tex_params.level);
+ DCHECK_EQ(state_->define_params_.format, tex_params.format);
+ DCHECK_EQ(state_->define_params_.type, tex_params.type);
+
+ void* data = mem_params.GetDataAddress();
+ base::TimeTicks begin_time;
+ if (shared_state_->texture_upload_stats.get())
+ begin_time = base::TimeTicks::HighResNow();
+ {
+ TRACE_EVENT0("gpu", "glTexSubImage2D");
+ // Note we use define_params_ instead of tex_params.
+ // The DCHECKs above verify this is always the same.
+ DoTexImage2D(state_->define_params_, data);
+ }
+ if (shared_state_->texture_upload_stats.get()) {
+ shared_state_->texture_upload_stats
+ ->AddUpload(base::TimeTicks::HighResNow() - begin_time);
+ }
+
+ DCHECK(CHECK_GL());
+ return true;
+}
+
+AsyncPixelTransferManagerEGL::SharedState::SharedState()
+ // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
+ : texture_upload_stats(new AsyncPixelTransferUploadStats) {
+ const char* vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR));
+ if (vendor) {
+ is_imagination =
+ std::string(vendor).find("Imagination") != std::string::npos;
+ is_qualcomm = std::string(vendor).find("Qualcomm") != std::string::npos;
+ }
+}
+
+AsyncPixelTransferManagerEGL::SharedState::~SharedState() {}
+
+AsyncPixelTransferManagerEGL::AsyncPixelTransferManagerEGL() {}
+
+AsyncPixelTransferManagerEGL::~AsyncPixelTransferManagerEGL() {}
+
+void AsyncPixelTransferManagerEGL::BindCompletedAsyncTransfers() {
+ scoped_ptr<gfx::ScopedTextureBinder> texture_binder;
+
+  while (!shared_state_.pending_allocations.empty()) {
+ if (!shared_state_.pending_allocations.front().get()) {
+ shared_state_.pending_allocations.pop_front();
+ continue;
+ }
+ AsyncPixelTransferDelegateEGL* delegate =
+ shared_state_.pending_allocations.front().get();
+ // Terminate early, as all transfers finish in order, currently.
+ if (delegate->TransferIsInProgress())
+ break;
+
+ if (!texture_binder)
+ texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));
+
+ // If the transfer is finished, bind it to the texture
+ // and remove it from pending list.
+ delegate->BindTransfer();
+ shared_state_.pending_allocations.pop_front();
+ }
+}
+
+void AsyncPixelTransferManagerEGL::AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) {
+ // Post a PerformNotifyCompletion task to the upload thread. This task
+ // will run after all async transfers are complete.
+ transfer_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&PerformNotifyCompletion,
+ mem_params,
+ make_scoped_refptr(observer)));
+}
+
+uint32 AsyncPixelTransferManagerEGL::GetTextureUploadCount() {
+ return shared_state_.texture_upload_stats->GetStats(NULL);
+}
+
+base::TimeDelta AsyncPixelTransferManagerEGL::GetTotalTextureUploadTime() {
+ base::TimeDelta total_texture_upload_time;
+ shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
+ return total_texture_upload_time;
+}
+
+void AsyncPixelTransferManagerEGL::ProcessMorePendingTransfers() {
+}
+
+bool AsyncPixelTransferManagerEGL::NeedsProcessMorePendingTransfers() {
+ return false;
+}
+
+void AsyncPixelTransferManagerEGL::WaitAllAsyncTexImage2D() {
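+  // Transfers complete in queue order on the upload thread, so waiting on
+  // the most recently queued allocation waits for all of them.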
+ if (shared_state_.pending_allocations.empty())
+ return;
+
+ AsyncPixelTransferDelegateEGL* delegate =
+ shared_state_.pending_allocations.back().get();
+ if (delegate)
+ delegate->WaitForTransferCompletion();
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManagerEGL::CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ return new AsyncPixelTransferDelegateEGL(
+ &shared_state_, ref->service_id(), define_params);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_egl.h b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.h
new file mode 100644
index 0000000..8f0c4b3
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.h
@@ -0,0 +1,58 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_EGL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_EGL_H_
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/memory/ref_counted.h"
+
+namespace gpu {
+class AsyncPixelTransferDelegateEGL;
+class AsyncPixelTransferUploadStats;
+
+class AsyncPixelTransferManagerEGL : public AsyncPixelTransferManager {
+ public:
+ AsyncPixelTransferManagerEGL();
+ virtual ~AsyncPixelTransferManagerEGL();
+
+ // AsyncPixelTransferManager implementation:
+ virtual void BindCompletedAsyncTransfers() OVERRIDE;
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) OVERRIDE;
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual void ProcessMorePendingTransfers() OVERRIDE;
+ virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
+
+ // State shared between Managers and Delegates.
+ struct SharedState {
+ SharedState();
+ ~SharedState();
+
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats;
+ bool is_imagination;
+ bool is_qualcomm;
+ typedef std::list<base::WeakPtr<AsyncPixelTransferDelegateEGL> >
+ TransferQueue;
+ TransferQueue pending_allocations;
+ };
+
+ private:
+ // AsyncPixelTransferManager implementation:
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) OVERRIDE;
+
+ SharedState shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManagerEGL);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_EGL_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc
new file mode 100644
index 0000000..40ec87f
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc
@@ -0,0 +1,324 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_idle.h"
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_synthetic_delay.h"
+#include "base/lazy_instance.h"
+#include "base/memory/weak_ptr.h"
+#include "ui/gl/scoped_binders.h"
+
+namespace gpu {
+
+namespace {
+
+static uint64 g_next_pixel_transfer_state_id = 1;
+
+void PerformNotifyCompletion(
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
+ TRACE_EVENT0("gpu", "PerformNotifyCompletion");
+ observer->DidComplete(mem_params);
+}
+
+} // namespace
+
+// Class which handles async pixel transfers in a platform
+// independent way.
+class AsyncPixelTransferDelegateIdle
+ : public AsyncPixelTransferDelegate,
+ public base::SupportsWeakPtr<AsyncPixelTransferDelegateIdle> {
+ public:
+ AsyncPixelTransferDelegateIdle(
+      AsyncPixelTransferManagerIdle::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params);
+ virtual ~AsyncPixelTransferDelegateIdle();
+
+ // Implement AsyncPixelTransferDelegate:
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) OVERRIDE;
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) OVERRIDE;
+ virtual bool TransferIsInProgress() OVERRIDE;
+ virtual void WaitForTransferCompletion() OVERRIDE;
+
+ private:
+ void PerformAsyncTexImage2D(AsyncTexImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ const base::Closure& bind_callback);
+ void PerformAsyncTexSubImage2D(AsyncTexSubImage2DParams tex_params,
+ AsyncMemoryParams mem_params);
+
+ uint64 id_;
+ GLuint texture_id_;
+ bool transfer_in_progress_;
+ AsyncTexImage2DParams define_params_;
+
+ // Safe to hold a raw pointer because SharedState is owned by the Manager
+ // which owns the Delegate.
+ AsyncPixelTransferManagerIdle::SharedState* shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateIdle);
+};
+
+AsyncPixelTransferDelegateIdle::AsyncPixelTransferDelegateIdle(
+ AsyncPixelTransferManagerIdle::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params)
+ : id_(g_next_pixel_transfer_state_id++),
+ texture_id_(texture_id),
+ transfer_in_progress_(false),
+ define_params_(define_params),
+ shared_state_(shared_state) {}
+
+AsyncPixelTransferDelegateIdle::~AsyncPixelTransferDelegateIdle() {}
+
+void AsyncPixelTransferDelegateIdle::AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+ TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+
+ shared_state_->tasks.push_back(AsyncPixelTransferManagerIdle::Task(
+ id_,
+ this,
+ base::Bind(&AsyncPixelTransferDelegateIdle::PerformAsyncTexImage2D,
+ AsWeakPtr(),
+ tex_params,
+ mem_params,
+ bind_callback)));
+
+ transfer_in_progress_ = true;
+}
+
+void AsyncPixelTransferDelegateIdle::AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+ TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+
+ shared_state_->tasks.push_back(AsyncPixelTransferManagerIdle::Task(
+ id_,
+ this,
+ base::Bind(&AsyncPixelTransferDelegateIdle::PerformAsyncTexSubImage2D,
+ AsWeakPtr(),
+ tex_params,
+ mem_params)));
+
+ transfer_in_progress_ = true;
+}
+
+bool AsyncPixelTransferDelegateIdle::TransferIsInProgress() {
+ return transfer_in_progress_;
+}
+
+void AsyncPixelTransferDelegateIdle::WaitForTransferCompletion() {
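+  // Run this delegate's pending upload synchronously on the caller thread,
+  // then flush any notification tasks that are now at the head of the queue.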
+ for (std::list<AsyncPixelTransferManagerIdle::Task>::iterator iter =
+ shared_state_->tasks.begin();
+ iter != shared_state_->tasks.end();
+ ++iter) {
+ if (iter->transfer_id != id_)
+ continue;
+
+ (*iter).task.Run();
+ shared_state_->tasks.erase(iter);
+ break;
+ }
+
+ shared_state_->ProcessNotificationTasks();
+}
+
+void AsyncPixelTransferDelegateIdle::PerformAsyncTexImage2D(
+ AsyncTexImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ const base::Closure& bind_callback) {
+ TRACE_EVENT2("gpu", "PerformAsyncTexImage2D",
+ "width", tex_params.width,
+ "height", tex_params.height);
+
+ void* data = mem_params.GetDataAddress();
+
+ base::TimeTicks begin_time(base::TimeTicks::HighResNow());
+ gfx::ScopedTextureBinder texture_binder(tex_params.target, texture_id_);
+
+ {
+ TRACE_EVENT0("gpu", "glTexImage2D");
+ glTexImage2D(
+ tex_params.target,
+ tex_params.level,
+ tex_params.internal_format,
+ tex_params.width,
+ tex_params.height,
+ tex_params.border,
+ tex_params.format,
+ tex_params.type,
+ data);
+ }
+
+ TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
+ transfer_in_progress_ = false;
+ shared_state_->texture_upload_count++;
+ shared_state_->total_texture_upload_time +=
+ base::TimeTicks::HighResNow() - begin_time;
+
+ // The texture is already fully bound so just call it now.
+ bind_callback.Run();
+}
+
+void AsyncPixelTransferDelegateIdle::PerformAsyncTexSubImage2D(
+ AsyncTexSubImage2DParams tex_params,
+ AsyncMemoryParams mem_params) {
+ TRACE_EVENT2("gpu", "PerformAsyncTexSubImage2D",
+ "width", tex_params.width,
+ "height", tex_params.height);
+
+ void* data = mem_params.GetDataAddress();
+
+ base::TimeTicks begin_time(base::TimeTicks::HighResNow());
+ gfx::ScopedTextureBinder texture_binder(tex_params.target, texture_id_);
+
+ // If it's a full texture update, use glTexImage2D as it's faster.
+ // TODO(epenner): Make this configurable (http://crbug.com/259924)
+ if (tex_params.xoffset == 0 &&
+ tex_params.yoffset == 0 &&
+ tex_params.target == define_params_.target &&
+ tex_params.level == define_params_.level &&
+ tex_params.width == define_params_.width &&
+ tex_params.height == define_params_.height) {
+ TRACE_EVENT0("gpu", "glTexImage2D");
+ glTexImage2D(
+ define_params_.target,
+ define_params_.level,
+ define_params_.internal_format,
+ define_params_.width,
+ define_params_.height,
+ define_params_.border,
+ tex_params.format,
+ tex_params.type,
+ data);
+ } else {
+ TRACE_EVENT0("gpu", "glTexSubImage2D");
+ glTexSubImage2D(
+ tex_params.target,
+ tex_params.level,
+ tex_params.xoffset,
+ tex_params.yoffset,
+ tex_params.width,
+ tex_params.height,
+ tex_params.format,
+ tex_params.type,
+ data);
+ }
+
+ TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
+ transfer_in_progress_ = false;
+ shared_state_->texture_upload_count++;
+ shared_state_->total_texture_upload_time +=
+ base::TimeTicks::HighResNow() - begin_time;
+}
+
+AsyncPixelTransferManagerIdle::Task::Task(
+ uint64 transfer_id,
+ AsyncPixelTransferDelegate* delegate,
+ const base::Closure& task)
+ : transfer_id(transfer_id),
+ delegate(delegate),
+ task(task) {
+}
+
+AsyncPixelTransferManagerIdle::Task::~Task() {}
+
+AsyncPixelTransferManagerIdle::SharedState::SharedState()
+ : texture_upload_count(0) {}
+
+AsyncPixelTransferManagerIdle::SharedState::~SharedState() {}
+
+void AsyncPixelTransferManagerIdle::SharedState::ProcessNotificationTasks() {
+ while (!tasks.empty()) {
+ // Stop when we reach a pixel transfer task.
+ if (tasks.front().transfer_id)
+ return;
+
+ tasks.front().task.Run();
+ tasks.pop_front();
+ }
+}
+
+AsyncPixelTransferManagerIdle::AsyncPixelTransferManagerIdle()
+ : shared_state_() {
+}
+
+AsyncPixelTransferManagerIdle::~AsyncPixelTransferManagerIdle() {}
+
+void AsyncPixelTransferManagerIdle::BindCompletedAsyncTransfers() {
+ // Everything is already bound.
+}
+
+void AsyncPixelTransferManagerIdle::AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) {
+ if (shared_state_.tasks.empty()) {
+ observer->DidComplete(mem_params);
+ return;
+ }
+
+ shared_state_.tasks.push_back(
+ Task(0, // 0 transfer_id for notification tasks.
+ NULL,
+ base::Bind(
+ &PerformNotifyCompletion,
+ mem_params,
+ make_scoped_refptr(observer))));
+}
+
+uint32 AsyncPixelTransferManagerIdle::GetTextureUploadCount() {
+ return shared_state_.texture_upload_count;
+}
+
+base::TimeDelta AsyncPixelTransferManagerIdle::GetTotalTextureUploadTime() {
+ return shared_state_.total_texture_upload_time;
+}
+
+void AsyncPixelTransferManagerIdle::ProcessMorePendingTransfers() {
+ if (shared_state_.tasks.empty())
+ return;
+
+ // First task should always be a pixel transfer task.
+ DCHECK(shared_state_.tasks.front().transfer_id);
+ shared_state_.tasks.front().task.Run();
+ shared_state_.tasks.pop_front();
+
+ shared_state_.ProcessNotificationTasks();
+}
+
+bool AsyncPixelTransferManagerIdle::NeedsProcessMorePendingTransfers() {
+ return !shared_state_.tasks.empty();
+}
+
+void AsyncPixelTransferManagerIdle::WaitAllAsyncTexImage2D() {
+ if (shared_state_.tasks.empty())
+ return;
+
+ const Task& task = shared_state_.tasks.back();
+ if (task.delegate)
+ task.delegate->WaitForTransferCompletion();
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManagerIdle::CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ return new AsyncPixelTransferDelegateIdle(&shared_state_,
+ ref->service_id(),
+ define_params);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_idle.h b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.h
new file mode 100644
index 0000000..af3262f
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.h
@@ -0,0 +1,68 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_IDLE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_IDLE_H_
+
+#include <list>
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+namespace gpu {
+
+class AsyncPixelTransferManagerIdle : public AsyncPixelTransferManager {
+ public:
+ AsyncPixelTransferManagerIdle();
+ virtual ~AsyncPixelTransferManagerIdle();
+
+ // AsyncPixelTransferManager implementation:
+ virtual void BindCompletedAsyncTransfers() OVERRIDE;
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) OVERRIDE;
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual void ProcessMorePendingTransfers() OVERRIDE;
+ virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
+
+ struct Task {
+ Task(uint64 transfer_id,
+ AsyncPixelTransferDelegate* delegate,
+ const base::Closure& task);
+ ~Task();
+
+    // This is non-zero if this is a pixel transfer task.
+ uint64 transfer_id;
+
+ AsyncPixelTransferDelegate* delegate;
+
+ base::Closure task;
+ };
+
+ // State shared between Managers and Delegates.
+ struct SharedState {
+ SharedState();
+ ~SharedState();
+ void ProcessNotificationTasks();
+
+ int texture_upload_count;
+ base::TimeDelta total_texture_upload_time;
+ std::list<Task> tasks;
+ };
+
+ private:
+ // AsyncPixelTransferManager implementation:
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) OVERRIDE;
+
+ SharedState shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManagerIdle);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_IDLE_H_
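
The idle manager above keeps a single FIFO of Task entries: a non-zero transfer_id marks a pixel transfer, a zero transfer_id marks a completion notification. ProcessMorePendingTransfers() runs the transfer at the front of the queue, then SharedState::ProcessNotificationTasks() drains any notification tasks queued behind it, stopping at the next transfer. The standalone sketch below (not part of the diff; names such as SketchTask are hypothetical) models that queue discipline with std::list and std::function.

// Illustrative sketch only (not part of the diff): the idle manager's queue
// discipline, modeled with std::list and std::function. Names are hypothetical.
#include <cstdint>
#include <functional>
#include <iostream>
#include <list>

struct SketchTask {
  uint64_t transfer_id;       // Non-zero for a pixel transfer task.
  std::function<void()> run;  // Stand-in for base::Closure.
};

struct SketchQueue {
  std::list<SketchTask> tasks;

  // Mirrors SharedState::ProcessNotificationTasks(): drain leading
  // notification tasks (transfer_id == 0), stop at the next transfer.
  void ProcessNotificationTasks() {
    while (!tasks.empty() && tasks.front().transfer_id == 0) {
      tasks.front().run();
      tasks.pop_front();
    }
  }

  // Mirrors ProcessMorePendingTransfers(): run one transfer, then any
  // notifications that were queued behind it.
  void ProcessMorePendingTransfers() {
    if (tasks.empty())
      return;
    tasks.front().run();  // Front is expected to be a transfer task.
    tasks.pop_front();
    ProcessNotificationTasks();
  }
};

int main() {
  SketchQueue q;
  q.tasks.push_back({1, [] { std::cout << "upload #1\n"; }});
  q.tasks.push_back({0, [] { std::cout << "notify completion\n"; }});
  q.tasks.push_back({2, [] { std::cout << "upload #2\n"; }});
  q.ProcessMorePendingTransfers();  // upload #1, then notify completion.
  q.ProcessMorePendingTransfers();  // upload #2.
  return 0;
}
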
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_linux.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_linux.cc
new file mode 100644
index 0000000..8d25f00
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_linux.cc
@@ -0,0 +1,40 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/command_line.h"
+#include "base/debug/trace_event.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_idle.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_stub.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+
+AsyncPixelTransferManager* AsyncPixelTransferManager::Create(
+ gfx::GLContext* context) {
+ TRACE_EVENT0("gpu", "AsyncPixelTransferManager::Create");
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableShareGroupAsyncTextureUpload)) {
+ DCHECK(context);
+ return static_cast<AsyncPixelTransferManager*> (
+ new AsyncPixelTransferManagerShareGroup(context));
+ }
+
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationOSMesaGL:
+ case gfx::kGLImplementationDesktopGL:
+ case gfx::kGLImplementationEGLGLES2:
+ return new AsyncPixelTransferManagerIdle;
+ case gfx::kGLImplementationMockGL:
+ return new AsyncPixelTransferManagerStub;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_mac.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_mac.cc
new file mode 100644
index 0000000..8c19b57
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_mac.cc
@@ -0,0 +1,30 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/debug/trace_event.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_idle.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_stub.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+
+AsyncPixelTransferManager* AsyncPixelTransferManager::Create(
+ gfx::GLContext* context) {
+ TRACE_EVENT0("gpu", "AsyncPixelTransferManager::Create");
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationOSMesaGL:
+ case gfx::kGLImplementationDesktopGL:
+ case gfx::kGLImplementationAppleGL:
+ return new AsyncPixelTransferManagerIdle;
+ case gfx::kGLImplementationMockGL:
+ return new AsyncPixelTransferManagerStub;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_mock.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_mock.cc
new file mode 100644
index 0000000..84e95e3
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_mock.cc
@@ -0,0 +1,15 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+MockAsyncPixelTransferManager::MockAsyncPixelTransferManager() {}
+
+MockAsyncPixelTransferManager::~MockAsyncPixelTransferManager() {}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_mock.h b/gpu/command_buffer/service/async_pixel_transfer_manager_mock.h
new file mode 100644
index 0000000..3bc8b6b
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_mock.h
@@ -0,0 +1,39 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_TEST_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_TEST_H_
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+
+class MockAsyncPixelTransferManager : public AsyncPixelTransferManager {
+ public:
+ MockAsyncPixelTransferManager();
+ virtual ~MockAsyncPixelTransferManager();
+
+ // AsyncPixelTransferManager implementation:
+ MOCK_METHOD0(BindCompletedAsyncTransfers, void());
+ MOCK_METHOD2(AsyncNotifyCompletion,
+ void(const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer));
+ MOCK_METHOD0(GetTextureUploadCount, uint32());
+ MOCK_METHOD0(GetTotalTextureUploadTime, base::TimeDelta());
+ MOCK_METHOD0(ProcessMorePendingTransfers, void());
+ MOCK_METHOD0(NeedsProcessMorePendingTransfers, bool());
+ MOCK_METHOD0(WaitAllAsyncTexImage2D, void());
+ MOCK_METHOD2(
+ CreatePixelTransferDelegateImpl,
+ AsyncPixelTransferDelegate*(gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAsyncPixelTransferManager);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_TEST_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc
new file mode 100644
index 0000000..99103b8
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc
@@ -0,0 +1,555 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h"
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_synthetic_delay.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/cancellation_flag.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/gpu_preference.h"
+#include "ui/gl/scoped_binders.h"
+
+namespace gpu {
+
+namespace {
+
+const char kAsyncTransferThreadName[] = "AsyncTransferThread";
+
+void PerformNotifyCompletion(
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
+ TRACE_EVENT0("gpu", "PerformNotifyCompletion");
+ observer->DidComplete(mem_params);
+}
+
+// TODO(backer): Factor out common thread scheduling logic from the EGL and
+// ShareGroup implementations. http://crbug.com/239889
+class TransferThread : public base::Thread {
+ public:
+ TransferThread()
+ : base::Thread(kAsyncTransferThreadName),
+ initialized_(false) {
+ Start();
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+ SetPriority(base::kThreadPriority_Background);
+#endif
+ }
+
+ virtual ~TransferThread() {
+ // The only instance of this class was declared leaky.
+ NOTREACHED();
+ }
+
+ void InitializeOnMainThread(gfx::GLContext* parent_context) {
+ TRACE_EVENT0("gpu", "TransferThread::InitializeOnMainThread");
+ if (initialized_)
+ return;
+
+ base::WaitableEvent wait_for_init(true, false);
+ message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&TransferThread::InitializeOnTransferThread,
+ base::Unretained(this),
+ base::Unretained(parent_context),
+ &wait_for_init));
+ wait_for_init.Wait();
+ }
+
+ virtual void CleanUp() OVERRIDE {
+ surface_ = NULL;
+ context_ = NULL;
+ }
+
+ private:
+ bool initialized_;
+
+ scoped_refptr<gfx::GLSurface> surface_;
+ scoped_refptr<gfx::GLContext> context_;
+
+ void InitializeOnTransferThread(gfx::GLContext* parent_context,
+ base::WaitableEvent* caller_wait) {
+ TRACE_EVENT0("gpu", "InitializeOnTransferThread");
+
+ if (!parent_context) {
+ LOG(ERROR) << "No parent context provided.";
+ caller_wait->Signal();
+ return;
+ }
+
+ surface_ = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1));
+ if (!surface_.get()) {
+ LOG(ERROR) << "Unable to create GLSurface";
+ caller_wait->Signal();
+ return;
+ }
+
+ // TODO(backer): This is coded for integrated GPUs. For discrete GPUs
+ // we would probably want to use a PBO texture upload for a true async
+ // upload (that would hopefully be optimized as a DMA transfer by the
+ // driver).
+ context_ = gfx::GLContext::CreateGLContext(parent_context->share_group(),
+ surface_.get(),
+ gfx::PreferIntegratedGpu);
+ if (!context_.get()) {
+ LOG(ERROR) << "Unable to create GLContext.";
+ caller_wait->Signal();
+ return;
+ }
+
+ context_->MakeCurrent(surface_.get());
+ initialized_ = true;
+ caller_wait->Signal();
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(TransferThread);
+};
+
+base::LazyInstance<TransferThread>::Leaky
+ g_transfer_thread = LAZY_INSTANCE_INITIALIZER;
+
+base::MessageLoopProxy* transfer_message_loop_proxy() {
+ return g_transfer_thread.Pointer()->message_loop_proxy().get();
+}
+
+class PendingTask : public base::RefCountedThreadSafe<PendingTask> {
+ public:
+ explicit PendingTask(const base::Closure& task)
+ : task_(task), task_pending_(true, false) {}
+
+ bool TryRun() {
+ // This is meant to be called on the main thread where the texture
+ // is already bound.
+ DCHECK(checker_.CalledOnValidThread());
+ if (task_lock_.Try()) {
+ // Only run once.
+ if (!task_.is_null())
+ task_.Run();
+ task_.Reset();
+
+ task_lock_.Release();
+ task_pending_.Signal();
+ return true;
+ }
+ return false;
+ }
+
+ void BindAndRun(GLuint texture_id) {
+ // This is meant to be called on the upload thread where we don't have to
+ // restore the previous texture binding.
+ DCHECK(!checker_.CalledOnValidThread());
+ base::AutoLock locked(task_lock_);
+ if (!task_.is_null()) {
+ glBindTexture(GL_TEXTURE_2D, texture_id);
+ task_.Run();
+ task_.Reset();
+ glBindTexture(GL_TEXTURE_2D, 0);
+ // Flush for synchronization between threads.
+ glFlush();
+ task_pending_.Signal();
+ }
+ }
+
+ void Cancel() {
+ base::AutoLock locked(task_lock_);
+ task_.Reset();
+ task_pending_.Signal();
+ }
+
+ bool TaskIsInProgress() {
+ return !task_pending_.IsSignaled();
+ }
+
+ void WaitForTask() {
+ task_pending_.Wait();
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<PendingTask>;
+
+ virtual ~PendingTask() {}
+
+ base::ThreadChecker checker_;
+
+ base::Lock task_lock_;
+ base::Closure task_;
+ base::WaitableEvent task_pending_;
+
+ DISALLOW_COPY_AND_ASSIGN(PendingTask);
+};
+
+// Class which holds async pixel transfer state.
+// The texture_id is accessed by either thread, but everything
+// else is accessed only on the main thread.
+class TransferStateInternal
+ : public base::RefCountedThreadSafe<TransferStateInternal> {
+ public:
+ TransferStateInternal(GLuint texture_id,
+ const AsyncTexImage2DParams& define_params)
+ : texture_id_(texture_id), define_params_(define_params) {}
+
+ bool TransferIsInProgress() {
+ return pending_upload_task_.get() &&
+ pending_upload_task_->TaskIsInProgress();
+ }
+
+ void BindTransfer() {
+ TRACE_EVENT2("gpu", "BindAsyncTransfer",
+ "width", define_params_.width,
+ "height", define_params_.height);
+ DCHECK(texture_id_);
+
+ glBindTexture(GL_TEXTURE_2D, texture_id_);
+ bind_callback_.Run();
+ }
+
+ void WaitForTransferCompletion() {
+ TRACE_EVENT0("gpu", "WaitForTransferCompletion");
+ DCHECK(pending_upload_task_.get());
+ if (!pending_upload_task_->TryRun()) {
+ pending_upload_task_->WaitForTask();
+ }
+ pending_upload_task_ = NULL;
+ }
+
+ void CancelUpload() {
+ TRACE_EVENT0("gpu", "CancelUpload");
+ if (pending_upload_task_.get())
+ pending_upload_task_->Cancel();
+ pending_upload_task_ = NULL;
+ }
+
+ void ScheduleAsyncTexImage2D(
+ const AsyncTexImage2DParams tex_params,
+ const AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats,
+ const base::Closure& bind_callback) {
+ TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
+ pending_upload_task_ = new PendingTask(base::Bind(
+ &TransferStateInternal::PerformAsyncTexImage2D,
+ this,
+ tex_params,
+ mem_params,
+ texture_upload_stats));
+ transfer_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &PendingTask::BindAndRun, pending_upload_task_, texture_id_));
+
+ // Save the late bind callback, so we can notify the client when it is
+ // bound.
+ bind_callback_ = bind_callback;
+ }
+
+ void ScheduleAsyncTexSubImage2D(
+ AsyncTexSubImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
+ TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
+ pending_upload_task_ = new PendingTask(base::Bind(
+ &TransferStateInternal::PerformAsyncTexSubImage2D,
+ this,
+ tex_params,
+ mem_params,
+ texture_upload_stats));
+ transfer_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &PendingTask::BindAndRun, pending_upload_task_, texture_id_));
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<TransferStateInternal>;
+
+ virtual ~TransferStateInternal() {
+ }
+
+ void PerformAsyncTexImage2D(
+ AsyncTexImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
+ TRACE_EVENT2("gpu",
+ "PerformAsyncTexImage",
+ "width",
+ tex_params.width,
+ "height",
+ tex_params.height);
+ DCHECK_EQ(0, tex_params.level);
+
+ base::TimeTicks begin_time;
+ if (texture_upload_stats.get())
+ begin_time = base::TimeTicks::HighResNow();
+
+ void* data = mem_params.GetDataAddress();
+
+ {
+ TRACE_EVENT0("gpu", "glTexImage2D");
+ glTexImage2D(GL_TEXTURE_2D,
+ tex_params.level,
+ tex_params.internal_format,
+ tex_params.width,
+ tex_params.height,
+ tex_params.border,
+ tex_params.format,
+ tex_params.type,
+ data);
+ TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
+ }
+
+ if (texture_upload_stats.get()) {
+ texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
+ begin_time);
+ }
+ }
+
+ void PerformAsyncTexSubImage2D(
+ AsyncTexSubImage2DParams tex_params,
+ AsyncMemoryParams mem_params,
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
+ TRACE_EVENT2("gpu",
+ "PerformAsyncTexSubImage2D",
+ "width",
+ tex_params.width,
+ "height",
+ tex_params.height);
+ DCHECK_EQ(0, tex_params.level);
+
+ base::TimeTicks begin_time;
+ if (texture_upload_stats.get())
+ begin_time = base::TimeTicks::HighResNow();
+
+ void* data = mem_params.GetDataAddress();
+ {
+ TRACE_EVENT0("gpu", "glTexSubImage2D");
+ glTexSubImage2D(GL_TEXTURE_2D,
+ tex_params.level,
+ tex_params.xoffset,
+ tex_params.yoffset,
+ tex_params.width,
+ tex_params.height,
+ tex_params.format,
+ tex_params.type,
+ data);
+ TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
+ }
+
+ if (texture_upload_stats.get()) {
+ texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
+ begin_time);
+ }
+ }
+
+ scoped_refptr<PendingTask> pending_upload_task_;
+
+ GLuint texture_id_;
+
+ // Definition params for texture that needs binding.
+ AsyncTexImage2DParams define_params_;
+
+ // Callback to invoke when AsyncTexImage2D is complete
+ // and the client can safely use the texture. This occurs
+ // during BindCompletedAsyncTransfers().
+ base::Closure bind_callback_;
+};
+
+} // namespace
+
+class AsyncPixelTransferDelegateShareGroup
+ : public AsyncPixelTransferDelegate,
+ public base::SupportsWeakPtr<AsyncPixelTransferDelegateShareGroup> {
+ public:
+ AsyncPixelTransferDelegateShareGroup(
+ AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params);
+ virtual ~AsyncPixelTransferDelegateShareGroup();
+
+ void BindTransfer() { state_->BindTransfer(); }
+
+ // Implement AsyncPixelTransferDelegate:
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) OVERRIDE;
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) OVERRIDE;
+ virtual bool TransferIsInProgress() OVERRIDE;
+ virtual void WaitForTransferCompletion() OVERRIDE;
+
+ private:
+ // A raw pointer is safe because the SharedState is owned by the Manager,
+ // which owns this Delegate.
+ AsyncPixelTransferManagerShareGroup::SharedState* shared_state_;
+ scoped_refptr<TransferStateInternal> state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateShareGroup);
+};
+
+AsyncPixelTransferDelegateShareGroup::AsyncPixelTransferDelegateShareGroup(
+ AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
+ GLuint texture_id,
+ const AsyncTexImage2DParams& define_params)
+ : shared_state_(shared_state),
+ state_(new TransferStateInternal(texture_id, define_params)) {}
+
+AsyncPixelTransferDelegateShareGroup::~AsyncPixelTransferDelegateShareGroup() {
+ TRACE_EVENT0("gpu", " ~AsyncPixelTransferDelegateShareGroup");
+ state_->CancelUpload();
+}
+
+bool AsyncPixelTransferDelegateShareGroup::TransferIsInProgress() {
+ return state_->TransferIsInProgress();
+}
+
+void AsyncPixelTransferDelegateShareGroup::WaitForTransferCompletion() {
+ if (state_->TransferIsInProgress()) {
+ state_->WaitForTransferCompletion();
+ DCHECK(!state_->TransferIsInProgress());
+ }
+
+ // Fast track the BindTransfer, if applicable.
+ for (AsyncPixelTransferManagerShareGroup::SharedState::TransferQueue::iterator
+ iter = shared_state_->pending_allocations.begin();
+ iter != shared_state_->pending_allocations.end();
+ ++iter) {
+ if (iter->get() != this)
+ continue;
+
+ shared_state_->pending_allocations.erase(iter);
+ BindTransfer();
+ break;
+ }
+}
+
+void AsyncPixelTransferDelegateShareGroup::AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+ DCHECK(!state_->TransferIsInProgress());
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+ DCHECK_EQ(tex_params.level, 0);
+
+ shared_state_->pending_allocations.push_back(AsWeakPtr());
+ state_->ScheduleAsyncTexImage2D(tex_params,
+ mem_params,
+ shared_state_->texture_upload_stats,
+ bind_callback);
+}
+
+void AsyncPixelTransferDelegateShareGroup::AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+ TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
+ "width", tex_params.width,
+ "height", tex_params.height);
+ DCHECK(!state_->TransferIsInProgress());
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
+ DCHECK_EQ(tex_params.level, 0);
+
+ state_->ScheduleAsyncTexSubImage2D(
+ tex_params, mem_params, shared_state_->texture_upload_stats);
+}
+
+AsyncPixelTransferManagerShareGroup::SharedState::SharedState()
+ // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
+ : texture_upload_stats(new AsyncPixelTransferUploadStats) {}
+
+AsyncPixelTransferManagerShareGroup::SharedState::~SharedState() {}
+
+AsyncPixelTransferManagerShareGroup::AsyncPixelTransferManagerShareGroup(
+ gfx::GLContext* context) {
+ g_transfer_thread.Pointer()->InitializeOnMainThread(context);
+}
+
+AsyncPixelTransferManagerShareGroup::~AsyncPixelTransferManagerShareGroup() {}
+
+void AsyncPixelTransferManagerShareGroup::BindCompletedAsyncTransfers() {
+ scoped_ptr<gfx::ScopedTextureBinder> texture_binder;
+
+ while (!shared_state_.pending_allocations.empty()) {
+ if (!shared_state_.pending_allocations.front().get()) {
+ shared_state_.pending_allocations.pop_front();
+ continue;
+ }
+ AsyncPixelTransferDelegateShareGroup* delegate =
+ shared_state_.pending_allocations.front().get();
+    // Terminate early, as all transfers currently finish in order.
+ if (delegate->TransferIsInProgress())
+ break;
+
+ if (!texture_binder)
+ texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));
+
+ // Used to set tex info from the gles2 cmd decoder once upload has
+ // finished (it'll bind the texture and call a callback).
+ delegate->BindTransfer();
+
+ shared_state_.pending_allocations.pop_front();
+ }
+}
+
+void AsyncPixelTransferManagerShareGroup::AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) {
+ // Post a PerformNotifyCompletion task to the upload thread. This task
+ // will run after all async transfers are complete.
+ transfer_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&PerformNotifyCompletion,
+ mem_params,
+ make_scoped_refptr(observer)));
+}
+
+uint32 AsyncPixelTransferManagerShareGroup::GetTextureUploadCount() {
+ return shared_state_.texture_upload_stats->GetStats(NULL);
+}
+
+base::TimeDelta
+AsyncPixelTransferManagerShareGroup::GetTotalTextureUploadTime() {
+ base::TimeDelta total_texture_upload_time;
+ shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
+ return total_texture_upload_time;
+}
+
+void AsyncPixelTransferManagerShareGroup::ProcessMorePendingTransfers() {
+}
+
+bool AsyncPixelTransferManagerShareGroup::NeedsProcessMorePendingTransfers() {
+ return false;
+}
+
+void AsyncPixelTransferManagerShareGroup::WaitAllAsyncTexImage2D() {
+ if (shared_state_.pending_allocations.empty())
+ return;
+
+ AsyncPixelTransferDelegateShareGroup* delegate =
+ shared_state_.pending_allocations.back().get();
+ if (delegate)
+ delegate->WaitForTransferCompletion();
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManagerShareGroup::CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ return new AsyncPixelTransferDelegateShareGroup(
+ &shared_state_, ref->service_id(), define_params);
+}
+
+} // namespace gpu
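
PendingTask in the file above lets either thread perform the upload exactly once: the decoder thread calls the non-blocking TryRun() so WaitForTransferCompletion() can steal the work if the upload thread has not started it yet, while the upload thread takes the lock unconditionally in BindAndRun(); whichever side runs the closure signals a WaitableEvent. Below is a minimal standalone sketch of that run-once pattern (not part of the diff; names are hypothetical), using std::mutex and std::promise in place of base::Lock and base::WaitableEvent.

// Illustrative sketch only (not part of the diff): the "run once, from either
// thread" pattern used by PendingTask.
#include <functional>
#include <future>
#include <iostream>
#include <mutex>
#include <thread>
#include <utility>

class SketchPendingTask {
 public:
  explicit SketchPendingTask(std::function<void()> task)
      : task_(std::move(task)), done_(done_promise_.get_future()) {}

  // Main thread: try to steal the work without blocking.
  bool TryRun() {
    if (!mutex_.try_lock())
      return false;  // Upload thread holds the lock; let it finish.
    RunOnceLocked();
    mutex_.unlock();
    return true;
  }

  // Upload thread: take the lock unconditionally and run.
  void Run() {
    std::lock_guard<std::mutex> lock(mutex_);
    RunOnceLocked();
  }

  void WaitForTask() { done_.wait(); }

 private:
  void RunOnceLocked() {
    if (task_) {
      task_();
      task_ = nullptr;            // Only run once.
      done_promise_.set_value();  // Analogous to task_pending_.Signal().
    }
  }

  std::mutex mutex_;
  std::function<void()> task_;
  std::promise<void> done_promise_;
  std::future<void> done_;
};

int main() {
  SketchPendingTask pending([] { std::cout << "upload texture\n"; });
  std::thread upload_thread([&] { pending.Run(); });
  if (!pending.TryRun())    // Either steal the upload here...
    pending.WaitForTask();  // ...or wait for the upload thread to finish it.
  upload_thread.join();
  return 0;
}
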
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h
new file mode 100644
index 0000000..64daffe
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h
@@ -0,0 +1,60 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SHARE_GROUP_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SHARE_GROUP_H_
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/memory/ref_counted.h"
+
+namespace gfx {
+class GLContext;
+}
+
+namespace gpu {
+class AsyncPixelTransferDelegateShareGroup;
+class AsyncPixelTransferUploadStats;
+
+class AsyncPixelTransferManagerShareGroup : public AsyncPixelTransferManager {
+ public:
+ explicit AsyncPixelTransferManagerShareGroup(gfx::GLContext* context);
+ virtual ~AsyncPixelTransferManagerShareGroup();
+
+ // AsyncPixelTransferManager implementation:
+ virtual void BindCompletedAsyncTransfers() OVERRIDE;
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) OVERRIDE;
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual void ProcessMorePendingTransfers() OVERRIDE;
+ virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
+
+ // State shared between Managers and Delegates.
+ struct SharedState {
+ SharedState();
+ ~SharedState();
+
+ scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats;
+ typedef std::list<base::WeakPtr<AsyncPixelTransferDelegateShareGroup> >
+ TransferQueue;
+ TransferQueue pending_allocations;
+ };
+
+ private:
+ // AsyncPixelTransferManager implementation:
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) OVERRIDE;
+
+ SharedState shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManagerShareGroup);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SHARE_GROUP_H_
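
BindCompletedAsyncTransfers() in the share-group manager walks pending_allocations in FIFO order, dropping entries whose delegate has been destroyed (the WeakPtr no longer resolves) and stopping at the first transfer that is still in progress, since uploads complete in order on the single transfer thread. A standalone sketch of that drain follows (not part of the diff; names are hypothetical), with std::weak_ptr standing in for base::WeakPtr.

// Illustrative sketch only (not part of the diff): the FIFO drain performed
// by BindCompletedAsyncTransfers().
#include <iostream>
#include <list>
#include <memory>

struct SketchDelegate {
  bool in_progress = false;
  void BindTransfer() { std::cout << "bind completed transfer\n"; }
};

void BindCompletedTransfers(std::list<std::weak_ptr<SketchDelegate> >* queue) {
  while (!queue->empty()) {
    std::shared_ptr<SketchDelegate> delegate = queue->front().lock();
    if (!delegate) {  // Delegate was destroyed; drop the stale entry.
      queue->pop_front();
      continue;
    }
    if (delegate->in_progress)  // Transfers finish in order: stop here.
      break;
    delegate->BindTransfer();
    queue->pop_front();
  }
}

int main() {
  auto done = std::make_shared<SketchDelegate>();
  auto pending = std::make_shared<SketchDelegate>();
  pending->in_progress = true;

  std::list<std::weak_ptr<SketchDelegate> > queue = {done, pending};
  BindCompletedTransfers(&queue);  // Binds "done", stops at "pending".
  std::cout << queue.size() << " transfer(s) still pending\n";  // 1
  return 0;
}
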
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc
new file mode 100644
index 0000000..d5f96b0
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc
@@ -0,0 +1,91 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_stub.h"
+
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+
+namespace gpu {
+
+class AsyncPixelTransferDelegateStub : public AsyncPixelTransferDelegate {
+ public:
+ AsyncPixelTransferDelegateStub();
+ virtual ~AsyncPixelTransferDelegateStub();
+
+ // Implement AsyncPixelTransferDelegate:
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) OVERRIDE;
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) OVERRIDE;
+ virtual bool TransferIsInProgress() OVERRIDE;
+ virtual void WaitForTransferCompletion() OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateStub);
+};
+
+AsyncPixelTransferDelegateStub::AsyncPixelTransferDelegateStub() {}
+
+AsyncPixelTransferDelegateStub::~AsyncPixelTransferDelegateStub() {}
+
+void AsyncPixelTransferDelegateStub::AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+ bind_callback.Run();
+}
+
+void AsyncPixelTransferDelegateStub::AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+}
+
+bool AsyncPixelTransferDelegateStub::TransferIsInProgress() {
+ return false;
+}
+
+void AsyncPixelTransferDelegateStub::WaitForTransferCompletion() {}
+
+AsyncPixelTransferManagerStub::AsyncPixelTransferManagerStub() {}
+
+AsyncPixelTransferManagerStub::~AsyncPixelTransferManagerStub() {}
+
+void AsyncPixelTransferManagerStub::BindCompletedAsyncTransfers() {
+}
+
+void AsyncPixelTransferManagerStub::AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) {
+ observer->DidComplete(mem_params);
+}
+
+uint32 AsyncPixelTransferManagerStub::GetTextureUploadCount() {
+ return 0;
+}
+
+base::TimeDelta AsyncPixelTransferManagerStub::GetTotalTextureUploadTime() {
+ return base::TimeDelta();
+}
+
+void AsyncPixelTransferManagerStub::ProcessMorePendingTransfers() {
+}
+
+bool AsyncPixelTransferManagerStub::NeedsProcessMorePendingTransfers() {
+ return false;
+}
+
+void AsyncPixelTransferManagerStub::WaitAllAsyncTexImage2D() {
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManagerStub::CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ return new AsyncPixelTransferDelegateStub();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_stub.h b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.h
new file mode 100644
index 0000000..a93ce94
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.h
@@ -0,0 +1,39 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_STUB_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_STUB_H_
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+namespace gpu {
+
+class AsyncPixelTransferManagerStub : public AsyncPixelTransferManager {
+ public:
+ AsyncPixelTransferManagerStub();
+ virtual ~AsyncPixelTransferManagerStub();
+
+ // AsyncPixelTransferManager implementation:
+ virtual void BindCompletedAsyncTransfers() OVERRIDE;
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) OVERRIDE;
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual void ProcessMorePendingTransfers() OVERRIDE;
+ virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
+
+ private:
+ // AsyncPixelTransferManager implementation:
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) OVERRIDE;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManagerStub);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_STUB_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc
new file mode 100644
index 0000000..cd7d087
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc
@@ -0,0 +1,141 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_sync.h"
+
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+
+namespace gpu {
+
+// Class which handles async pixel transfers synchronously.
+class AsyncPixelTransferDelegateSync : public AsyncPixelTransferDelegate {
+ public:
+ explicit AsyncPixelTransferDelegateSync(
+ AsyncPixelTransferManagerSync::SharedState* shared_state);
+ virtual ~AsyncPixelTransferDelegateSync();
+
+ // Implement AsyncPixelTransferDelegate:
+ virtual void AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) OVERRIDE;
+ virtual void AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) OVERRIDE;
+ virtual bool TransferIsInProgress() OVERRIDE;
+ virtual void WaitForTransferCompletion() OVERRIDE;
+
+ private:
+ // Safe to hold a raw pointer because SharedState is owned by the Manager
+ // which owns the Delegate.
+ AsyncPixelTransferManagerSync::SharedState* shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateSync);
+};
+
+AsyncPixelTransferDelegateSync::AsyncPixelTransferDelegateSync(
+ AsyncPixelTransferManagerSync::SharedState* shared_state)
+ : shared_state_(shared_state) {}
+
+AsyncPixelTransferDelegateSync::~AsyncPixelTransferDelegateSync() {}
+
+void AsyncPixelTransferDelegateSync::AsyncTexImage2D(
+ const AsyncTexImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params,
+ const base::Closure& bind_callback) {
+  // Perform the upload synchronously; the texture is fully defined and
+  // bound by the time the bind callback runs below.
+ void* data = mem_params.GetDataAddress();
+ base::TimeTicks begin_time(base::TimeTicks::HighResNow());
+ glTexImage2D(
+ tex_params.target,
+ tex_params.level,
+ tex_params.internal_format,
+ tex_params.width,
+ tex_params.height,
+ tex_params.border,
+ tex_params.format,
+ tex_params.type,
+ data);
+ shared_state_->texture_upload_count++;
+ shared_state_->total_texture_upload_time +=
+ base::TimeTicks::HighResNow() - begin_time;
+ // The texture is already fully bound so just call it now.
+ bind_callback.Run();
+}
+
+void AsyncPixelTransferDelegateSync::AsyncTexSubImage2D(
+ const AsyncTexSubImage2DParams& tex_params,
+ const AsyncMemoryParams& mem_params) {
+ void* data = mem_params.GetDataAddress();
+ base::TimeTicks begin_time(base::TimeTicks::HighResNow());
+ glTexSubImage2D(
+ tex_params.target,
+ tex_params.level,
+ tex_params.xoffset,
+ tex_params.yoffset,
+ tex_params.width,
+ tex_params.height,
+ tex_params.format,
+ tex_params.type,
+ data);
+ shared_state_->texture_upload_count++;
+ shared_state_->total_texture_upload_time +=
+ base::TimeTicks::HighResNow() - begin_time;
+}
+
+bool AsyncPixelTransferDelegateSync::TransferIsInProgress() {
+ // Already done.
+ return false;
+}
+
+void AsyncPixelTransferDelegateSync::WaitForTransferCompletion() {
+ // Already done.
+}
+
+AsyncPixelTransferManagerSync::SharedState::SharedState()
+ : texture_upload_count(0) {}
+
+AsyncPixelTransferManagerSync::SharedState::~SharedState() {}
+
+AsyncPixelTransferManagerSync::AsyncPixelTransferManagerSync() {}
+
+AsyncPixelTransferManagerSync::~AsyncPixelTransferManagerSync() {}
+
+void AsyncPixelTransferManagerSync::BindCompletedAsyncTransfers() {
+ // Everything is already bound.
+}
+
+void AsyncPixelTransferManagerSync::AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) {
+ observer->DidComplete(mem_params);
+}
+
+uint32 AsyncPixelTransferManagerSync::GetTextureUploadCount() {
+ return shared_state_.texture_upload_count;
+}
+
+base::TimeDelta AsyncPixelTransferManagerSync::GetTotalTextureUploadTime() {
+ return shared_state_.total_texture_upload_time;
+}
+
+void AsyncPixelTransferManagerSync::ProcessMorePendingTransfers() {
+}
+
+bool AsyncPixelTransferManagerSync::NeedsProcessMorePendingTransfers() {
+ return false;
+}
+
+void AsyncPixelTransferManagerSync::WaitAllAsyncTexImage2D() {
+}
+
+AsyncPixelTransferDelegate*
+AsyncPixelTransferManagerSync::CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) {
+ return new AsyncPixelTransferDelegateSync(&shared_state_);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_sync.h b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.h
new file mode 100644
index 0000000..7d0b8b6
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.h
@@ -0,0 +1,50 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SYNC_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SYNC_H_
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+namespace gpu {
+
+class AsyncPixelTransferManagerSync : public AsyncPixelTransferManager {
+ public:
+ AsyncPixelTransferManagerSync();
+ virtual ~AsyncPixelTransferManagerSync();
+
+ // AsyncPixelTransferManager implementation:
+ virtual void BindCompletedAsyncTransfers() OVERRIDE;
+ virtual void AsyncNotifyCompletion(
+ const AsyncMemoryParams& mem_params,
+ AsyncPixelTransferCompletionObserver* observer) OVERRIDE;
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual void ProcessMorePendingTransfers() OVERRIDE;
+ virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
+
+ // State shared between Managers and Delegates.
+ struct SharedState {
+ SharedState();
+ ~SharedState();
+
+ int texture_upload_count;
+ base::TimeDelta total_texture_upload_time;
+ };
+
+ private:
+ // AsyncPixelTransferManager implementation:
+ virtual AsyncPixelTransferDelegate* CreatePixelTransferDelegateImpl(
+ gles2::TextureRef* ref,
+ const AsyncTexImage2DParams& define_params) OVERRIDE;
+
+ SharedState shared_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferManagerSync);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ASYNC_PIXEL_TRANSFER_MANAGER_SYNC_H_
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_win.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_win.cc
new file mode 100644
index 0000000..6955885
--- /dev/null
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_win.cc
@@ -0,0 +1,30 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+
+#include "base/debug/trace_event.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_idle.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_stub.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+
+AsyncPixelTransferManager* AsyncPixelTransferManager::Create(
+ gfx::GLContext* context) {
+ TRACE_EVENT0("gpu", "AsyncPixelTransferManager::Create");
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationOSMesaGL:
+ case gfx::kGLImplementationDesktopGL:
+ case gfx::kGLImplementationEGLGLES2:
+ return new AsyncPixelTransferManagerIdle;
+ case gfx::kGLImplementationMockGL:
+ return new AsyncPixelTransferManagerStub;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/buffer_manager.cc b/gpu/command_buffer/service/buffer_manager.cc
new file mode 100644
index 0000000..7b1c90d
--- /dev/null
+++ b/gpu/command_buffer/service/buffer_manager.cc
@@ -0,0 +1,407 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include <limits>
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+
+BufferManager::BufferManager(
+ MemoryTracker* memory_tracker,
+ FeatureInfo* feature_info)
+ : memory_tracker_(
+ new MemoryTypeTracker(memory_tracker, MemoryTracker::kManaged)),
+ feature_info_(feature_info),
+ allow_buffers_on_multiple_targets_(false),
+ buffer_count_(0),
+ have_context_(true),
+ use_client_side_arrays_for_stream_buffers_(
+ feature_info ? feature_info->workarounds(
+ ).use_client_side_arrays_for_stream_buffers : 0) {
+}
+
+BufferManager::~BufferManager() {
+ DCHECK(buffers_.empty());
+ CHECK_EQ(buffer_count_, 0u);
+}
+
+void BufferManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ buffers_.clear();
+ DCHECK_EQ(0u, memory_tracker_->GetMemRepresented());
+}
+
+void BufferManager::CreateBuffer(GLuint client_id, GLuint service_id) {
+ scoped_refptr<Buffer> buffer(new Buffer(this, service_id));
+ std::pair<BufferMap::iterator, bool> result =
+ buffers_.insert(std::make_pair(client_id, buffer));
+ DCHECK(result.second);
+}
+
+Buffer* BufferManager::GetBuffer(
+ GLuint client_id) {
+ BufferMap::iterator it = buffers_.find(client_id);
+ return it != buffers_.end() ? it->second.get() : NULL;
+}
+
+void BufferManager::RemoveBuffer(GLuint client_id) {
+ BufferMap::iterator it = buffers_.find(client_id);
+ if (it != buffers_.end()) {
+ Buffer* buffer = it->second.get();
+ buffer->MarkAsDeleted();
+ buffers_.erase(it);
+ }
+}
+
+void BufferManager::StartTracking(Buffer* /* buffer */) {
+ ++buffer_count_;
+}
+
+void BufferManager::StopTracking(Buffer* buffer) {
+ memory_tracker_->TrackMemFree(buffer->size());
+ --buffer_count_;
+}
+
+Buffer::Buffer(BufferManager* manager, GLuint service_id)
+ : manager_(manager),
+ size_(0),
+ deleted_(false),
+ shadowed_(false),
+ is_client_side_array_(false),
+ service_id_(service_id),
+ target_(0),
+ usage_(GL_STATIC_DRAW) {
+ manager_->StartTracking(this);
+}
+
+Buffer::~Buffer() {
+ if (manager_) {
+ if (manager_->have_context_) {
+ GLuint id = service_id();
+ glDeleteBuffersARB(1, &id);
+ }
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+void Buffer::SetInfo(
+ GLsizeiptr size, GLenum usage, bool shadow, const GLvoid* data,
+ bool is_client_side_array) {
+ usage_ = usage;
+ is_client_side_array_ = is_client_side_array;
+ ClearCache();
+ if (size != size_ || shadow != shadowed_) {
+ shadowed_ = shadow;
+ size_ = size;
+ if (shadowed_) {
+ shadow_.reset(new int8[size]);
+ } else {
+ shadow_.reset();
+ }
+ }
+ if (shadowed_) {
+ if (data) {
+ memcpy(shadow_.get(), data, size);
+ } else {
+ memset(shadow_.get(), 0, size);
+ }
+ }
+}
+
+bool Buffer::CheckRange(
+ GLintptr offset, GLsizeiptr size) const {
+ int32 end = 0;
+ return offset >= 0 && size >= 0 &&
+ offset <= std::numeric_limits<int32>::max() &&
+ size <= std::numeric_limits<int32>::max() &&
+ SafeAddInt32(offset, size, &end) && end <= size_;
+}
+
+bool Buffer::SetRange(
+ GLintptr offset, GLsizeiptr size, const GLvoid * data) {
+ if (!CheckRange(offset, size)) {
+ return false;
+ }
+ if (shadowed_) {
+ memcpy(shadow_.get() + offset, data, size);
+ ClearCache();
+ }
+ return true;
+}
+
+const void* Buffer::GetRange(
+ GLintptr offset, GLsizeiptr size) const {
+ if (!shadowed_) {
+ return NULL;
+ }
+ if (!CheckRange(offset, size)) {
+ return NULL;
+ }
+ return shadow_.get() + offset;
+}
+
+void Buffer::ClearCache() {
+ range_set_.clear();
+}
+
+template <typename T>
+GLuint GetMaxValue(const void* data, GLuint offset, GLsizei count) {
+ GLuint max_value = 0;
+ const T* element = reinterpret_cast<const T*>(
+ static_cast<const int8*>(data) + offset);
+ const T* end = element + count;
+ for (; element < end; ++element) {
+ if (*element > max_value) {
+ max_value = *element;
+ }
+ }
+ return max_value;
+}
+
+bool Buffer::GetMaxValueForRange(
+ GLuint offset, GLsizei count, GLenum type, GLuint* max_value) {
+ Range range(offset, count, type);
+ RangeToMaxValueMap::iterator it = range_set_.find(range);
+ if (it != range_set_.end()) {
+ *max_value = it->second;
+ return true;
+ }
+
+ uint32 size;
+ if (!SafeMultiplyUint32(
+ count, GLES2Util::GetGLTypeSizeForTexturesAndBuffers(type), &size)) {
+ return false;
+ }
+
+ if (!SafeAddUint32(offset, size, &size)) {
+ return false;
+ }
+
+ if (size > static_cast<uint32>(size_)) {
+ return false;
+ }
+
+ if (!shadowed_) {
+ return false;
+ }
+
+  // Scan the range for the max value and store it in the cache.
+ GLuint max_v = 0;
+ switch (type) {
+ case GL_UNSIGNED_BYTE:
+ max_v = GetMaxValue<uint8>(shadow_.get(), offset, count);
+ break;
+ case GL_UNSIGNED_SHORT:
+ // Check we are not accessing an odd byte for a 2 byte value.
+ if ((offset & 1) != 0) {
+ return false;
+ }
+ max_v = GetMaxValue<uint16>(shadow_.get(), offset, count);
+ break;
+ case GL_UNSIGNED_INT:
+ // Check we are not accessing a non aligned address for a 4 byte value.
+ if ((offset & 3) != 0) {
+ return false;
+ }
+ max_v = GetMaxValue<uint32>(shadow_.get(), offset, count);
+ break;
+ default:
+ NOTREACHED(); // should never get here by validation.
+ break;
+ }
+ range_set_.insert(std::make_pair(range, max_v));
+ *max_value = max_v;
+ return true;
+}
+
+bool BufferManager::GetClientId(GLuint service_id, GLuint* client_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (BufferMap::const_iterator it = buffers_.begin();
+ it != buffers_.end(); ++it) {
+ if (it->second->service_id() == service_id) {
+ *client_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool BufferManager::IsUsageClientSideArray(GLenum usage) {
+ return usage == GL_STREAM_DRAW && use_client_side_arrays_for_stream_buffers_;
+}
+
+bool BufferManager::UseNonZeroSizeForClientSideArrayBuffer() {
+ return feature_info_.get() &&
+ feature_info_->workarounds()
+ .use_non_zero_size_for_client_side_stream_buffers;
+}
+
+void BufferManager::SetInfo(
+ Buffer* buffer, GLsizeiptr size, GLenum usage, const GLvoid* data) {
+ DCHECK(buffer);
+ memory_tracker_->TrackMemFree(buffer->size());
+ bool is_client_side_array = IsUsageClientSideArray(usage);
+ bool shadow = buffer->target() == GL_ELEMENT_ARRAY_BUFFER ||
+ allow_buffers_on_multiple_targets_ ||
+ is_client_side_array;
+ buffer->SetInfo(size, usage, shadow, data, is_client_side_array);
+ memory_tracker_->TrackMemAlloc(buffer->size());
+}
+
+void BufferManager::ValidateAndDoBufferData(
+ ContextState* context_state, GLenum target, GLsizeiptr size,
+ const GLvoid * data, GLenum usage) {
+ ErrorState* error_state = context_state->GetErrorState();
+ if (!feature_info_->validators()->buffer_target.IsValid(target)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, "glBufferData", target, "target");
+ return;
+ }
+ if (!feature_info_->validators()->buffer_usage.IsValid(usage)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, "glBufferData", usage, "usage");
+ return;
+ }
+ if (size < 0) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_VALUE, "glBufferData", "size < 0");
+ return;
+ }
+
+ Buffer* buffer = GetBufferInfoForTarget(context_state, target);
+ if (!buffer) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_VALUE, "glBufferData", "unknown buffer");
+ return;
+ }
+
+ if (!memory_tracker_->EnsureGPUMemoryAvailable(size)) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_OUT_OF_MEMORY, "glBufferData", "out of memory");
+ return;
+ }
+
+ DoBufferData(error_state, buffer, size, usage, data);
+}
+
+
+void BufferManager::DoBufferData(
+ ErrorState* error_state,
+ Buffer* buffer,
+ GLsizeiptr size,
+ GLenum usage,
+ const GLvoid* data) {
+ // Clear the buffer to 0 if no initial data was passed in.
+ scoped_ptr<int8[]> zero;
+ if (!data) {
+ zero.reset(new int8[size]);
+ memset(zero.get(), 0, size);
+ data = zero.get();
+ }
+
+ ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state, "glBufferData");
+ if (IsUsageClientSideArray(usage)) {
+ GLsizei empty_size = UseNonZeroSizeForClientSideArrayBuffer() ? 1 : 0;
+ glBufferData(buffer->target(), empty_size, NULL, usage);
+ } else {
+ glBufferData(buffer->target(), size, data, usage);
+ }
+ GLenum error = ERRORSTATE_PEEK_GL_ERROR(error_state, "glBufferData");
+ if (error == GL_NO_ERROR) {
+ SetInfo(buffer, size, usage, data);
+ } else {
+ SetInfo(buffer, 0, usage, NULL);
+ }
+}
+
+void BufferManager::ValidateAndDoBufferSubData(
+ ContextState* context_state, GLenum target, GLintptr offset, GLsizeiptr size,
+ const GLvoid * data) {
+ ErrorState* error_state = context_state->GetErrorState();
+ Buffer* buffer = GetBufferInfoForTarget(context_state, target);
+ if (!buffer) {
+ ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_VALUE, "glBufferSubData",
+ "unknown buffer");
+ return;
+ }
+
+ DoBufferSubData(error_state, buffer, offset, size, data);
+}
+
+void BufferManager::DoBufferSubData(
+ ErrorState* error_state,
+ Buffer* buffer,
+ GLintptr offset,
+ GLsizeiptr size,
+ const GLvoid* data) {
+ if (!buffer->SetRange(offset, size, data)) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_VALUE, "glBufferSubData", "out of range");
+ return;
+ }
+
+ if (!buffer->IsClientSideArray()) {
+ glBufferSubData(buffer->target(), offset, size, data);
+ }
+}
+
+void BufferManager::ValidateAndDoGetBufferParameteriv(
+ ContextState* context_state, GLenum target, GLenum pname, GLint* params) {
+ Buffer* buffer = GetBufferInfoForTarget(context_state, target);
+ if (!buffer) {
+ ERRORSTATE_SET_GL_ERROR(
+ context_state->GetErrorState(), GL_INVALID_OPERATION,
+ "glGetBufferParameteriv", "no buffer bound for target");
+ return;
+ }
+ switch (pname) {
+ case GL_BUFFER_SIZE:
+ *params = buffer->size();
+ break;
+ case GL_BUFFER_USAGE:
+ *params = buffer->usage();
+ break;
+ default:
+ NOTREACHED();
+ }
+}
+
+bool BufferManager::SetTarget(Buffer* buffer, GLenum target) {
+ // Check that we are not trying to bind it to a different target.
+ if (buffer->target() != 0 && buffer->target() != target &&
+ !allow_buffers_on_multiple_targets_) {
+ return false;
+ }
+ if (buffer->target() == 0) {
+ buffer->set_target(target);
+ }
+ return true;
+}
+
+// Since one BufferManager can be shared by multiple decoders, ContextState is
+// passed in each time and not just passed in during initialization.
+Buffer* BufferManager::GetBufferInfoForTarget(
+ ContextState* state, GLenum target) {
+ DCHECK(target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER);
+ if (target == GL_ARRAY_BUFFER) {
+ return state->bound_array_buffer.get();
+ } else {
+ return state->vertex_attrib_manager->element_array_buffer();
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
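Buffer::GetMaxValueForRange() above scans the shadowed copy of an element-array buffer for the largest index in an (offset, count, type) range and memoizes the result in range_set_, so repeated draws over the same index range avoid a rescan; any write through SetInfo() or SetRange() clears the cache. A standalone sketch of that memoized scan for 16-bit indices follows (not part of the diff; names are hypothetical).

// Illustrative sketch only (not part of the diff): the memoized max-index
// scan behind Buffer::GetMaxValueForRange, restricted to uint16_t indices.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

class SketchIndexBuffer {
 public:
  explicit SketchIndexBuffer(std::vector<uint8_t> shadow)
      : shadow_(std::move(shadow)) {}

  // Returns false if the requested range falls outside the buffer.
  bool GetMaxValueForRange(size_t offset, size_t count, uint32_t* max_value) {
    const size_t bytes = count * sizeof(uint16_t);
    if (offset % 2 != 0 || offset > shadow_.size() ||
        bytes > shadow_.size() - offset)
      return false;

    const auto key = std::make_tuple(offset, count);
    const auto it = cache_.find(key);
    if (it != cache_.end()) {  // Cached: skip the rescan.
      *max_value = it->second;
      return true;
    }

    uint32_t max_v = 0;
    for (size_t i = 0; i < count; ++i) {
      uint16_t v = 0;
      std::memcpy(&v, shadow_.data() + offset + i * sizeof(uint16_t),
                  sizeof(v));
      max_v = std::max<uint32_t>(max_v, v);
    }

    cache_[key] = max_v;  // Memoize; a real Buffer clears this on any write.
    *max_value = max_v;
    return true;
  }

 private:
  std::vector<uint8_t> shadow_;
  std::map<std::tuple<size_t, size_t>, uint32_t> cache_;
};

int main() {
  const std::vector<uint16_t> indices = {3, 7, 42, 5};
  std::vector<uint8_t> bytes(indices.size() * sizeof(uint16_t));
  std::memcpy(bytes.data(), indices.data(), bytes.size());

  SketchIndexBuffer buffer(std::move(bytes));
  uint32_t max_index = 0;
  if (buffer.GetMaxValueForRange(0, indices.size(), &max_index))
    std::cout << "max index: " << max_index << "\n";  // max index: 42
  return 0;
}
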
diff --git a/gpu/command_buffer/service/buffer_manager.h b/gpu/command_buffer/service/buffer_manager.h
new file mode 100644
index 0000000..cc23f01
--- /dev/null
+++ b/gpu/command_buffer/service/buffer_manager.h
@@ -0,0 +1,286 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_BUFFER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_BUFFER_MANAGER_H_
+
+#include <map>
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class BufferManager;
+struct ContextState;
+class ErrorState;
+class FeatureInfo;
+class TestHelper;
+
+// Info about Buffers currently in the system.
+class GPU_EXPORT Buffer : public base::RefCounted<Buffer> {
+ public:
+ Buffer(BufferManager* manager, GLuint service_id);
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ GLenum target() const {
+ return target_;
+ }
+
+ GLsizeiptr size() const {
+ return size_;
+ }
+
+ GLenum usage() const {
+ return usage_;
+ }
+
+ // Gets the maximum value in the buffer for the given range interpreted as
+ // the given type. Returns false if offset and count are out of range.
+ // offset is in bytes.
+ // count is in elements of type.
+ bool GetMaxValueForRange(GLuint offset, GLsizei count, GLenum type,
+ GLuint* max_value);
+
+ // Returns a pointer to shadowed data.
+ const void* GetRange(GLintptr offset, GLsizeiptr size) const;
+
+ bool IsDeleted() const {
+ return deleted_;
+ }
+
+ bool IsValid() const {
+ return target() && !IsDeleted();
+ }
+
+ bool IsClientSideArray() const {
+ return is_client_side_array_;
+ }
+
+ private:
+ friend class BufferManager;
+ friend class BufferManagerTestBase;
+ friend class base::RefCounted<Buffer>;
+
+ // Represents a range in a buffer.
+ class Range {
+ public:
+ Range(GLuint offset, GLsizei count, GLenum type)
+ : offset_(offset),
+ count_(count),
+ type_(type) {
+ }
+
+ // A less functor provided for std::map so it can find ranges.
+ struct Less {
+ bool operator() (const Range& lhs, const Range& rhs) const {
+ if (lhs.offset_ != rhs.offset_) {
+ return lhs.offset_ < rhs.offset_;
+ }
+ if (lhs.count_ != rhs.count_) {
+ return lhs.count_ < rhs.count_;
+ }
+ return lhs.type_ < rhs.type_;
+ }
+ };
+
+ private:
+ GLuint offset_;
+ GLsizei count_;
+ GLenum type_;
+ };
+
+ ~Buffer();
+
+ void set_target(GLenum target) {
+ DCHECK_EQ(target_, 0u); // you can only set this once.
+ target_ = target;
+ }
+
+ bool shadowed() const {
+ return shadowed_;
+ }
+
+ void MarkAsDeleted() {
+ deleted_ = true;
+ }
+
+ // Sets the size, usage and initial data of a buffer.
+  // If shadow is true and data is NULL, the buffer will be initialized to 0.
+ void SetInfo(
+ GLsizeiptr size, GLenum usage, bool shadow, const GLvoid* data,
+ bool is_client_side_array);
+
+ // Sets a range of data for this buffer. Returns false if the offset or size
+ // is out of range.
+ bool SetRange(
+ GLintptr offset, GLsizeiptr size, const GLvoid * data);
+
+ // Clears any cache of index ranges.
+ void ClearCache();
+
+ // Check if an offset, size range is valid for the current buffer.
+ bool CheckRange(GLintptr offset, GLsizeiptr size) const;
+
+ // The manager that owns this Buffer.
+ BufferManager* manager_;
+
+  // A copy of the data in the buffer. This data is only kept if
+  // shadowed_ is true.
+ scoped_ptr<int8[]> shadow_;
+
+ // Size of buffer.
+ GLsizeiptr size_;
+
+ // True if deleted.
+ bool deleted_;
+
+ // Whether or not the data is shadowed.
+ bool shadowed_;
+
+  // Whether or not this Buffer is kept only in client memory rather than
+  // being uploaded to the GPU.
+ bool is_client_side_array_;
+
+ // Service side buffer id.
+ GLuint service_id_;
+
+  // The type of buffer. 0 = unset, GL_ARRAY_BUFFER = vertex data,
+  // GL_ELEMENT_ARRAY_BUFFER = index data.
+  // Once set, a buffer cannot be used for another target.
+ GLenum target_;
+
+ // Usage of buffer.
+ GLenum usage_;
+
+ // A map of ranges to the highest value in that range of a certain type.
+ typedef std::map<Range, GLuint, Range::Less> RangeToMaxValueMap;
+ RangeToMaxValueMap range_set_;
+};
+
+// This class keeps track of the buffers and their sizes so we can do
+// bounds checking.
+//
+// NOTE: To support shared resources an instance of this class will need to be
+// shared by multiple GLES2Decoders.
+class GPU_EXPORT BufferManager {
+ public:
+ BufferManager(MemoryTracker* memory_tracker, FeatureInfo* feature_info);
+ ~BufferManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a Buffer for the given buffer.
+ void CreateBuffer(GLuint client_id, GLuint service_id);
+
+ // Gets the buffer info for the given buffer.
+ Buffer* GetBuffer(GLuint client_id);
+
+ // Removes a buffer info for the given buffer.
+ void RemoveBuffer(GLuint client_id);
+
+ // Gets a client id for a given service id.
+ bool GetClientId(GLuint service_id, GLuint* client_id) const;
+
+ // Validates a glBufferSubData, and then calls DoBufferData if validation was
+ // successful.
+ void ValidateAndDoBufferSubData(
+ ContextState* context_state, GLenum target, GLintptr offset,
+ GLsizeiptr size, const GLvoid * data);
+
+ // Validates a glBufferData, and then calls DoBufferData if validation was
+ // successful.
+ void ValidateAndDoBufferData(
+ ContextState* context_state, GLenum target, GLsizeiptr size,
+ const GLvoid * data, GLenum usage);
+
+ // Validates a glGetBufferParameteriv, and then calls GetBufferParameteriv if
+ // validation was successful.
+ void ValidateAndDoGetBufferParameteriv(
+ ContextState* context_state, GLenum target, GLenum pname, GLint* params);
+
+ // Sets the target of a buffer. Returns false if the target can not be set.
+ bool SetTarget(Buffer* buffer, GLenum target);
+
+ void set_allow_buffers_on_multiple_targets(bool allow) {
+ allow_buffers_on_multiple_targets_ = allow;
+ }
+
+ size_t mem_represented() const {
+ return memory_tracker_->GetMemRepresented();
+ }
+
+  // Tells whether a buffer with the given usage would be a client side array.
+ bool IsUsageClientSideArray(GLenum usage);
+
+ // Tells whether a buffer that is emulated using client-side arrays should be
+ // set to a non-zero size.
+ bool UseNonZeroSizeForClientSideArrayBuffer();
+
+ private:
+ friend class Buffer;
+ friend class TestHelper; // Needs access to DoBufferData.
+ friend class BufferManagerTestBase; // Needs access to DoBufferSubData.
+ void StartTracking(Buffer* buffer);
+ void StopTracking(Buffer* buffer);
+
+ Buffer* GetBufferInfoForTarget(ContextState* state, GLenum target);
+
+  // Does a glBufferSubData and updates the appropriate accounting.
+ // Assumes the values have already been validated.
+ void DoBufferSubData(
+ ErrorState* error_state,
+ Buffer* buffer,
+ GLintptr offset,
+ GLsizeiptr size,
+ const GLvoid* data);
+
+  // Does a glBufferData and updates the appropriate accounting.
+  // Currently assumes the values have already been validated.
+ void DoBufferData(
+ ErrorState* error_state,
+ Buffer* buffer,
+ GLsizeiptr size,
+ GLenum usage,
+ const GLvoid* data);
+
+ // Sets the size, usage and initial data of a buffer.
+  // If data is NULL, the buffer will be initialized to 0 if it is shadowed.
+ void SetInfo(
+ Buffer* buffer, GLsizeiptr size, GLenum usage, const GLvoid* data);
+
+ scoped_ptr<MemoryTypeTracker> memory_tracker_;
+ scoped_refptr<FeatureInfo> feature_info_;
+
+ // Info for each buffer in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<Buffer> > BufferMap;
+ BufferMap buffers_;
+
+ // Whether or not buffers can be bound to multiple targets.
+ bool allow_buffers_on_multiple_targets_;
+
+  // Counts the number of Buffers allocated with 'this' as their manager.
+  // Allows checking that no Buffer will outlive this manager.
+ unsigned int buffer_count_;
+
+ bool have_context_;
+ bool use_client_side_arrays_for_stream_buffers_;
+
+ DISALLOW_COPY_AND_ASSIGN(BufferManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_BUFFER_MANAGER_H_
diff --git a/gpu/command_buffer/service/buffer_manager_unittest.cc b/gpu/command_buffer/service/buffer_manager_unittest.cc
new file mode 100644
index 0000000..77f32dc
--- /dev/null
+++ b/gpu/command_buffer/service/buffer_manager_unittest.cc
@@ -0,0 +1,423 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::Return;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+class BufferManagerTestBase : public GpuServiceTest {
+ protected:
+ void SetUpBase(
+ MemoryTracker* memory_tracker,
+ FeatureInfo* feature_info,
+ const char* extensions) {
+ GpuServiceTest::SetUp();
+ if (feature_info) {
+ TestHelper::SetupFeatureInfoInitExpectations(gl_.get(), extensions);
+ feature_info->Initialize();
+ }
+ error_state_.reset(new MockErrorState());
+ manager_.reset(new BufferManager(memory_tracker, feature_info));
+ }
+
+ virtual void TearDown() {
+ manager_->Destroy(false);
+ manager_.reset();
+ error_state_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ GLenum GetTarget(const Buffer* buffer) const {
+ return buffer->target();
+ }
+
+ void DoBufferData(
+ Buffer* buffer, GLsizeiptr size, GLenum usage, const GLvoid* data,
+ GLenum error) {
+ TestHelper::DoBufferData(
+ gl_.get(), error_state_.get(), manager_.get(),
+ buffer, size, usage, data, error);
+ }
+
+ bool DoBufferSubData(
+ Buffer* buffer, GLintptr offset, GLsizeiptr size,
+ const GLvoid* data) {
+ bool success = true;
+ if (!buffer->CheckRange(offset, size)) {
+ EXPECT_CALL(*error_state_, SetGLError(_, _, GL_INVALID_VALUE, _, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ success = false;
+ } else if (!buffer->IsClientSideArray()) {
+ EXPECT_CALL(*gl_, BufferSubData(
+ buffer->target(), offset, size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ manager_->DoBufferSubData(
+ error_state_.get(), buffer, offset, size, data);
+ return success;
+ }
+
+ scoped_ptr<BufferManager> manager_;
+ scoped_ptr<MockErrorState> error_state_;
+};
+
+class BufferManagerTest : public BufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ SetUpBase(NULL, NULL, "");
+ }
+};
+
+class BufferManagerMemoryTrackerTest : public BufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ mock_memory_tracker_ = new StrictMock<MockMemoryTracker>();
+ SetUpBase(mock_memory_tracker_.get(), NULL, "");
+ }
+
+ scoped_refptr<MockMemoryTracker> mock_memory_tracker_;
+};
+
+class BufferManagerClientSideArraysTest : public BufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ feature_info_ = new FeatureInfo();
+ feature_info_->workarounds_.use_client_side_arrays_for_stream_buffers =
+ true;
+ SetUpBase(NULL, feature_info_.get(), "");
+ }
+
+ scoped_refptr<FeatureInfo> feature_info_;
+};
+
+#define EXPECT_MEMORY_ALLOCATION_CHANGE(old_size, new_size, pool) \
+ EXPECT_CALL(*mock_memory_tracker_.get(), \
+ TrackMemoryAllocatedChange(old_size, new_size, pool)) \
+ .Times(1).RetiresOnSaturation()
+
+TEST_F(BufferManagerTest, Basic) {
+ const GLuint kClientBuffer1Id = 1;
+ const GLuint kServiceBuffer1Id = 11;
+ const GLsizeiptr kBuffer1Size = 123;
+ const GLuint kClientBuffer2Id = 2;
+  // Check we can create a buffer.
+ manager_->CreateBuffer(kClientBuffer1Id, kServiceBuffer1Id);
+ // Check buffer got created.
+ Buffer* buffer1 = manager_->GetBuffer(kClientBuffer1Id);
+ ASSERT_TRUE(buffer1 != NULL);
+ EXPECT_EQ(0u, GetTarget(buffer1));
+ EXPECT_EQ(0, buffer1->size());
+ EXPECT_EQ(static_cast<GLenum>(GL_STATIC_DRAW), buffer1->usage());
+ EXPECT_FALSE(buffer1->IsDeleted());
+ EXPECT_FALSE(buffer1->IsClientSideArray());
+ EXPECT_EQ(kServiceBuffer1Id, buffer1->service_id());
+ GLuint client_id = 0;
+ EXPECT_TRUE(manager_->GetClientId(buffer1->service_id(), &client_id));
+ EXPECT_EQ(kClientBuffer1Id, client_id);
+ manager_->SetTarget(buffer1, GL_ELEMENT_ARRAY_BUFFER);
+ EXPECT_EQ(static_cast<GLenum>(GL_ELEMENT_ARRAY_BUFFER), GetTarget(buffer1));
+  // Check we can set its size.
+ DoBufferData(buffer1, kBuffer1Size, GL_DYNAMIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_EQ(kBuffer1Size, buffer1->size());
+ EXPECT_EQ(static_cast<GLenum>(GL_DYNAMIC_DRAW), buffer1->usage());
+ // Check we get nothing for a non-existent buffer.
+ EXPECT_TRUE(manager_->GetBuffer(kClientBuffer2Id) == NULL);
+  // Check that trying to remove a non-existent buffer does not crash.
+ manager_->RemoveBuffer(kClientBuffer2Id);
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, ::testing::Pointee(kServiceBuffer1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Check we can't get the buffer after we remove it.
+ manager_->RemoveBuffer(kClientBuffer1Id);
+ EXPECT_TRUE(manager_->GetBuffer(kClientBuffer1Id) == NULL);
+}
+
+TEST_F(BufferManagerMemoryTrackerTest, Basic) {
+ const GLuint kClientBuffer1Id = 1;
+ const GLuint kServiceBuffer1Id = 11;
+ const GLsizeiptr kBuffer1Size1 = 123;
+ const GLsizeiptr kBuffer1Size2 = 456;
+  // Check we can create a buffer.
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kManaged);
+ manager_->CreateBuffer(kClientBuffer1Id, kServiceBuffer1Id);
+ // Check buffer got created.
+ Buffer* buffer1 = manager_->GetBuffer(kClientBuffer1Id);
+ ASSERT_TRUE(buffer1 != NULL);
+ manager_->SetTarget(buffer1, GL_ELEMENT_ARRAY_BUFFER);
+  // Check we can set its size.
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, kBuffer1Size1, MemoryTracker::kManaged);
+ DoBufferData(buffer1, kBuffer1Size1, GL_DYNAMIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(kBuffer1Size1, 0, MemoryTracker::kManaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, kBuffer1Size2, MemoryTracker::kManaged);
+ DoBufferData(buffer1, kBuffer1Size2, GL_DYNAMIC_DRAW, NULL, GL_NO_ERROR);
+ // On delete it will get freed.
+ EXPECT_MEMORY_ALLOCATION_CHANGE(kBuffer1Size2, 0, MemoryTracker::kManaged);
+}
+
+TEST_F(BufferManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+  // Check we can create a buffer.
+ manager_->CreateBuffer(kClient1Id, kService1Id);
+ // Check buffer got created.
+ Buffer* buffer1 = manager_->GetBuffer(kClient1Id);
+ ASSERT_TRUE(buffer1 != NULL);
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_->Destroy(true);
+ // Check the resources were released.
+ buffer1 = manager_->GetBuffer(kClient1Id);
+ ASSERT_TRUE(buffer1 == NULL);
+}
+
+TEST_F(BufferManagerTest, DoBufferSubData) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint8 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_TRUE(DoBufferSubData(buffer, 0, sizeof(data), data));
+ EXPECT_TRUE(DoBufferSubData(buffer, sizeof(data), 0, data));
+ EXPECT_FALSE(DoBufferSubData(buffer, sizeof(data), 1, data));
+ EXPECT_FALSE(DoBufferSubData(buffer, 0, sizeof(data) + 1, data));
+ EXPECT_FALSE(DoBufferSubData(buffer, -1, sizeof(data), data));
+ EXPECT_FALSE(DoBufferSubData(buffer, 0, -1, data));
+ DoBufferData(buffer, 1, GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ const int size = 0x20000;
+ scoped_ptr<uint8[]> temp(new uint8[size]);
+ EXPECT_FALSE(DoBufferSubData(buffer, 0 - size, size, temp.get()));
+ EXPECT_FALSE(DoBufferSubData(buffer, 1, size / 2, temp.get()));
+}
+
+TEST_F(BufferManagerTest, GetRange) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint8 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ const char* buf =
+ static_cast<const char*>(buffer->GetRange(0, sizeof(data)));
+ ASSERT_TRUE(buf != NULL);
+ const char* buf1 =
+ static_cast<const char*>(buffer->GetRange(1, sizeof(data) - 1));
+ EXPECT_EQ(buf + 1, buf1);
+ EXPECT_TRUE(buffer->GetRange(sizeof(data), 1) == NULL);
+ EXPECT_TRUE(buffer->GetRange(0, sizeof(data) + 1) == NULL);
+ EXPECT_TRUE(buffer->GetRange(-1, sizeof(data)) == NULL);
+ EXPECT_TRUE(buffer->GetRange(-0, -1) == NULL);
+ const int size = 0x20000;
+ DoBufferData(buffer, size / 2, GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_TRUE(buffer->GetRange(0 - size, size) == NULL);
+ EXPECT_TRUE(buffer->GetRange(1, size / 2) == NULL);
+}
+
+TEST_F(BufferManagerTest, GetMaxValueForRangeUint8) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint8 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ const uint8 new_data[] = {100, 120, 110};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_TRUE(DoBufferSubData(buffer, 0, sizeof(data), data));
+ GLuint max_value;
+ // Check entire range succeeds.
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 0, 10, GL_UNSIGNED_BYTE, &max_value));
+ EXPECT_EQ(10u, max_value);
+ // Check sub range succeeds.
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 4, 3, GL_UNSIGNED_BYTE, &max_value));
+ EXPECT_EQ(6u, max_value);
+ // Check changing sub range succeeds.
+ EXPECT_TRUE(DoBufferSubData(buffer, 4, sizeof(new_data), new_data));
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 4, 3, GL_UNSIGNED_BYTE, &max_value));
+ EXPECT_EQ(120u, max_value);
+ max_value = 0;
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 0, 10, GL_UNSIGNED_BYTE, &max_value));
+ EXPECT_EQ(120u, max_value);
+ // Check out of range fails.
+ EXPECT_FALSE(buffer->GetMaxValueForRange(
+ 0, 11, GL_UNSIGNED_BYTE, &max_value));
+ EXPECT_FALSE(buffer->GetMaxValueForRange(
+ 10, 1, GL_UNSIGNED_BYTE, &max_value));
+}
+
+TEST_F(BufferManagerTest, GetMaxValueForRangeUint16) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint16 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ const uint16 new_data[] = {100, 120, 110};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_TRUE(DoBufferSubData(buffer, 0, sizeof(data), data));
+ GLuint max_value;
+ // Check entire range succeeds.
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 0, 10, GL_UNSIGNED_SHORT, &max_value));
+ EXPECT_EQ(10u, max_value);
+ // Check odd offset fails for GL_UNSIGNED_SHORT.
+ EXPECT_FALSE(buffer->GetMaxValueForRange(
+ 1, 10, GL_UNSIGNED_SHORT, &max_value));
+ // Check sub range succeeds.
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 8, 3, GL_UNSIGNED_SHORT, &max_value));
+ EXPECT_EQ(6u, max_value);
+ // Check changing sub range succeeds.
+ EXPECT_TRUE(DoBufferSubData(buffer, 8, sizeof(new_data), new_data));
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 8, 3, GL_UNSIGNED_SHORT, &max_value));
+ EXPECT_EQ(120u, max_value);
+ max_value = 0;
+ EXPECT_TRUE(buffer->GetMaxValueForRange(
+ 0, 10, GL_UNSIGNED_SHORT, &max_value));
+ EXPECT_EQ(120u, max_value);
+ // Check out of range fails.
+ EXPECT_FALSE(buffer->GetMaxValueForRange(
+ 0, 11, GL_UNSIGNED_SHORT, &max_value));
+ EXPECT_FALSE(buffer->GetMaxValueForRange(
+ 20, 1, GL_UNSIGNED_SHORT, &max_value));
+}
+
+TEST_F(BufferManagerTest, GetMaxValueForRangeUint32) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint32 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ const uint32 new_data[] = {100, 120, 110};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ EXPECT_TRUE(DoBufferSubData(buffer, 0, sizeof(data), data));
+ GLuint max_value;
+ // Check entire range succeeds.
+ EXPECT_TRUE(
+ buffer->GetMaxValueForRange(0, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(10u, max_value);
+  // Check non-aligned offsets fail for GL_UNSIGNED_INT.
+ EXPECT_FALSE(
+ buffer->GetMaxValueForRange(1, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_FALSE(
+ buffer->GetMaxValueForRange(2, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_FALSE(
+ buffer->GetMaxValueForRange(3, 10, GL_UNSIGNED_INT, &max_value));
+ // Check sub range succeeds.
+ EXPECT_TRUE(buffer->GetMaxValueForRange(16, 3, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(6u, max_value);
+ // Check changing sub range succeeds.
+ EXPECT_TRUE(DoBufferSubData(buffer, 16, sizeof(new_data), new_data));
+ EXPECT_TRUE(buffer->GetMaxValueForRange(16, 3, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(120u, max_value);
+ max_value = 0;
+ EXPECT_TRUE(buffer->GetMaxValueForRange(0, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(120u, max_value);
+ // Check out of range fails.
+ EXPECT_FALSE(
+ buffer->GetMaxValueForRange(0, 11, GL_UNSIGNED_INT, &max_value));
+ EXPECT_FALSE(
+ buffer->GetMaxValueForRange(40, 1, GL_UNSIGNED_INT, &max_value));
+}
+
+TEST_F(BufferManagerTest, UseDeletedBuffer) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint32 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ scoped_refptr<Buffer> buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer.get() != NULL);
+ manager_->SetTarget(buffer.get(), GL_ARRAY_BUFFER);
+ // Remove buffer
+ manager_->RemoveBuffer(kClientBufferId);
+ // Use it after removing
+ DoBufferData(buffer.get(), sizeof(data), GL_STATIC_DRAW, NULL, GL_NO_ERROR);
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, ::testing::Pointee(kServiceBufferId)))
+ .Times(1)
+ .RetiresOnSaturation();
+ buffer = NULL;
+}
+
+// Test buffers get shadowed when they are supposed to be.
+TEST_F(BufferManagerClientSideArraysTest, StreamBuffersAreShadowed) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ static const uint32 data[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ARRAY_BUFFER);
+ DoBufferData(buffer, sizeof(data), GL_STREAM_DRAW, data, GL_NO_ERROR);
+ EXPECT_TRUE(buffer->IsClientSideArray());
+ EXPECT_EQ(0, memcmp(data, buffer->GetRange(0, sizeof(data)), sizeof(data)));
+ DoBufferData(buffer, sizeof(data), GL_DYNAMIC_DRAW, data, GL_NO_ERROR);
+ EXPECT_FALSE(buffer->IsClientSideArray());
+}
+
+TEST_F(BufferManagerTest, MaxValueCacheClearedCorrectly) {
+ const GLuint kClientBufferId = 1;
+ const GLuint kServiceBufferId = 11;
+ const uint32 data1[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+ const uint32 data2[] = {11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
+ const uint32 data3[] = {30, 29, 28};
+ manager_->CreateBuffer(kClientBufferId, kServiceBufferId);
+ Buffer* buffer = manager_->GetBuffer(kClientBufferId);
+ ASSERT_TRUE(buffer != NULL);
+ manager_->SetTarget(buffer, GL_ELEMENT_ARRAY_BUFFER);
+ GLuint max_value;
+ // Load the buffer with some initial data, and then get the maximum value for
+ // a range, which has the side effect of caching it.
+ DoBufferData(buffer, sizeof(data1), GL_STATIC_DRAW, data1, GL_NO_ERROR);
+ EXPECT_TRUE(
+ buffer->GetMaxValueForRange(0, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(10u, max_value);
+ // Check that any cached values are invalidated if the buffer is reloaded
+ // with the same amount of data (but different content)
+ ASSERT_EQ(sizeof(data2), sizeof(data1));
+ DoBufferData(buffer, sizeof(data2), GL_STATIC_DRAW, data2, GL_NO_ERROR);
+ EXPECT_TRUE(
+ buffer->GetMaxValueForRange(0, 10, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(20u, max_value);
+ // Check that any cached values are invalidated if the buffer is reloaded
+ // with entirely different content.
+ ASSERT_NE(sizeof(data3), sizeof(data1));
+ DoBufferData(buffer, sizeof(data3), GL_STATIC_DRAW, data3, GL_NO_ERROR);
+ EXPECT_TRUE(
+ buffer->GetMaxValueForRange(0, 3, GL_UNSIGNED_INT, &max_value));
+ EXPECT_EQ(30u, max_value);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/cmd_buffer_engine.h b/gpu/command_buffer/service/cmd_buffer_engine.h
new file mode 100644
index 0000000..75e6069
--- /dev/null
+++ b/gpu/command_buffer/service/cmd_buffer_engine.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines the CommandBufferEngine class, providing the main loop for
+// the service, exposing the RPC API, managing the command parser.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CMD_BUFFER_ENGINE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CMD_BUFFER_ENGINE_H_
+
+#include "base/basictypes.h"
+#include "gpu/command_buffer/common/buffer.h"
+
+namespace gpu {
+
+class CommandBufferEngine {
+ public:
+ CommandBufferEngine() {
+ }
+
+ virtual ~CommandBufferEngine() {
+ }
+
+ // Gets the base address and size of a registered shared memory buffer.
+ // Parameters:
+ // shm_id: the identifier for the shared memory buffer.
+ virtual scoped_refptr<gpu::Buffer> GetSharedMemoryBuffer(int32 shm_id) = 0;
+
+ // Sets the token value.
+ virtual void set_token(int32 token) = 0;
+
+ // Sets the shared memory buffer used for commands.
+ virtual bool SetGetBuffer(int32 transfer_buffer_id) = 0;
+
+  // Sets the "get" pointer. Returns false if offset is out of range.
+ virtual bool SetGetOffset(int32 offset) = 0;
+
+ // Gets the "get" pointer.
+ virtual int32 GetGetOffset() = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CommandBufferEngine);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_CMD_BUFFER_ENGINE_H_
diff --git a/gpu/command_buffer/service/cmd_parser.cc b/gpu/command_buffer/service/cmd_parser.cc
new file mode 100644
index 0000000..ffcdfff
--- /dev/null
+++ b/gpu/command_buffer/service/cmd_parser.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of the command parser.
+
+#include "gpu/command_buffer/service/cmd_parser.h"
+
+#include "base/logging.h"
+#include "base/debug/trace_event.h"
+
+namespace gpu {
+
+CommandParser::CommandParser(AsyncAPIInterface* handler)
+ : get_(0),
+ put_(0),
+ buffer_(NULL),
+ entry_count_(0),
+ handler_(handler) {
+}
+
+void CommandParser::SetBuffer(
+ void* shm_address,
+ size_t shm_size,
+ ptrdiff_t offset,
+ size_t size) {
+ // check proper alignments.
+ DCHECK_EQ(0, (reinterpret_cast<intptr_t>(shm_address)) % 4);
+ DCHECK_EQ(0, offset % 4);
+ DCHECK_EQ(0u, size % 4);
+ // check that the command buffer fits into the memory buffer.
+ DCHECK_GE(shm_size, offset + size);
+ get_ = 0;
+ put_ = 0;
+ char* buffer_begin = static_cast<char*>(shm_address) + offset;
+ buffer_ = reinterpret_cast<CommandBufferEntry*>(buffer_begin);
+ entry_count_ = size / 4;
+}
+
+// Processes commands by forwarding the current slice of the command buffer to
+// the handler.
+// Note that:
+// - validation needs to happen on a copy of the data (to avoid race
+//   conditions); the handler's DoCommands validates each command header and
+//   leaves validation of the arguments to the individual command handlers.
+// - get_ is modified *after* the commands have been executed.
+error::Error CommandParser::ProcessCommands(int num_commands) {
+ int num_entries = put_ < get_ ? entry_count_ - get_ : put_ - get_;
+ int entries_processed = 0;
+
+ error::Error result = handler_->DoCommands(
+ num_commands, buffer_ + get_, num_entries, &entries_processed);
+
+ get_ += entries_processed;
+ if (get_ == entry_count_)
+ get_ = 0;
+
+ return result;
+}
+
+// Processes all the commands while the buffer is not empty, stopping if an
+// error is encountered.
+error::Error CommandParser::ProcessAllCommands() {
+ while (!IsEmpty()) {
+ error::Error error = ProcessCommands(kParseCommandsSlice);
+ if (error)
+ return error;
+ }
+ return error::kNoError;
+}
+
+// Decode multiple commands, and call the corresponding GL functions.
+// NOTE: buffer is a pointer into the command buffer. As such, its contents
+// could be changed by a (malicious) client at any time, so if validation has
+// to happen, it should operate on a copy of the data.
+error::Error AsyncAPIInterface::DoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ int commands_to_process = num_commands;
+ error::Error result = error::kNoError;
+ const CommandBufferEntry* cmd_data =
+ static_cast<const CommandBufferEntry*>(buffer);
+ int process_pos = 0;
+
+ while (process_pos < num_entries && result == error::kNoError &&
+ commands_to_process--) {
+ CommandHeader header = cmd_data->value_header;
+ if (header.size == 0) {
+ DVLOG(1) << "Error: zero sized command in command buffer";
+ return error::kInvalidSize;
+ }
+
+ if (static_cast<int>(header.size) + process_pos > num_entries) {
+ DVLOG(1) << "Error: get offset out of bounds";
+ return error::kOutOfBounds;
+ }
+
+ const unsigned int command = header.command;
+ const unsigned int arg_count = header.size - 1;
+
+ result = DoCommand(command, arg_count, cmd_data);
+
+ if (result != error::kDeferCommandUntilLater) {
+ process_pos += header.size;
+ cmd_data += header.size;
+ }
+ }
+
+ if (entries_processed)
+ *entries_processed = process_pos;
+
+ return result;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/cmd_parser.h b/gpu/command_buffer/service/cmd_parser.h
new file mode 100644
index 0000000..ac52d86
--- /dev/null
+++ b/gpu/command_buffer/service/cmd_parser.h
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the command parser class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CMD_PARSER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CMD_PARSER_H_
+
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class AsyncAPIInterface;
+
+// Command parser class. This class parses commands from a shared memory
+// buffer, to implement some asynchronous RPC mechanism.
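+//
+// Typical usage (an illustrative sketch only; |handler|, |shm_address| and the
+// offsets/sizes are assumed to be provided by the surrounding service code):
+//   CommandParser parser(handler);
+//   parser.SetBuffer(shm_address, shm_size, offset, size_in_bytes);
+//   parser.set_put(put_offset);
+//   error::Error error = parser.ProcessAllCommands();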
+class GPU_EXPORT CommandParser {
+ public:
+ static const int kParseCommandsSlice = 20;
+
+ explicit CommandParser(AsyncAPIInterface* handler);
+
+ // Sets the buffer to read commands from.
+ void SetBuffer(
+ void* shm_address,
+ size_t shm_size,
+ ptrdiff_t offset,
+ size_t size);
+
+ // Gets the "get" pointer. The get pointer is an index into the command
+ // buffer considered as an array of CommandBufferEntry.
+ CommandBufferOffset get() const { return get_; }
+
+ // Sets the "get" pointer. The get pointer is an index into the command buffer
+ // considered as an array of CommandBufferEntry.
+ bool set_get(CommandBufferOffset get) {
+ if (get >= 0 && get < entry_count_) {
+ get_ = get;
+ return true;
+ }
+ return false;
+ }
+
+ // Sets the "put" pointer. The put pointer is an index into the command
+ // buffer considered as an array of CommandBufferEntry.
+ void set_put(CommandBufferOffset put) { put_ = put; }
+
+ // Gets the "put" pointer. The put pointer is an index into the command
+ // buffer considered as an array of CommandBufferEntry.
+ CommandBufferOffset put() const { return put_; }
+
+ // Checks whether there are commands to process.
+ bool IsEmpty() const { return put_ == get_; }
+
+  // Processes up to |num_commands| commands, updating the get pointer as
+  // entries are consumed.
+ error::Error ProcessCommands(int num_commands);
+
+ // Processes all commands until get == put.
+ error::Error ProcessAllCommands();
+
+ private:
+ CommandBufferOffset get_;
+ CommandBufferOffset put_;
+ CommandBufferEntry* buffer_;
+ int32 entry_count_;
+ AsyncAPIInterface* handler_;
+};
+
+// This class defines the interface for an asynchronous API handler that
+// is responsible for de-multiplexing commands and their arguments.
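+//
+// A concrete handler (an illustrative sketch only, not an implementation that
+// exists in this patch) overrides the pure virtual methods:
+//   class MyHandler : public AsyncAPIInterface {
+//    public:
+//     virtual error::Error DoCommand(unsigned int command,
+//                                    unsigned int arg_count,
+//                                    const void* cmd_data) {
+//       return error::kNoError;  // Dispatch on |command| here.
+//     }
+//     virtual const char* GetCommandName(unsigned int command_id) const {
+//       return "unknown";
+//     }
+//   };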
+class GPU_EXPORT AsyncAPIInterface {
+ public:
+ AsyncAPIInterface() {}
+ virtual ~AsyncAPIInterface() {}
+
+ // Executes a single command.
+ // Parameters:
+ // command: the command index.
+ // arg_count: the number of CommandBufferEntry arguments.
+ // cmd_data: the command data.
+ // Returns:
+  //     error::kNoError if no error was found, or another error::Error value
+  //     otherwise.
+ virtual error::Error DoCommand(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data) = 0;
+
+ // Executes multiple commands.
+ // Parameters:
+ // num_commands: maximum number of commands to execute from buffer.
+ // buffer: pointer to first command entry to process.
+ // num_entries: number of sequential command buffer entries in buffer.
+  //   entries_processed: if non-NULL, set to the number of entries processed.
+ virtual error::Error DoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed);
+
+  // Returns a name for a command. Useful for logging / debugging.
+ virtual const char* GetCommandName(unsigned int command_id) const = 0;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_CMD_PARSER_H_
diff --git a/gpu/command_buffer/service/cmd_parser_test.cc b/gpu/command_buffer/service/cmd_parser_test.cc
new file mode 100644
index 0000000..d880830
--- /dev/null
+++ b/gpu/command_buffer/service/cmd_parser_test.cc
@@ -0,0 +1,313 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the command parser.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/cmd_parser.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+using testing::_;
+using testing::Invoke;
+using testing::Mock;
+using testing::Return;
+using testing::Sequence;
+using testing::SetArgPointee;
+using testing::Truly;
+
+// Test fixture for CommandParser test - Creates a mock AsyncAPIInterface, and
+// a fixed size memory buffer. Also provides a simple API to create a
+// CommandParser.
+class CommandParserTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock(false));
+ buffer_entry_count_ = 20;
+ buffer_.reset(new CommandBufferEntry[buffer_entry_count_]);
+ }
+ virtual void TearDown() {}
+
+ void AddDoCommandsExpect(error::Error _return,
+ unsigned int num_commands,
+ int num_entries,
+ int num_processed) {
+ EXPECT_CALL(*api_mock_, DoCommands(num_commands, _, num_entries, _))
+ .InSequence(sequence_)
+ .WillOnce(DoAll(SetArgPointee<3>(num_processed), Return(_return)));
+ }
+
+ // Creates a parser, with a buffer of the specified size (in entries).
+ CommandParser *MakeParser(unsigned int entry_count) {
+ size_t shm_size = buffer_entry_count_ *
+ sizeof(CommandBufferEntry); // NOLINT
+ size_t command_buffer_size = entry_count *
+ sizeof(CommandBufferEntry); // NOLINT
+ DCHECK_LE(command_buffer_size, shm_size);
+ CommandParser* parser = new CommandParser(api_mock());
+
+ parser->SetBuffer(buffer(), shm_size, 0, command_buffer_size);
+ return parser;
+ }
+
+ unsigned int buffer_entry_count() { return 20; }
+ AsyncAPIMock *api_mock() { return api_mock_.get(); }
+ CommandBufferEntry *buffer() { return buffer_.get(); }
+ private:
+ unsigned int buffer_entry_count_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<CommandBufferEntry[]> buffer_;
+ Sequence sequence_;
+};
+
+// Tests initialization conditions.
+TEST_F(CommandParserTest, TestInit) {
+ scoped_ptr<CommandParser> parser(MakeParser(10));
+ EXPECT_EQ(0, parser->get());
+ EXPECT_EQ(0, parser->put());
+ EXPECT_TRUE(parser->IsEmpty());
+}
+
+// Tests simple commands.
+TEST_F(CommandParserTest, TestSimple) {
+ scoped_ptr<CommandParser> parser(MakeParser(10));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+ // add a single command, no args
+ header.size = 1;
+ header.command = 123;
+ buffer()[put++].value_header = header;
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ AddDoCommandsExpect(error::kNoError, 1, 1, 1);
+ EXPECT_EQ(error::kNoError, parser->ProcessCommands(1));
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ // add a single command, 2 args
+ header.size = 3;
+ header.command = 456;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 2134;
+ buffer()[put++].value_float = 1.f;
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ AddDoCommandsExpect(error::kNoError, 1, 3, 3);
+ EXPECT_EQ(error::kNoError, parser->ProcessCommands(1));
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+}
+
+// Tests having multiple commands in the buffer.
+TEST_F(CommandParserTest, TestMultipleCommands) {
+ scoped_ptr<CommandParser> parser(MakeParser(10));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+ // add 2 commands, test with single ProcessCommands()
+ header.size = 2;
+ header.command = 789;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 5151;
+
+ CommandBufferOffset put_cmd2 = put;
+ header.size = 2;
+ header.command = 876;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 3434;
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ // Process up to 1 command. 4 entries remaining.
+ AddDoCommandsExpect(error::kNoError, 1, 4, 2);
+ EXPECT_EQ(error::kNoError, parser->ProcessCommands(1));
+ EXPECT_EQ(put_cmd2, parser->get());
+
+ // Process up to 1 command. 2 entries remaining.
+ AddDoCommandsExpect(error::kNoError, 1, 2, 2);
+ EXPECT_EQ(error::kNoError, parser->ProcessCommands(1));
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ // add 2 commands again, test with ProcessAllCommands()
+ header.size = 2;
+ header.command = 123;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 5656;
+
+ header.size = 2;
+ header.command = 321;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 7878;
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ // 4 entries remaining.
+ AddDoCommandsExpect(
+ error::kNoError, CommandParser::kParseCommandsSlice, 4, 4);
+ EXPECT_EQ(error::kNoError, parser->ProcessAllCommands());
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+}
+
+// Tests that the parser will wrap correctly at the end of the buffer.
+TEST_F(CommandParserTest, TestWrap) {
+ scoped_ptr<CommandParser> parser(MakeParser(5));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+ // add 3 commands with no args (1 word each)
+ for (unsigned int i = 0; i < 3; ++i) {
+ header.size = 1;
+ header.command = i;
+ buffer()[put++].value_header = header;
+ }
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ // Process up to 10 commands. 3 entries remaining to put.
+ AddDoCommandsExpect(error::kNoError, 10, 3, 3);
+ EXPECT_EQ(error::kNoError, parser->ProcessCommands(10));
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ // add 1 command with 1 arg (2 words). That should put us at the end of the
+ // buffer.
+ header.size = 2;
+ header.command = 3;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 5;
+
+ DCHECK_EQ(5, put);
+ put = 0;
+
+ // add 1 command with 1 arg (2 words).
+ header.size = 2;
+ header.command = 4;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 6;
+
+ // 2 entries remaining to end of buffer.
+ AddDoCommandsExpect(
+ error::kNoError, CommandParser::kParseCommandsSlice, 2, 2);
+ // 2 entries remaining to put.
+ AddDoCommandsExpect(
+ error::kNoError, CommandParser::kParseCommandsSlice, 2, 2);
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ EXPECT_EQ(error::kNoError, parser->ProcessAllCommands());
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+}
+
+// Tests error conditions.
+TEST_F(CommandParserTest, TestError) {
+ const unsigned int kNumEntries = 5;
+ scoped_ptr<CommandParser> parser(MakeParser(kNumEntries));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+ EXPECT_FALSE(parser->set_get(-1));
+ EXPECT_FALSE(parser->set_get(kNumEntries));
+
+ // Generate a command with size 0.
+ header.size = 0;
+ header.command = 3;
+ buffer()[put++].value_header = header;
+
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ AddDoCommandsExpect(
+ error::kInvalidSize, CommandParser::kParseCommandsSlice, 1, 0);
+ EXPECT_EQ(error::kInvalidSize,
+ parser->ProcessAllCommands());
+ // check that no DoCommand call was made.
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ parser.reset(MakeParser(5));
+ put = parser->put();
+
+ // Generate a command with size 6, extends beyond the end of the buffer.
+ header.size = 6;
+ header.command = 3;
+ buffer()[put++].value_header = header;
+
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+
+ AddDoCommandsExpect(
+ error::kOutOfBounds, CommandParser::kParseCommandsSlice, 1, 0);
+ EXPECT_EQ(error::kOutOfBounds,
+ parser->ProcessAllCommands());
+ // check that no DoCommand call was made.
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ parser.reset(MakeParser(5));
+ put = parser->put();
+
+ // Generates 2 commands.
+ header.size = 1;
+ header.command = 3;
+ buffer()[put++].value_header = header;
+ CommandBufferOffset put_post_fail = put;
+ header.size = 1;
+ header.command = 4;
+ buffer()[put++].value_header = header;
+
+ parser->set_put(put);
+ EXPECT_EQ(put, parser->put());
+ // have the first command fail to parse.
+ AddDoCommandsExpect(
+ error::kUnknownCommand, CommandParser::kParseCommandsSlice, 2, 1);
+ EXPECT_EQ(error::kUnknownCommand,
+ parser->ProcessAllCommands());
+ // check that only one command was executed, and that get reflects that
+ // correctly.
+ EXPECT_EQ(put_post_fail, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+ // make the second one succeed, and check that the parser recovered fine.
+ AddDoCommandsExpect(
+ error::kNoError, CommandParser::kParseCommandsSlice, 1, 1);
+ EXPECT_EQ(error::kNoError, parser->ProcessAllCommands());
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+}
+
+TEST_F(CommandParserTest, SetBuffer) {
+ scoped_ptr<CommandParser> parser(MakeParser(3));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+  // add a single command with one arg
+ header.size = 2;
+ header.command = 123;
+ buffer()[put++].value_header = header;
+ buffer()[put++].value_int32 = 456;
+ parser->set_put(put);
+
+ AddDoCommandsExpect(
+ error::kNoError, CommandParser::kParseCommandsSlice, 2, 2);
+ EXPECT_EQ(error::kNoError, parser->ProcessAllCommands());
+ // We should have advanced 2 entries
+ EXPECT_EQ(2, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+
+ scoped_ptr<CommandBufferEntry[]> buffer2(new CommandBufferEntry[2]);
+ parser->SetBuffer(
+ buffer2.get(), sizeof(CommandBufferEntry) * 2, 0,
+ sizeof(CommandBufferEntry) * 2);
+ // The put and get should have reset to 0.
+ EXPECT_EQ(0, parser->get());
+ EXPECT_EQ(0, parser->put());
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/command_buffer_service.cc b/gpu/command_buffer/service/command_buffer_service.cc
new file mode 100644
index 0000000..2c732c6
--- /dev/null
+++ b/gpu/command_buffer/service/command_buffer_service.cc
@@ -0,0 +1,192 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/command_buffer_service.h"
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/debug/trace_event.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/common/command_buffer_shared.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+
+using ::base::SharedMemory;
+
+namespace gpu {
+
+CommandBufferService::CommandBufferService(
+ TransferBufferManagerInterface* transfer_buffer_manager)
+ : ring_buffer_id_(-1),
+ shared_state_(NULL),
+ num_entries_(0),
+ get_offset_(0),
+ put_offset_(0),
+ transfer_buffer_manager_(transfer_buffer_manager),
+ token_(0),
+ generation_(0),
+ error_(error::kNoError),
+ context_lost_reason_(error::kUnknown) {
+}
+
+CommandBufferService::~CommandBufferService() {
+}
+
+bool CommandBufferService::Initialize() {
+ return true;
+}
+
+CommandBufferService::State CommandBufferService::GetLastState() {
+ State state;
+ state.num_entries = num_entries_;
+ state.get_offset = get_offset_;
+ state.put_offset = put_offset_;
+ state.token = token_;
+ state.error = error_;
+ state.context_lost_reason = context_lost_reason_;
+ state.generation = ++generation_;
+
+ return state;
+}
+
+int32 CommandBufferService::GetLastToken() {
+ return GetLastState().token;
+}
+
+void CommandBufferService::UpdateState() {
+ if (shared_state_) {
+ CommandBufferService::State state = GetLastState();
+ shared_state_->Write(state);
+ }
+}
+
+void CommandBufferService::WaitForTokenInRange(int32 start, int32 end) {
+ DCHECK(error_ != error::kNoError || InRange(start, end, token_));
+}
+
+void CommandBufferService::WaitForGetOffsetInRange(int32 start, int32 end) {
+ DCHECK(error_ != error::kNoError || InRange(start, end, get_offset_));
+}
+
+void CommandBufferService::Flush(int32 put_offset) {
+ if (put_offset < 0 || put_offset > num_entries_) {
+ error_ = gpu::error::kOutOfBounds;
+ return;
+ }
+
+ put_offset_ = put_offset;
+
+ if (!put_offset_change_callback_.is_null())
+ put_offset_change_callback_.Run();
+}
+
+void CommandBufferService::SetGetBuffer(int32 transfer_buffer_id) {
+ DCHECK_EQ(-1, ring_buffer_id_);
+ DCHECK_EQ(put_offset_, get_offset_); // Only if it's empty.
+ // If the buffer is invalid we handle it gracefully.
+ // This means ring_buffer_ can be NULL.
+ ring_buffer_ = GetTransferBuffer(transfer_buffer_id);
+ ring_buffer_id_ = transfer_buffer_id;
+ int32 size = ring_buffer_.get() ? ring_buffer_->size() : 0;
+ num_entries_ = size / sizeof(CommandBufferEntry);
+ put_offset_ = 0;
+ SetGetOffset(0);
+ if (!get_buffer_change_callback_.is_null()) {
+ get_buffer_change_callback_.Run(ring_buffer_id_);
+ }
+
+ UpdateState();
+}
+
+void CommandBufferService::SetSharedStateBuffer(
+ scoped_ptr<BufferBacking> shared_state_buffer) {
+ shared_state_buffer_ = shared_state_buffer.Pass();
+ DCHECK(shared_state_buffer_->GetSize() >= sizeof(*shared_state_));
+
+ shared_state_ =
+ static_cast<CommandBufferSharedState*>(shared_state_buffer_->GetMemory());
+
+ UpdateState();
+}
+
+void CommandBufferService::SetGetOffset(int32 get_offset) {
+ DCHECK(get_offset >= 0 && get_offset < num_entries_);
+ get_offset_ = get_offset;
+}
+
+scoped_refptr<Buffer> CommandBufferService::CreateTransferBuffer(size_t size,
+ int32* id) {
+ *id = -1;
+
+ scoped_ptr<SharedMemory> shared_memory(new SharedMemory());
+ if (!shared_memory->CreateAndMapAnonymous(size))
+ return NULL;
+
+ static int32 next_id = 1;
+ *id = next_id++;
+
+ if (!RegisterTransferBuffer(
+ *id, MakeBackingFromSharedMemory(shared_memory.Pass(), size))) {
+ *id = -1;
+ return NULL;
+ }
+
+ return GetTransferBuffer(*id);
+}
+
+void CommandBufferService::DestroyTransferBuffer(int32 id) {
+ transfer_buffer_manager_->DestroyTransferBuffer(id);
+ if (id == ring_buffer_id_) {
+ ring_buffer_id_ = -1;
+ ring_buffer_ = NULL;
+ num_entries_ = 0;
+ get_offset_ = 0;
+ put_offset_ = 0;
+ }
+}
+
+scoped_refptr<Buffer> CommandBufferService::GetTransferBuffer(int32 id) {
+ return transfer_buffer_manager_->GetTransferBuffer(id);
+}
+
+bool CommandBufferService::RegisterTransferBuffer(
+ int32 id,
+ scoped_ptr<BufferBacking> buffer) {
+ return transfer_buffer_manager_->RegisterTransferBuffer(id, buffer.Pass());
+}
+
+void CommandBufferService::SetToken(int32 token) {
+ token_ = token;
+ UpdateState();
+}
+
+void CommandBufferService::SetParseError(error::Error error) {
+ if (error_ == error::kNoError) {
+ error_ = error;
+ if (!parse_error_callback_.is_null())
+ parse_error_callback_.Run();
+ }
+}
+
+void CommandBufferService::SetContextLostReason(
+ error::ContextLostReason reason) {
+ context_lost_reason_ = reason;
+}
+
+void CommandBufferService::SetPutOffsetChangeCallback(
+ const base::Closure& callback) {
+ put_offset_change_callback_ = callback;
+}
+
+void CommandBufferService::SetGetBufferChangeCallback(
+ const GetBufferChangedCallback& callback) {
+ get_buffer_change_callback_ = callback;
+}
+
+void CommandBufferService::SetParseErrorCallback(
+ const base::Closure& callback) {
+ parse_error_callback_ = callback;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/command_buffer_service.h b/gpu/command_buffer/service/command_buffer_service.h
new file mode 100644
index 0000000..ac23301
--- /dev/null
+++ b/gpu/command_buffer/service/command_buffer_service.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_COMMAND_BUFFER_SERVICE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_COMMAND_BUFFER_SERVICE_H_
+
+#include "base/callback.h"
+#include "base/memory/shared_memory.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/common/command_buffer_shared.h"
+
+namespace gpu {
+
+class TransferBufferManagerInterface;
+
+class GPU_EXPORT CommandBufferServiceBase : public CommandBuffer {
+ public:
+ // Sets the current get offset. This can be called from any thread.
+ virtual void SetGetOffset(int32 get_offset) = 0;
+
+ // Get the transfer buffer associated with an ID. Returns a null buffer for
+ // ID 0.
+ virtual scoped_refptr<gpu::Buffer> GetTransferBuffer(int32 id) = 0;
+
+ // Allows the reader to update the current token value.
+ virtual void SetToken(int32 token) = 0;
+
+ // Allows the reader to set the current parse error.
+ virtual void SetParseError(error::Error) = 0;
+
+ // Allows the reader to set the current context lost reason.
+ // NOTE: if calling this in conjunction with SetParseError,
+ // call this first.
+ virtual void SetContextLostReason(error::ContextLostReason) = 0;
+};
+
+// An object that implements a shared memory command buffer and a synchronous
+// API to manage the put and get pointers.
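+//
+// Typical setup (an illustrative sketch; mirrors the call sequence used in
+// command_buffer_service_unittest.cc):
+//   CommandBufferService command_buffer(transfer_buffer_manager);
+//   command_buffer.Initialize();
+//   int32 id = -1;
+//   command_buffer.CreateTransferBuffer(size, &id);
+//   command_buffer.SetGetBuffer(id);
+//   command_buffer.Flush(put_offset);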
+class GPU_EXPORT CommandBufferService : public CommandBufferServiceBase {
+ public:
+ typedef base::Callback<bool(int32)> GetBufferChangedCallback;
+ explicit CommandBufferService(
+ TransferBufferManagerInterface* transfer_buffer_manager);
+ virtual ~CommandBufferService();
+
+ // CommandBuffer implementation:
+ virtual bool Initialize() OVERRIDE;
+ virtual State GetLastState() OVERRIDE;
+ virtual int32 GetLastToken() OVERRIDE;
+ virtual void Flush(int32 put_offset) OVERRIDE;
+ virtual void WaitForTokenInRange(int32 start, int32 end) OVERRIDE;
+ virtual void WaitForGetOffsetInRange(int32 start, int32 end) OVERRIDE;
+ virtual void SetGetBuffer(int32 transfer_buffer_id) OVERRIDE;
+ virtual scoped_refptr<Buffer> CreateTransferBuffer(size_t size,
+ int32* id) OVERRIDE;
+ virtual void DestroyTransferBuffer(int32 id) OVERRIDE;
+
+ // CommandBufferServiceBase implementation:
+ virtual void SetGetOffset(int32 get_offset) OVERRIDE;
+ virtual scoped_refptr<Buffer> GetTransferBuffer(int32 id) OVERRIDE;
+ virtual void SetToken(int32 token) OVERRIDE;
+ virtual void SetParseError(error::Error error) OVERRIDE;
+ virtual void SetContextLostReason(error::ContextLostReason) OVERRIDE;
+
+  // Sets a callback that is called whenever the put offset is changed. When
+  // called, the callback must not return until some progress has been made
+  // (unless the command buffer is empty), i.e. the get offset must have
+  // changed. It need not process the entire command buffer though.
+ // This allows concurrency between the writer and the reader while giving the
+ // writer a means of waiting for the reader to make some progress before
+ // attempting to write more to the command buffer. Takes ownership of
+ // callback.
+ virtual void SetPutOffsetChangeCallback(const base::Closure& callback);
+ // Sets a callback that is called whenever the get buffer is changed.
+ virtual void SetGetBufferChangeCallback(
+ const GetBufferChangedCallback& callback);
+ virtual void SetParseErrorCallback(const base::Closure& callback);
+
+  // Set up the shared memory that shared state should be copied into.
+ void SetSharedStateBuffer(scoped_ptr<BufferBacking> shared_state_buffer);
+
+ // Copy the current state into the shared state transfer buffer.
+ void UpdateState();
+
+  // Registers an existing shared memory object under the given ID, which can
+  // then be used to identify it in the command buffer.
+ bool RegisterTransferBuffer(int32 id, scoped_ptr<BufferBacking> buffer);
+
+ private:
+ int32 ring_buffer_id_;
+ scoped_refptr<Buffer> ring_buffer_;
+ scoped_ptr<BufferBacking> shared_state_buffer_;
+ CommandBufferSharedState* shared_state_;
+ int32 num_entries_;
+ int32 get_offset_;
+ int32 put_offset_;
+ base::Closure put_offset_change_callback_;
+ GetBufferChangedCallback get_buffer_change_callback_;
+ base::Closure parse_error_callback_;
+ TransferBufferManagerInterface* transfer_buffer_manager_;
+ int32 token_;
+ uint32 generation_;
+ error::Error error_;
+ error::ContextLostReason context_lost_reason_;
+
+ DISALLOW_COPY_AND_ASSIGN(CommandBufferService);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_COMMAND_BUFFER_SERVICE_H_
diff --git a/gpu/command_buffer/service/command_buffer_service_unittest.cc b/gpu/command_buffer/service/command_buffer_service_unittest.cc
new file mode 100644
index 0000000..229aafa
--- /dev/null
+++ b/gpu/command_buffer/service/command_buffer_service_unittest.cc
@@ -0,0 +1,156 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/threading/thread.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using base::SharedMemory;
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgumentPointee;
+using testing::StrictMock;
+
+namespace gpu {
+
+class CommandBufferServiceTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ EXPECT_TRUE(manager->Initialize());
+ }
+ command_buffer_.reset(
+ new CommandBufferService(transfer_buffer_manager_.get()));
+ EXPECT_TRUE(command_buffer_->Initialize());
+ }
+
+ int32 GetGetOffset() {
+ return command_buffer_->GetLastState().get_offset;
+ }
+
+ int32 GetPutOffset() {
+ return command_buffer_->GetLastState().put_offset;
+ }
+
+ int32 GetToken() {
+ return command_buffer_->GetLastState().token;
+ }
+
+ int32 GetError() {
+ return command_buffer_->GetLastState().error;
+ }
+
+ bool Initialize(size_t size) {
+ int32 id;
+ command_buffer_->CreateTransferBuffer(size, &id);
+ EXPECT_GT(id, 0);
+ command_buffer_->SetGetBuffer(id);
+ return true;
+ }
+
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<CommandBufferService> command_buffer_;
+};
+
+TEST_F(CommandBufferServiceTest, InitializesCommandBuffer) {
+ EXPECT_TRUE(Initialize(1024));
+ CommandBuffer::State state = command_buffer_->GetLastState();
+ EXPECT_EQ(0, state.get_offset);
+ EXPECT_EQ(0, state.put_offset);
+ EXPECT_EQ(0, state.token);
+ EXPECT_EQ(error::kNoError, state.error);
+}
+
+namespace {
+
+class CallbackTest {
+ public:
+ virtual void PutOffsetChanged() = 0;
+ virtual bool GetBufferChanged(int32 id) = 0;
+};
+
+class MockCallbackTest : public CallbackTest {
+ public:
+ MOCK_METHOD0(PutOffsetChanged, void());
+ MOCK_METHOD1(GetBufferChanged, bool(int32));
+};
+
+} // anonymous namespace
+
+TEST_F(CommandBufferServiceTest, CanSyncGetAndPutOffset) {
+ Initialize(1024);
+
+ scoped_ptr<StrictMock<MockCallbackTest> > change_callback(
+ new StrictMock<MockCallbackTest>);
+ command_buffer_->SetPutOffsetChangeCallback(
+ base::Bind(
+ &CallbackTest::PutOffsetChanged,
+ base::Unretained(change_callback.get())));
+
+ EXPECT_CALL(*change_callback, PutOffsetChanged());
+ command_buffer_->Flush(2);
+ EXPECT_EQ(0, GetGetOffset());
+ EXPECT_EQ(2, GetPutOffset());
+
+ EXPECT_CALL(*change_callback, PutOffsetChanged());
+ command_buffer_->Flush(4);
+ EXPECT_EQ(0, GetGetOffset());
+ EXPECT_EQ(4, GetPutOffset());
+
+ command_buffer_->SetGetOffset(2);
+ EXPECT_EQ(2, GetGetOffset());
+ EXPECT_CALL(*change_callback, PutOffsetChanged());
+ command_buffer_->Flush(6);
+
+ command_buffer_->Flush(-1);
+ EXPECT_NE(error::kNoError, GetError());
+ command_buffer_->Flush(1024);
+ EXPECT_NE(error::kNoError, GetError());
+}
+
+TEST_F(CommandBufferServiceTest, SetGetBuffer) {
+ int32 ring_buffer_id;
+ command_buffer_->CreateTransferBuffer(1024, &ring_buffer_id);
+ EXPECT_GT(ring_buffer_id, 0);
+
+ scoped_ptr<StrictMock<MockCallbackTest> > change_callback(
+ new StrictMock<MockCallbackTest>);
+ command_buffer_->SetGetBufferChangeCallback(
+ base::Bind(
+ &CallbackTest::GetBufferChanged,
+ base::Unretained(change_callback.get())));
+
+ EXPECT_CALL(*change_callback, GetBufferChanged(ring_buffer_id))
+ .WillOnce(Return(true));
+
+ command_buffer_->SetGetBuffer(ring_buffer_id);
+ EXPECT_EQ(0, GetGetOffset());
+}
+
+TEST_F(CommandBufferServiceTest, DefaultTokenIsZero) {
+ EXPECT_EQ(0, GetToken());
+}
+
+TEST_F(CommandBufferServiceTest, CanSetToken) {
+ command_buffer_->SetToken(7);
+ EXPECT_EQ(7, GetToken());
+}
+
+TEST_F(CommandBufferServiceTest, DefaultParseErrorIsNoError) {
+ EXPECT_EQ(0, GetError());
+}
+
+TEST_F(CommandBufferServiceTest, CanSetParseError) {
+ command_buffer_->SetParseError(error::kInvalidSize);
+ EXPECT_EQ(1, GetError());
+}
+} // namespace gpu
diff --git a/gpu/command_buffer/service/common_decoder.cc b/gpu/command_buffer/service/common_decoder.cc
new file mode 100644
index 0000000..86a37ba
--- /dev/null
+++ b/gpu/command_buffer/service/common_decoder.cc
@@ -0,0 +1,295 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/common_decoder.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+
+namespace gpu {
+
+CommonDecoder::Bucket::Bucket() : size_(0) {}
+
+CommonDecoder::Bucket::~Bucket() {}
+
+void* CommonDecoder::Bucket::GetData(size_t offset, size_t size) const {
+ if (OffsetSizeValid(offset, size)) {
+ return data_.get() + offset;
+ }
+ return NULL;
+}
+
+void CommonDecoder::Bucket::SetSize(size_t size) {
+ if (size != size_) {
+ data_.reset(size ? new int8[size] : NULL);
+ size_ = size;
+ memset(data_.get(), 0, size);
+ }
+}
+
+bool CommonDecoder::Bucket::SetData(
+ const void* src, size_t offset, size_t size) {
+ if (OffsetSizeValid(offset, size)) {
+ memcpy(data_.get() + offset, src, size);
+ return true;
+ }
+ return false;
+}
+
+void CommonDecoder::Bucket::SetFromString(const char* str) {
+ // Strings are passed NULL terminated to distinguish between empty string
+ // and no string.
+ if (!str) {
+ SetSize(0);
+ } else {
+ size_t size = strlen(str) + 1;
+ SetSize(size);
+ SetData(str, 0, size);
+ }
+}
+
+bool CommonDecoder::Bucket::GetAsString(std::string* str) {
+ DCHECK(str);
+ if (size_ == 0) {
+ return false;
+ }
+ str->assign(GetDataAs<const char*>(0, size_ - 1), size_ - 1);
+ return true;
+}
+
+CommonDecoder::CommonDecoder() : engine_(NULL) {}
+
+CommonDecoder::~CommonDecoder() {}
+
+void* CommonDecoder::GetAddressAndCheckSize(unsigned int shm_id,
+ unsigned int data_offset,
+ unsigned int data_size) {
+ CHECK(engine_);
+ scoped_refptr<gpu::Buffer> buffer = engine_->GetSharedMemoryBuffer(shm_id);
+ if (!buffer.get())
+ return NULL;
+ return buffer->GetDataAddress(data_offset, data_size);
+}
+
+scoped_refptr<gpu::Buffer> CommonDecoder::GetSharedMemoryBuffer(
+ unsigned int shm_id) {
+ return engine_->GetSharedMemoryBuffer(shm_id);
+}
+
+const char* CommonDecoder::GetCommonCommandName(
+ cmd::CommandId command_id) const {
+ return cmd::GetCommandName(command_id);
+}
+
+CommonDecoder::Bucket* CommonDecoder::GetBucket(uint32 bucket_id) const {
+ BucketMap::const_iterator iter(buckets_.find(bucket_id));
+ return iter != buckets_.end() ? &(*iter->second) : NULL;
+}
+
+CommonDecoder::Bucket* CommonDecoder::CreateBucket(uint32 bucket_id) {
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket) {
+ bucket = new Bucket();
+ buckets_[bucket_id] = linked_ptr<Bucket>(bucket);
+ }
+ return bucket;
+}
+
+namespace {
+
+// Returns the address of the first byte after a struct.
+template <typename T>
+const void* AddressAfterStruct(const T& pod) {
+ return reinterpret_cast<const uint8*>(&pod) + sizeof(pod);
+}
+
+// Returns the address of the first byte after the struct.
+template <typename RETURN_TYPE, typename COMMAND_TYPE>
+RETURN_TYPE GetImmediateDataAs(const COMMAND_TYPE& pod) {
+ return static_cast<RETURN_TYPE>(const_cast<void*>(AddressAfterStruct(pod)));
+}
+
+// TODO(vmiura): Looks like this g_command_info is duplicated in
+// common_decoder.cc and gles2_cmd_decoder.cc. Fix it!
+
+// A struct to hold info about each command.
+struct CommandInfo {
+ uint8 arg_flags; // How to handle the arguments for this command
+ uint8 cmd_flags; // How to handle this command
+ uint16 arg_count; // How many arguments are expected for this command.
+};
+
+// A table of CommandInfo for all the commands.
+const CommandInfo g_command_info[] = {
+ #define COMMON_COMMAND_BUFFER_CMD_OP(name) { \
+ cmd::name::kArgFlags, \
+ cmd::name::cmd_flags, \
+ sizeof(cmd::name) / sizeof(CommandBufferEntry) - 1, }, /* NOLINT */
+
+ COMMON_COMMAND_BUFFER_CMDS(COMMON_COMMAND_BUFFER_CMD_OP)
+
+ #undef COMMON_COMMAND_BUFFER_CMD_OP
+};
+
+} // anonymous namespace.
+
+// Decodes a command with its arguments and calls the corresponding handler.
+// Note: cmd_data points into the command buffer, so a (malicious) client can
+// change its contents at any time. Any validation must therefore operate on a
+// copy of the arguments.
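+// For kFixed commands the client must supply exactly arg_count entries; for
+// kAtLeastN commands any extra entries beyond arg_count are treated as
+// immediate data, and their total byte size is passed to the handler.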
+error::Error CommonDecoder::DoCommonCommand(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data) {
+ if (command < arraysize(g_command_info)) {
+ const CommandInfo& info = g_command_info[command];
+ unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
+ if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
+ (info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
+ uint32 immediate_data_size =
+ (arg_count - info_arg_count) * sizeof(CommandBufferEntry); // NOLINT
+ switch (command) {
+ #define COMMON_COMMAND_BUFFER_CMD_OP(name) \
+ case cmd::name::kCmdId: \
+ return Handle ## name( \
+ immediate_data_size, \
+ *static_cast<const cmd::name*>(cmd_data)); \
+
+ COMMON_COMMAND_BUFFER_CMDS(COMMON_COMMAND_BUFFER_CMD_OP)
+
+ #undef COMMON_COMMAND_BUFFER_CMD_OP
+ }
+ } else {
+ return error::kInvalidArguments;
+ }
+ }
+ return error::kUnknownCommand;
+}
+
+error::Error CommonDecoder::HandleNoop(
+ uint32 immediate_data_size,
+ const cmd::Noop& args) {
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleSetToken(
+ uint32 immediate_data_size,
+ const cmd::SetToken& args) {
+ engine_->set_token(args.token);
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleSetBucketSize(
+ uint32 immediate_data_size,
+ const cmd::SetBucketSize& args) {
+ uint32 bucket_id = args.bucket_id;
+ uint32 size = args.size;
+
+ Bucket* bucket = CreateBucket(bucket_id);
+ bucket->SetSize(size);
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleSetBucketData(
+ uint32 immediate_data_size,
+ const cmd::SetBucketData& args) {
+ uint32 bucket_id = args.bucket_id;
+ uint32 offset = args.offset;
+ uint32 size = args.size;
+ const void* data = GetSharedMemoryAs<const void*>(
+ args.shared_memory_id, args.shared_memory_offset, size);
+ if (!data) {
+ return error::kInvalidArguments;
+ }
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ if (!bucket->SetData(data, offset, size)) {
+ return error::kInvalidArguments;
+ }
+
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleSetBucketDataImmediate(
+ uint32 immediate_data_size,
+ const cmd::SetBucketDataImmediate& args) {
+ const void* data = GetImmediateDataAs<const void*>(args);
+ uint32 bucket_id = args.bucket_id;
+ uint32 offset = args.offset;
+ uint32 size = args.size;
+ if (size > immediate_data_size) {
+ return error::kInvalidArguments;
+ }
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ if (!bucket->SetData(data, offset, size)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
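+// Writes the bucket size into the client-provided result slot (which the
+// client must have zero-initialized) and, if a data buffer is supplied,
+// copies up to data_memory_size bytes from the start of the bucket into it.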
+error::Error CommonDecoder::HandleGetBucketStart(
+ uint32 immediate_data_size,
+ const cmd::GetBucketStart& args) {
+ uint32 bucket_id = args.bucket_id;
+ uint32* result = GetSharedMemoryAs<uint32*>(
+ args.result_memory_id, args.result_memory_offset, sizeof(*result));
+ int32 data_memory_id = args.data_memory_id;
+ uint32 data_memory_offset = args.data_memory_offset;
+ uint32 data_memory_size = args.data_memory_size;
+ uint8* data = NULL;
+ if (data_memory_size != 0 || data_memory_id != 0 || data_memory_offset != 0) {
+ data = GetSharedMemoryAs<uint8*>(
+ args.data_memory_id, args.data_memory_offset, args.data_memory_size);
+ if (!data) {
+ return error::kInvalidArguments;
+ }
+ }
+ if (!result) {
+ return error::kInvalidArguments;
+ }
+ // Check that the client initialized the result.
+ if (*result != 0) {
+ return error::kInvalidArguments;
+ }
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ uint32 bucket_size = bucket->size();
+ *result = bucket_size;
+ if (data) {
+ uint32 size = std::min(data_memory_size, bucket_size);
+ memcpy(data, bucket->GetData(0, size), size);
+ }
+ return error::kNoError;
+}
+
+error::Error CommonDecoder::HandleGetBucketData(
+ uint32 immediate_data_size,
+ const cmd::GetBucketData& args) {
+ uint32 bucket_id = args.bucket_id;
+ uint32 offset = args.offset;
+ uint32 size = args.size;
+ void* data = GetSharedMemoryAs<void*>(
+ args.shared_memory_id, args.shared_memory_offset, size);
+ if (!data) {
+ return error::kInvalidArguments;
+ }
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ const void* src = bucket->GetData(offset, size);
+ if (!src) {
+ return error::kInvalidArguments;
+ }
+ memcpy(data, src, size);
+ return error::kNoError;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/common_decoder.h b/gpu/command_buffer/service/common_decoder.h
new file mode 100644
index 0000000..2132afb
--- /dev/null
+++ b/gpu/command_buffer/service/common_decoder.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_COMMON_DECODER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_COMMON_DECODER_H_
+
+#include <map>
+#include <stack>
+#include <string>
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/common/buffer.h"
+#include "gpu/command_buffer/service/cmd_parser.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class CommandBufferEngine;
+
+// This class is a helper base class for implementing the common parts of the
+// o3d/gl2 command buffer decoder.
+class GPU_EXPORT CommonDecoder : NON_EXPORTED_BASE(public AsyncAPIInterface) {
+ public:
+ typedef error::Error Error;
+
+ static const unsigned int kMaxStackDepth = 32;
+
+ // A bucket is a buffer that helps collect memory across the command buffer.
+ // When creating a command buffer implementation of an existing API, that API
+ // sometimes has functions that take a pointer to data. A good example is
+ // OpenGL's glBufferData. Because the data is separated between client and
+ // service, there are two ways to get it across. One is to put all the data
+ // in shared memory. The problem with that is the data can be arbitrarily
+ // large and the host OS may not support that much shared memory. The other
+ // is to shuffle the memory across a little at a time, collecting it on the
+ // service side, and once it has all arrived, call glBufferData. Buckets
+ // implement this second solution. Using the common commands SetBucketSize,
+ // SetBucketData and SetBucketDataImmediate, the client can fill a bucket. It
+ // can then call a command that uses that bucket (like BufferDataBucket in
+ // the GLES2 command buffer implementation).
+ //
+ // If you are designing an API from scratch you can avoid the need for
+ // buckets by making your API always take an offset and a size, similar to
+ // glBufferSubData.
+ //
+ // Buckets also help pass strings to/from the service. To return a string of
+ // arbitrary size, the service puts the string in a bucket. The client can
+ // then query the size of the bucket and request sections of it to be copied
+ // across shared memory.
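+ //
+ // For example, to transfer a large blob a client would typically issue a
+ // sequence along these lines (illustrative only):
+ //   SetBucketSize(bucket_id, total_size);
+ //   SetBucketData(bucket_id, offset, chunk_size, shm_id, shm_offset);
+ //   ... repeated per chunk ...
+ //   <command that consumes the bucket, e.g. BufferDataBucket>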
+ class GPU_EXPORT Bucket {
+ public:
+ Bucket();
+ ~Bucket();
+
+ size_t size() const {
+ return size_;
+ }
+
+ // Gets a pointer to a section of the bucket. Returns NULL if offset or size
+ // is out of range.
+ void* GetData(size_t offset, size_t size) const;
+
+ template <typename T>
+ T GetDataAs(size_t offset, size_t size) const {
+ return reinterpret_cast<T>(GetData(offset, size));
+ }
+
+ // Sets the size of the bucket. The contents are zero-initialized whenever
+ // the size changes.
+ void SetSize(size_t size);
+
+ // Sets a part of the bucket.
+ // Returns false if offset or size is out of range.
+ bool SetData(const void* src, size_t offset, size_t size);
+
+ // Sets the bucket data from a string. Strings are passed NULL terminated to
+ // distinguish between empty string and no string.
+ void SetFromString(const char* str);
+
+ // Gets the bucket data as a string. Strings are passed NULL terminated to
+ // distinguish between an empty string and no string. Returns false if there
+ // is no string.
+ bool GetAsString(std::string* str);
+
+ private:
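+  // Returns true if [offset, offset + size) lies within the bucket. The
+  // temp >= offset comparison guards against unsigned wrap-around when
+  // offset + size overflows.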
+ bool OffsetSizeValid(size_t offset, size_t size) const {
+ size_t temp = offset + size;
+ return temp <= size_ && temp >= offset;
+ }
+
+ size_t size_;
+ ::scoped_ptr<int8[]> data_;
+
+ DISALLOW_COPY_AND_ASSIGN(Bucket);
+ };
+
+ CommonDecoder();
+ virtual ~CommonDecoder();
+
+ // Sets the engine from which shared memory buffers are fetched and on which
+ // the current token is set.
+ void set_engine(CommandBufferEngine* engine) {
+ engine_ = engine;
+ }
+ CommandBufferEngine* engine() const { return engine_; }
+
+ // Creates a bucket. If the bucket already exists returns that bucket.
+ Bucket* CreateBucket(uint32 bucket_id);
+
+ // Gets a bucket. Returns NULL if the bucket does not exist.
+ Bucket* GetBucket(uint32 bucket_id) const;
+
+ // Gets the address of shared memory data, given a shared memory ID and an
+ // offset. Also checks that the size is consistent with the shared memory
+ // size.
+ // Parameters:
+ // shm_id: the id of the shared memory buffer.
+ // offset: the offset of the data in the shared memory buffer.
+ // size: the size of the data.
+ // Returns:
+ // NULL if shm_id isn't a valid shared memory buffer ID or if the size
+ //   check fails. Returns a pointer to the data otherwise.
+ void* GetAddressAndCheckSize(unsigned int shm_id,
+ unsigned int offset,
+ unsigned int size);
+
+ // Typed version of GetAddressAndCheckSize.
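+ // For example, a command handler might validate client memory like:
+ //   const void* data = GetSharedMemoryAs<const void*>(shm_id, offset, size);
+ //   if (!data) return error::kInvalidArguments;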
+ template <typename T>
+ T GetSharedMemoryAs(unsigned int shm_id, unsigned int offset,
+ unsigned int size) {
+ return static_cast<T>(GetAddressAndCheckSize(shm_id, offset, size));
+ }
+
+ // Get the actual shared memory buffer.
+ scoped_refptr<gpu::Buffer> GetSharedMemoryBuffer(unsigned int shm_id);
+
+ protected:
+ // Executes a common command.
+ // Parameters:
+ // command: the command index.
+ // arg_count: the number of CommandBufferEntry arguments.
+ // cmd_data: the command data.
+ // Returns:
+ // error::kNoError if no error was found, one of
+ // error::Error otherwise.
+ error::Error DoCommonCommand(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data);
+
+ // Gets the name of a common command.
+ const char* GetCommonCommandName(cmd::CommandId command_id) const;
+
+ private:
+ // Generate a member function prototype for each command in an automated and
+ // typesafe way.
+ #define COMMON_COMMAND_BUFFER_CMD_OP(name) \
+ error::Error Handle##name( \
+ uint32 immediate_data_size, \
+ const cmd::name& args); \
+
+ COMMON_COMMAND_BUFFER_CMDS(COMMON_COMMAND_BUFFER_CMD_OP)
+
+ #undef COMMON_COMMAND_BUFFER_CMD_OP
+
+ CommandBufferEngine* engine_;
+
+ typedef std::map<uint32, linked_ptr<Bucket> > BucketMap;
+ BucketMap buckets_;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_COMMON_DECODER_H_
+
diff --git a/gpu/command_buffer/service/common_decoder_unittest.cc b/gpu/command_buffer/service/common_decoder_unittest.cc
new file mode 100644
index 0000000..0faa8e9
--- /dev/null
+++ b/gpu/command_buffer/service/common_decoder_unittest.cc
@@ -0,0 +1,513 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/common_decoder.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+TEST(CommonDecoderBucket, Basic) {
+ CommonDecoder::Bucket bucket;
+ EXPECT_EQ(0u, bucket.size());
+ EXPECT_TRUE(NULL == bucket.GetData(0, 0));
+}
+
+TEST(CommonDecoderBucket, Size) {
+ CommonDecoder::Bucket bucket;
+ bucket.SetSize(24);
+ EXPECT_EQ(24u, bucket.size());
+ bucket.SetSize(12);
+ EXPECT_EQ(12u, bucket.size());
+}
+
+TEST(CommonDecoderBucket, GetData) {
+ CommonDecoder::Bucket bucket;
+
+ bucket.SetSize(24);
+ EXPECT_TRUE(NULL != bucket.GetData(0, 0));
+ EXPECT_TRUE(NULL != bucket.GetData(24, 0));
+ EXPECT_TRUE(NULL == bucket.GetData(25, 0));
+ EXPECT_TRUE(NULL != bucket.GetData(0, 24));
+ EXPECT_TRUE(NULL == bucket.GetData(0, 25));
+ bucket.SetSize(23);
+ EXPECT_TRUE(NULL == bucket.GetData(0, 24));
+}
+
+TEST(CommonDecoderBucket, SetData) {
+ CommonDecoder::Bucket bucket;
+ static const char data[] = "testing";
+
+ bucket.SetSize(10);
+ EXPECT_TRUE(bucket.SetData(data, 0, sizeof(data)));
+ EXPECT_EQ(0, memcmp(data, bucket.GetData(0, sizeof(data)), sizeof(data)));
+ EXPECT_TRUE(bucket.SetData(data, 2, sizeof(data)));
+ EXPECT_EQ(0, memcmp(data, bucket.GetData(2, sizeof(data)), sizeof(data)));
+ EXPECT_FALSE(bucket.SetData(data, 0, sizeof(data) * 2));
+ EXPECT_FALSE(bucket.SetData(data, 5, sizeof(data)));
+}
+
+class TestCommonDecoder : public CommonDecoder {
+ public:
+ // Overridden from AsyncAPIInterface
+ virtual const char* GetCommandName(unsigned int command_id) const OVERRIDE {
+ return GetCommonCommandName(static_cast<cmd::CommandId>(command_id));
+ }
+
+ // Overridden from AsyncAPIInterface
+ virtual error::Error DoCommand(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data) OVERRIDE {
+ return DoCommonCommand(command, arg_count, cmd_data);
+ }
+
+ CommonDecoder::Bucket* GetBucket(uint32 id) const {
+ return CommonDecoder::GetBucket(id);
+ }
+};
+
+class MockCommandBufferEngine : public CommandBufferEngine {
+ public:
+ static const int32 kStartValidShmId = 1;
+ static const int32 kValidShmId = 2;
+ static const int32 kInvalidShmId = 3;
+ static const size_t kBufferSize = 1024;
+ static const int32 kValidOffset = kBufferSize / 2;
+ static const int32 kInvalidOffset = kBufferSize;
+
+ MockCommandBufferEngine()
+ : CommandBufferEngine(),
+ token_(),
+ get_offset_(0) {
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
+ shared_memory->CreateAndMapAnonymous(kBufferSize);
+ buffer_ = MakeBufferFromSharedMemory(shared_memory.Pass(), kBufferSize);
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual scoped_refptr<gpu::Buffer> GetSharedMemoryBuffer(int32 shm_id)
+ OVERRIDE {
+ if (IsValidSharedMemoryId(shm_id))
+ return buffer_;
+ return NULL;
+ }
+
+ template <typename T>
+ T GetSharedMemoryAs(uint32 offset) {
+ DCHECK_LT(offset, kBufferSize);
+ int8* buffer_memory = static_cast<int8*>(buffer_->memory());
+ return reinterpret_cast<T>(&buffer_memory[offset]);
+ }
+
+ int32 GetSharedMemoryOffset(const void* memory) {
+ int8* buffer_memory = static_cast<int8*>(buffer_->memory());
+ ptrdiff_t offset = static_cast<const int8*>(memory) - &buffer_memory[0];
+ DCHECK_GE(offset, 0);
+ DCHECK_LT(static_cast<size_t>(offset), kBufferSize);
+ return static_cast<int32>(offset);
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual void set_token(int32 token) OVERRIDE {
+ token_ = token;
+ }
+
+ int32 token() const {
+ return token_;
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual bool SetGetBuffer(int32 transfer_buffer_id) OVERRIDE {
+ NOTREACHED();
+ return false;
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual bool SetGetOffset(int32 offset) OVERRIDE {
+ if (static_cast<size_t>(offset) < kBufferSize) {
+ get_offset_ = offset;
+ return true;
+ }
+ return false;
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual int32 GetGetOffset() OVERRIDE {
+ return get_offset_;
+ }
+
+ private:
+ bool IsValidSharedMemoryId(int32 shm_id) {
+ return shm_id == kValidShmId || shm_id == kStartValidShmId;
+ }
+
+ scoped_refptr<gpu::Buffer> buffer_;
+ int32 token_;
+ int32 get_offset_;
+};
+
+const int32 MockCommandBufferEngine::kStartValidShmId;
+const int32 MockCommandBufferEngine::kValidShmId;
+const int32 MockCommandBufferEngine::kInvalidShmId;
+const size_t MockCommandBufferEngine::kBufferSize;
+const int32 MockCommandBufferEngine::kValidOffset;
+const int32 MockCommandBufferEngine::kInvalidOffset;
+
+class CommonDecoderTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ decoder_.set_engine(&engine_);
+ }
+
+ virtual void TearDown() {
+ }
+
+ template <typename T>
+ error::Error ExecuteCmd(const T& cmd) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
+ return decoder_.DoCommands(
+ 1, (const void*)&cmd, ComputeNumEntries(sizeof(cmd)), 0);
+ }
+
+ template <typename T>
+ error::Error ExecuteImmediateCmd(const T& cmd, size_t data_size) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ return decoder_.DoCommands(
+ 1, (const void*)&cmd, ComputeNumEntries(sizeof(cmd) + data_size), 0);
+ }
+
+ MockCommandBufferEngine engine_;
+ TestCommonDecoder decoder_;
+};
+
+TEST_F(CommonDecoderTest, Initialize) {
+ EXPECT_EQ(0, engine_.GetGetOffset());
+}
+
+TEST_F(CommonDecoderTest, DoCommonCommandInvalidCommand) {
+ EXPECT_EQ(error::kUnknownCommand, decoder_.DoCommand(999999, 0, NULL));
+}
+
+TEST_F(CommonDecoderTest, HandleNoop) {
+ cmd::Noop cmd;
+ const uint32 kSkipCount = 5;
+ cmd.Init(kSkipCount);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(
+ cmd, kSkipCount * kCommandBufferEntrySize));
+ const uint32 kSkipCount2 = 1;
+ cmd.Init(kSkipCount2);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(
+ cmd, kSkipCount2 * kCommandBufferEntrySize));
+}
+
+TEST_F(CommonDecoderTest, SetToken) {
+ cmd::SetToken cmd;
+ const int32 kTokenId = 123;
+ EXPECT_EQ(0, engine_.token());
+ cmd.Init(kTokenId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kTokenId, engine_.token());
+}
+
+TEST_F(CommonDecoderTest, SetBucketSize) {
+ cmd::SetBucketSize cmd;
+ const uint32 kBucketId = 123;
+ const uint32 kBucketLength1 = 1234;
+ const uint32 kBucketLength2 = 78;
+ // Check the bucket does not exist.
+ EXPECT_TRUE(NULL == decoder_.GetBucket(kBucketId));
+ // Check we can create one.
+ cmd.Init(kBucketId, kBucketLength1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ CommonDecoder::Bucket* bucket;
+ bucket = decoder_.GetBucket(kBucketId);
+ EXPECT_TRUE(NULL != bucket);
+ EXPECT_EQ(kBucketLength1, bucket->size());
+ // Check we can change it.
+ cmd.Init(kBucketId, kBucketLength2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ bucket = decoder_.GetBucket(kBucketId);
+ EXPECT_TRUE(NULL != bucket);
+ EXPECT_EQ(kBucketLength2, bucket->size());
+ // Check we can delete it.
+ cmd.Init(kBucketId, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ bucket = decoder_.GetBucket(kBucketId);
+ EXPECT_EQ(0u, bucket->size());
+}
+
+TEST_F(CommonDecoderTest, SetBucketData) {
+ cmd::SetBucketSize size_cmd;
+ cmd::SetBucketData cmd;
+
+ static const char kData[] = "1234567890123456789";
+
+ const uint32 kBucketId = 123;
+ const uint32 kInvalidBucketId = 124;
+
+ size_cmd.Init(kBucketId, sizeof(kData));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
+ CommonDecoder::Bucket* bucket = decoder_.GetBucket(kBucketId);
+ // Check the data is not there.
+ EXPECT_NE(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));
+
+ // Check we can set it.
+ const uint32 kSomeOffsetInSharedMemory = 50;
+ void* memory = engine_.GetSharedMemoryAs<void*>(kSomeOffsetInSharedMemory);
+ memcpy(memory, kData, sizeof(kData));
+ cmd.Init(kBucketId, 0, sizeof(kData),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));
+
+ // Check we can set it partially.
+ static const char kData2[] = "ABCEDFG";
+ const uint32 kSomeOffsetInBucket = 5;
+ memcpy(memory, kData2, sizeof(kData2));
+ cmd.Init(kBucketId, kSomeOffsetInBucket, sizeof(kData2),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, memcmp(bucket->GetData(kSomeOffsetInBucket, sizeof(kData2)),
+ kData2, sizeof(kData2)));
+ const char* bucket_data = bucket->GetDataAs<const char*>(0, sizeof(kData));
+ // Check that nothing was affected outside of updated area.
+ EXPECT_EQ(kData[kSomeOffsetInBucket - 1],
+ bucket_data[kSomeOffsetInBucket - 1]);
+ EXPECT_EQ(kData[kSomeOffsetInBucket + sizeof(kData2)],
+ bucket_data[kSomeOffsetInBucket + sizeof(kData2)]);
+
+ // Check that it fails if the bucket_id is invalid
+ cmd.Init(kInvalidBucketId, kSomeOffsetInBucket, sizeof(kData2),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the offset is out of range.
+ cmd.Init(kBucketId, bucket->size(), 1,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the size is out of range.
+ cmd.Init(kBucketId, 0, bucket->size() + 1,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_F(CommonDecoderTest, SetBucketDataImmediate) {
+ cmd::SetBucketSize size_cmd;
+ int8 buffer[1024];
+ cmd::SetBucketDataImmediate& cmd =
+ *reinterpret_cast<cmd::SetBucketDataImmediate*>(&buffer);
+
+ static const char kData[] = "1234567890123456789";
+
+ const uint32 kBucketId = 123;
+ const uint32 kInvalidBucketId = 124;
+
+ size_cmd.Init(kBucketId, sizeof(kData));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
+ CommonDecoder::Bucket* bucket = decoder_.GetBucket(kBucketId);
+ // Check the data is not there.
+ EXPECT_NE(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));
+
+ // Check we can set it.
+ void* memory = &buffer[0] + sizeof(cmd);
+ memcpy(memory, kData, sizeof(kData));
+ cmd.Init(kBucketId, 0, sizeof(kData));
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(kData)));
+ EXPECT_EQ(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));
+
+ // Check we can set it partially.
+ static const char kData2[] = "ABCEDFG";
+ const uint32 kSomeOffsetInBucket = 5;
+ memcpy(memory, kData2, sizeof(kData2));
+ cmd.Init(kBucketId, kSomeOffsetInBucket, sizeof(kData2));
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(kData2)));
+ EXPECT_EQ(0, memcmp(bucket->GetData(kSomeOffsetInBucket, sizeof(kData2)),
+ kData2, sizeof(kData2)));
+ const char* bucket_data = bucket->GetDataAs<const char*>(0, sizeof(kData));
+ // Check that nothing was affected outside of updated area.
+ EXPECT_EQ(kData[kSomeOffsetInBucket - 1],
+ bucket_data[kSomeOffsetInBucket - 1]);
+ EXPECT_EQ(kData[kSomeOffsetInBucket + sizeof(kData2)],
+ bucket_data[kSomeOffsetInBucket + sizeof(kData2)]);
+
+ // Check that it fails if the bucket_id is invalid
+ cmd.Init(kInvalidBucketId, kSomeOffsetInBucket, sizeof(kData2));
+ EXPECT_NE(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(kData2)));
+
+ // Check that it fails if the offset is out of range.
+ cmd.Init(kBucketId, bucket->size(), 1);
+ EXPECT_NE(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(kData2)));
+
+ // Check that it fails if the size is out of range.
+ cmd.Init(kBucketId, 0, bucket->size() + 1);
+ EXPECT_NE(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(kData2)));
+}
+
+TEST_F(CommonDecoderTest, GetBucketStart) {
+ cmd::SetBucketSize size_cmd;
+ cmd::SetBucketData set_cmd;
+ cmd::GetBucketStart cmd;
+
+ static const char kData[] = "1234567890123456789";
+ static const char zero[sizeof(kData)] = { 0, };
+
+ const uint32 kBucketSize = sizeof(kData);
+ const uint32 kBucketId = 123;
+ const uint32 kInvalidBucketId = 124;
+
+ // Put data in the bucket.
+ size_cmd.Init(kBucketId, sizeof(kData));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
+ const uint32 kSomeOffsetInSharedMemory = 50;
+ uint8* start = engine_.GetSharedMemoryAs<uint8*>(kSomeOffsetInSharedMemory);
+ memcpy(start, kData, sizeof(kData));
+ set_cmd.Init(kBucketId, 0, sizeof(kData),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(set_cmd));
+
+ // Check that the size is correct with no data buffer.
+ uint32* memory =
+ engine_.GetSharedMemoryAs<uint32*>(kSomeOffsetInSharedMemory);
+ *memory = 0x0;
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 0, 0, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kBucketSize, *memory);
+
+ // Check that the data is copied with data buffer.
+ const uint32 kDataOffsetInSharedMemory = 54;
+ uint8* data = engine_.GetSharedMemoryAs<uint8*>(kDataOffsetInSharedMemory);
+ *memory = 0x0;
+ memset(data, 0, sizeof(kData));
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ kBucketSize, MockCommandBufferEngine::kValidShmId,
+ kDataOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kBucketSize, *memory);
+ EXPECT_EQ(0, memcmp(data, kData, kBucketSize));
+
+ // Check that we can get a piece.
+ *memory = 0x0;
+ memset(data, 0, sizeof(kData));
+ const uint32 kPieceSize = kBucketSize / 2;
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ kPieceSize, MockCommandBufferEngine::kValidShmId,
+ kDataOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kBucketSize, *memory);
+ EXPECT_EQ(0, memcmp(data, kData, kPieceSize));
+ EXPECT_EQ(0, memcmp(data + kPieceSize, zero, sizeof(kData) - kPieceSize));
+
+ // Check that it fails if the result_id is invalid
+ cmd.Init(kInvalidBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 0, 0, 0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the data_id is invalid
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 1, MockCommandBufferEngine::kInvalidShmId, 0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the data_size is invalid
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 1, 0, 0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ MockCommandBufferEngine::kBufferSize + 1,
+ MockCommandBufferEngine::kValidShmId, 0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the data_offset is invalid
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 0, 0, 1);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ MockCommandBufferEngine::kBufferSize,
+ MockCommandBufferEngine::kValidShmId, 1);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the result size is not set to zero
+ *memory = 0x1;
+ cmd.Init(kBucketId,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory,
+ 0, 0, 0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_F(CommonDecoderTest, GetBucketData) {
+ cmd::SetBucketSize size_cmd;
+ cmd::SetBucketData set_cmd;
+ cmd::GetBucketData cmd;
+
+ static const char kData[] = "1234567890123456789";
+ static const char zero[sizeof(kData)] = { 0, };
+
+ const uint32 kBucketId = 123;
+ const uint32 kInvalidBucketId = 124;
+
+ size_cmd.Init(kBucketId, sizeof(kData));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
+ const uint32 kSomeOffsetInSharedMemory = 50;
+ uint8* memory = engine_.GetSharedMemoryAs<uint8*>(kSomeOffsetInSharedMemory);
+ memcpy(memory, kData, sizeof(kData));
+ set_cmd.Init(kBucketId, 0, sizeof(kData),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(set_cmd));
+
+ // Check we can get the whole thing.
+ memset(memory, 0, sizeof(kData));
+ cmd.Init(kBucketId, 0, sizeof(kData),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, memcmp(memory, kData, sizeof(kData)));
+
+ // Check we can get a piece.
+ const uint32 kSomeOffsetInBucket = 5;
+ const uint32 kLengthOfPiece = 6;
+ const uint8 kSentinel = 0xff;
+ memset(memory, 0, sizeof(kData));
+ memory[-1] = kSentinel;
+ cmd.Init(kBucketId, kSomeOffsetInBucket, kLengthOfPiece,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, memcmp(memory, kData + kSomeOffsetInBucket, kLengthOfPiece));
+ EXPECT_EQ(0, memcmp(memory + kLengthOfPiece, zero,
+ sizeof(kData) - kLengthOfPiece));
+ EXPECT_EQ(kSentinel, memory[-1]);
+
+ // Check that it fails if the bucket_id is invalid
+ cmd.Init(kInvalidBucketId, kSomeOffsetInBucket, sizeof(kData),
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the offset is invalid
+ cmd.Init(kBucketId, sizeof(kData) + 1, 1,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Check that it fails if the size is invalid
+ cmd.Init(kBucketId, 0, sizeof(kData) + 1,
+ MockCommandBufferEngine::kValidShmId, kSomeOffsetInSharedMemory);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/context_group.cc b/gpu/command_buffer/service/context_group.cc
new file mode 100644
index 0000000..fe692be
--- /dev/null
+++ b/gpu/command_buffer/service/context_group.cc
@@ -0,0 +1,380 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/context_group.h"
+
+#include <algorithm>
+#include <string>
+
+#include "base/command_line.h"
+#include "base/strings/string_util.h"
+#include "base/sys_info.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace gles2 {
+
+ContextGroup::ContextGroup(
+ const scoped_refptr<MailboxManager>& mailbox_manager,
+ const scoped_refptr<MemoryTracker>& memory_tracker,
+ const scoped_refptr<ShaderTranslatorCache>& shader_translator_cache,
+ const scoped_refptr<FeatureInfo>& feature_info,
+ bool bind_generates_resource)
+ : mailbox_manager_(mailbox_manager),
+ memory_tracker_(memory_tracker),
+ shader_translator_cache_(shader_translator_cache),
+ enforce_gl_minimums_(CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnforceGLMinimums)),
+ bind_generates_resource_(bind_generates_resource),
+ max_vertex_attribs_(0u),
+ max_texture_units_(0u),
+ max_texture_image_units_(0u),
+ max_vertex_texture_image_units_(0u),
+ max_fragment_uniform_vectors_(0u),
+ max_varying_vectors_(0u),
+ max_vertex_uniform_vectors_(0u),
+ max_color_attachments_(1u),
+ max_draw_buffers_(1u),
+ program_cache_(NULL),
+ feature_info_(feature_info),
+ draw_buffer_(GL_BACK) {
+ {
+ if (!mailbox_manager_.get())
+ mailbox_manager_ = new MailboxManager;
+ if (!feature_info.get())
+ feature_info_ = new FeatureInfo;
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ manager->Initialize();
+ }
+}
+
+static void GetIntegerv(GLenum pname, uint32* var) {
+ GLint value = 0;
+ glGetIntegerv(pname, &value);
+ *var = value;
+}
+
+bool ContextGroup::Initialize(
+ GLES2Decoder* decoder,
+ const DisallowedFeatures& disallowed_features) {
+ // If we've already initialized the group just add the context.
+ if (HaveContexts()) {
+ decoders_.push_back(base::AsWeakPtr<GLES2Decoder>(decoder));
+ return true;
+ }
+
+ if (!feature_info_->Initialize(disallowed_features)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because FeatureInfo "
+ << "initialization failed.";
+ return false;
+ }
+
+ const GLint kMinRenderbufferSize = 512; // GL says 1 pixel!
+ GLint max_renderbuffer_size = 0;
+ if (!QueryGLFeature(
+ GL_MAX_RENDERBUFFER_SIZE, kMinRenderbufferSize,
+ &max_renderbuffer_size)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because maximum "
+ << "renderbuffer size too small.";
+ return false;
+ }
+ GLint max_samples = 0;
+ if (feature_info_->feature_flags().chromium_framebuffer_multisample ||
+ feature_info_->feature_flags().multisampled_render_to_texture) {
+ if (feature_info_->feature_flags(
+ ).use_img_for_multisampled_render_to_texture) {
+ glGetIntegerv(GL_MAX_SAMPLES_IMG, &max_samples);
+ } else {
+ glGetIntegerv(GL_MAX_SAMPLES, &max_samples);
+ }
+ }
+
+ if (feature_info_->feature_flags().ext_draw_buffers) {
+ GetIntegerv(GL_MAX_COLOR_ATTACHMENTS_EXT, &max_color_attachments_);
+ if (max_color_attachments_ < 1)
+ max_color_attachments_ = 1;
+ GetIntegerv(GL_MAX_DRAW_BUFFERS_ARB, &max_draw_buffers_);
+ if (max_draw_buffers_ < 1)
+ max_draw_buffers_ = 1;
+ draw_buffer_ = GL_BACK;
+ }
+
+ const bool depth24_supported = feature_info_->feature_flags().oes_depth24;
+
+ buffer_manager_.reset(
+ new BufferManager(memory_tracker_.get(), feature_info_.get()));
+ framebuffer_manager_.reset(
+ new FramebufferManager(max_draw_buffers_, max_color_attachments_));
+ renderbuffer_manager_.reset(new RenderbufferManager(
+ memory_tracker_.get(), max_renderbuffer_size, max_samples,
+ depth24_supported));
+ shader_manager_.reset(new ShaderManager());
+
+ // Lookup GL things we need to know.
+ const GLint kGLES2RequiredMinimumVertexAttribs = 8u;
+ if (!QueryGLFeatureU(
+ GL_MAX_VERTEX_ATTRIBS, kGLES2RequiredMinimumVertexAttribs,
+ &max_vertex_attribs_)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because too few "
+ << "vertex attributes supported.";
+ return false;
+ }
+
+ const GLuint kGLES2RequiredMinimumTextureUnits = 8u;
+ if (!QueryGLFeatureU(
+ GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, kGLES2RequiredMinimumTextureUnits,
+ &max_texture_units_)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because too few "
+ << "texture units supported.";
+ return false;
+ }
+
+ GLint max_texture_size = 0;
+ GLint max_cube_map_texture_size = 0;
+ const GLint kMinTextureSize = 2048; // GL actually says 64!?!?
+ const GLint kMinCubeMapSize = 256; // GL actually says 16!?!?
+ if (!QueryGLFeature(
+ GL_MAX_TEXTURE_SIZE, kMinTextureSize, &max_texture_size) ||
+ !QueryGLFeature(
+ GL_MAX_CUBE_MAP_TEXTURE_SIZE, kMinCubeMapSize,
+ &max_cube_map_texture_size)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because maximum texture size"
+ << "is too small.";
+ return false;
+ }
+
+ if (feature_info_->workarounds().max_texture_size) {
+ max_texture_size = std::min(
+ max_texture_size, feature_info_->workarounds().max_texture_size);
+ }
+ if (feature_info_->workarounds().max_cube_map_texture_size) {
+ max_cube_map_texture_size = std::min(
+ max_cube_map_texture_size,
+ feature_info_->workarounds().max_cube_map_texture_size);
+ }
+
+ texture_manager_.reset(new TextureManager(memory_tracker_.get(),
+ feature_info_.get(),
+ max_texture_size,
+ max_cube_map_texture_size,
+ bind_generates_resource_));
+ texture_manager_->set_framebuffer_manager(framebuffer_manager_.get());
+
+ const GLint kMinTextureImageUnits = 8;
+ const GLint kMinVertexTextureImageUnits = 0;
+ if (!QueryGLFeatureU(
+ GL_MAX_TEXTURE_IMAGE_UNITS, kMinTextureImageUnits,
+ &max_texture_image_units_) ||
+ !QueryGLFeatureU(
+ GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, kMinVertexTextureImageUnits,
+ &max_vertex_texture_image_units_)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because too few "
+ << "texture units.";
+ return false;
+ }
+
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ GetIntegerv(GL_MAX_FRAGMENT_UNIFORM_VECTORS,
+ &max_fragment_uniform_vectors_);
+ GetIntegerv(GL_MAX_VARYING_VECTORS, &max_varying_vectors_);
+ GetIntegerv(GL_MAX_VERTEX_UNIFORM_VECTORS, &max_vertex_uniform_vectors_);
+ } else {
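+    // Desktop GL reports these limits in individual float components rather
+    // than GLES-style vec4 vectors, so convert by dividing by 4.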
+ GetIntegerv(
+ GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, &max_fragment_uniform_vectors_);
+ max_fragment_uniform_vectors_ /= 4;
+ GetIntegerv(GL_MAX_VARYING_FLOATS, &max_varying_vectors_);
+ max_varying_vectors_ /= 4;
+ GetIntegerv(GL_MAX_VERTEX_UNIFORM_COMPONENTS, &max_vertex_uniform_vectors_);
+ max_vertex_uniform_vectors_ /= 4;
+ }
+
+ const GLint kMinFragmentUniformVectors = 16;
+ const GLint kMinVaryingVectors = 8;
+ const GLint kMinVertexUniformVectors = 128;
+ if (!CheckGLFeatureU(
+ kMinFragmentUniformVectors, &max_fragment_uniform_vectors_) ||
+ !CheckGLFeatureU(kMinVaryingVectors, &max_varying_vectors_) ||
+ !CheckGLFeatureU(
+ kMinVertexUniformVectors, &max_vertex_uniform_vectors_)) {
+ LOG(ERROR) << "ContextGroup::Initialize failed because too few "
+ << "uniforms or varyings supported.";
+ return false;
+ }
+
+  // Some shaders in Skia need more than the minimum available vertex and
+  // fragment shader uniform vectors in the case of the OSMesa GL
+  // implementation.
+ if (feature_info_->workarounds().max_fragment_uniform_vectors) {
+ max_fragment_uniform_vectors_ = std::min(
+ max_fragment_uniform_vectors_,
+ static_cast<uint32>(
+ feature_info_->workarounds().max_fragment_uniform_vectors));
+ }
+ if (feature_info_->workarounds().max_varying_vectors) {
+ max_varying_vectors_ = std::min(
+ max_varying_vectors_,
+ static_cast<uint32>(feature_info_->workarounds().max_varying_vectors));
+ }
+ if (feature_info_->workarounds().max_vertex_uniform_vectors) {
+ max_vertex_uniform_vectors_ =
+ std::min(max_vertex_uniform_vectors_,
+ static_cast<uint32>(
+ feature_info_->workarounds().max_vertex_uniform_vectors));
+ }
+
+ program_manager_.reset(new ProgramManager(
+ program_cache_, max_varying_vectors_));
+
+ if (!texture_manager_->Initialize()) {
+ LOG(ERROR) << "Context::Group::Initialize failed because texture manager "
+ << "failed to initialize.";
+ return false;
+ }
+
+ decoders_.push_back(base::AsWeakPtr<GLES2Decoder>(decoder));
+ return true;
+}
+
+namespace {
+
+bool IsNull(const base::WeakPtr<gles2::GLES2Decoder>& decoder) {
+ return !decoder.get();
+}
+
+template <typename T>
+class WeakPtrEquals {
+ public:
+ explicit WeakPtrEquals(T* t) : t_(t) {}
+
+ bool operator()(const base::WeakPtr<T>& t) {
+ return t.get() == t_;
+ }
+
+ private:
+ T* const t_;
+};
+
+}  // anonymous namespace
+
+bool ContextGroup::HaveContexts() {
+ decoders_.erase(std::remove_if(decoders_.begin(), decoders_.end(), IsNull),
+ decoders_.end());
+ return !decoders_.empty();
+}
+
+void ContextGroup::Destroy(GLES2Decoder* decoder, bool have_context) {
+ decoders_.erase(std::remove_if(decoders_.begin(), decoders_.end(),
+ WeakPtrEquals<gles2::GLES2Decoder>(decoder)),
+ decoders_.end());
+ // If we still have contexts do nothing.
+ if (HaveContexts()) {
+ return;
+ }
+
+ if (buffer_manager_ != NULL) {
+ buffer_manager_->Destroy(have_context);
+ buffer_manager_.reset();
+ }
+
+ if (framebuffer_manager_ != NULL) {
+ framebuffer_manager_->Destroy(have_context);
+ if (texture_manager_)
+ texture_manager_->set_framebuffer_manager(NULL);
+ framebuffer_manager_.reset();
+ }
+
+ if (renderbuffer_manager_ != NULL) {
+ renderbuffer_manager_->Destroy(have_context);
+ renderbuffer_manager_.reset();
+ }
+
+ if (texture_manager_ != NULL) {
+ texture_manager_->Destroy(have_context);
+ texture_manager_.reset();
+ }
+
+ if (program_manager_ != NULL) {
+ program_manager_->Destroy(have_context);
+ program_manager_.reset();
+ }
+
+ if (shader_manager_ != NULL) {
+ shader_manager_->Destroy(have_context);
+ shader_manager_.reset();
+ }
+
+ memory_tracker_ = NULL;
+}
+
+uint32 ContextGroup::GetMemRepresented() const {
+ uint32 total = 0;
+ if (buffer_manager_.get())
+ total += buffer_manager_->mem_represented();
+ if (renderbuffer_manager_.get())
+ total += renderbuffer_manager_->mem_represented();
+ if (texture_manager_.get())
+ total += texture_manager_->mem_represented();
+ return total;
+}
+
+void ContextGroup::LoseContexts(GLenum reset_status) {
+ for (size_t ii = 0; ii < decoders_.size(); ++ii) {
+ if (decoders_[ii].get()) {
+ decoders_[ii]->LoseContext(reset_status);
+ }
+ }
+}
+
+ContextGroup::~ContextGroup() {
+ CHECK(!HaveContexts());
+}
+
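+// If the kEnforceGLMinimums switch is set, the value reported by the driver is
+// clamped down to the required minimum so code paths behave as they would on a
+// minimal GLES2 implementation.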
+bool ContextGroup::CheckGLFeature(GLint min_required, GLint* v) {
+ GLint value = *v;
+ if (enforce_gl_minimums_) {
+ value = std::min(min_required, value);
+ }
+ *v = value;
+ return value >= min_required;
+}
+
+bool ContextGroup::CheckGLFeatureU(GLint min_required, uint32* v) {
+ GLint value = *v;
+ if (enforce_gl_minimums_) {
+ value = std::min(min_required, value);
+ }
+ *v = value;
+ return value >= min_required;
+}
+
+bool ContextGroup::QueryGLFeature(
+ GLenum pname, GLint min_required, GLint* v) {
+ GLint value = 0;
+ glGetIntegerv(pname, &value);
+ *v = value;
+ return CheckGLFeature(min_required, v);
+}
+
+bool ContextGroup::QueryGLFeatureU(
+ GLenum pname, GLint min_required, uint32* v) {
+ uint32 value = 0;
+ GetIntegerv(pname, &value);
+ bool result = CheckGLFeatureU(min_required, &value);
+ *v = value;
+ return result;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/context_group.h b/gpu/command_buffer/service/context_group.h
new file mode 100644
index 0000000..ae4550c
--- /dev/null
+++ b/gpu/command_buffer/service/context_group.h
@@ -0,0 +1,222 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CONTEXT_GROUP_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CONTEXT_GROUP_H_
+
+#include <string>
+#include <vector>
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gles2_cmd_validation.h"
+#include "gpu/command_buffer/service/shader_translator_cache.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class TransferBufferManagerInterface;
+
+namespace gles2 {
+
+class ProgramCache;
+class BufferManager;
+class GLES2Decoder;
+class FramebufferManager;
+class MailboxManager;
+class RenderbufferManager;
+class ProgramManager;
+class ShaderManager;
+class TextureManager;
+class MemoryTracker;
+struct DisallowedFeatures;
+
+// A Context Group helps manage multiple GLES2Decoders that share
+// resources.
+class GPU_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
+ public:
+ ContextGroup(
+ const scoped_refptr<MailboxManager>& mailbox_manager,
+ const scoped_refptr<MemoryTracker>& memory_tracker,
+ const scoped_refptr<ShaderTranslatorCache>& shader_translator_cache,
+ const scoped_refptr<FeatureInfo>& feature_info,
+ bool bind_generates_resource);
+
+ // This should only be called by GLES2Decoder. This must be paired with a
+ // call to destroy if it succeeds.
+ bool Initialize(
+ GLES2Decoder* decoder,
+ const DisallowedFeatures& disallowed_features);
+
+ // Destroys all the resources when called for the last context in the group.
+ // It should only be called by GLES2Decoder.
+ void Destroy(GLES2Decoder* decoder, bool have_context);
+
+ MailboxManager* mailbox_manager() const {
+ return mailbox_manager_.get();
+ }
+
+ MemoryTracker* memory_tracker() const {
+ return memory_tracker_.get();
+ }
+
+ ShaderTranslatorCache* shader_translator_cache() const {
+ return shader_translator_cache_.get();
+ }
+
+ bool bind_generates_resource() {
+ return bind_generates_resource_;
+ }
+
+ uint32 max_vertex_attribs() const {
+ return max_vertex_attribs_;
+ }
+
+ uint32 max_texture_units() const {
+ return max_texture_units_;
+ }
+
+ uint32 max_texture_image_units() const {
+ return max_texture_image_units_;
+ }
+
+ uint32 max_vertex_texture_image_units() const {
+ return max_vertex_texture_image_units_;
+ }
+
+ uint32 max_fragment_uniform_vectors() const {
+ return max_fragment_uniform_vectors_;
+ }
+
+ uint32 max_varying_vectors() const {
+ return max_varying_vectors_;
+ }
+
+ uint32 max_vertex_uniform_vectors() const {
+ return max_vertex_uniform_vectors_;
+ }
+
+ uint32 max_color_attachments() const {
+ return max_color_attachments_;
+ }
+
+ uint32 max_draw_buffers() const {
+ return max_draw_buffers_;
+ }
+
+ FeatureInfo* feature_info() {
+ return feature_info_.get();
+ }
+
+ BufferManager* buffer_manager() const {
+ return buffer_manager_.get();
+ }
+
+ FramebufferManager* framebuffer_manager() const {
+ return framebuffer_manager_.get();
+ }
+
+ RenderbufferManager* renderbuffer_manager() const {
+ return renderbuffer_manager_.get();
+ }
+
+ TextureManager* texture_manager() const {
+ return texture_manager_.get();
+ }
+
+ ProgramManager* program_manager() const {
+ return program_manager_.get();
+ }
+
+ bool has_program_cache() const {
+ return program_cache_ != NULL;
+ }
+
+ void set_program_cache(ProgramCache* program_cache) {
+ program_cache_ = program_cache;
+ }
+
+ ShaderManager* shader_manager() const {
+ return shader_manager_.get();
+ }
+
+ TransferBufferManagerInterface* transfer_buffer_manager() const {
+ return transfer_buffer_manager_.get();
+ }
+
+ uint32 GetMemRepresented() const;
+
+  // Loses all the contexts associated with this group.
+ void LoseContexts(GLenum reset_status);
+
+  // EXT_draw_buffers-related state for the backbuffer.
+ GLenum draw_buffer() const {
+ return draw_buffer_;
+ }
+ void set_draw_buffer(GLenum buf) {
+ draw_buffer_ = buf;
+ }
+
+ private:
+ friend class base::RefCounted<ContextGroup>;
+ ~ContextGroup();
+
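+  // Query* fetch a GL limit and validate it against a required minimum, while
+  // Check* validate a value that has already been fetched. Both return false
+  // if the value falls below the minimum.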
+ bool CheckGLFeature(GLint min_required, GLint* v);
+ bool CheckGLFeatureU(GLint min_required, uint32* v);
+ bool QueryGLFeature(GLenum pname, GLint min_required, GLint* v);
+ bool QueryGLFeatureU(GLenum pname, GLint min_required, uint32* v);
+ bool HaveContexts();
+
+ scoped_refptr<MailboxManager> mailbox_manager_;
+ scoped_refptr<MemoryTracker> memory_tracker_;
+ scoped_refptr<ShaderTranslatorCache> shader_translator_cache_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+
+ bool enforce_gl_minimums_;
+ bool bind_generates_resource_;
+
+ uint32 max_vertex_attribs_;
+ uint32 max_texture_units_;
+ uint32 max_texture_image_units_;
+ uint32 max_vertex_texture_image_units_;
+ uint32 max_fragment_uniform_vectors_;
+ uint32 max_varying_vectors_;
+ uint32 max_vertex_uniform_vectors_;
+ uint32 max_color_attachments_;
+ uint32 max_draw_buffers_;
+
+ ProgramCache* program_cache_;
+
+ scoped_ptr<BufferManager> buffer_manager_;
+
+ scoped_ptr<FramebufferManager> framebuffer_manager_;
+
+ scoped_ptr<RenderbufferManager> renderbuffer_manager_;
+
+ scoped_ptr<TextureManager> texture_manager_;
+
+ scoped_ptr<ProgramManager> program_manager_;
+
+ scoped_ptr<ShaderManager> shader_manager_;
+
+ scoped_refptr<FeatureInfo> feature_info_;
+
+ std::vector<base::WeakPtr<gles2::GLES2Decoder> > decoders_;
+
+ GLenum draw_buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(ContextGroup);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_CONTEXT_GROUP_H_
+
+
diff --git a/gpu/command_buffer/service/context_group_unittest.cc b/gpu/command_buffer/service/context_group_unittest.cc
new file mode 100644
index 0000000..7aa1301
--- /dev/null
+++ b/gpu/command_buffer/service/context_group_unittest.cc
@@ -0,0 +1,136 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/context_group.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::HasSubstr;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Not;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+class ContextGroupTest : public GpuServiceTest {
+ public:
+ static const bool kBindGeneratesResource = false;
+
+ ContextGroupTest() {}
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ decoder_.reset(new MockGLES2Decoder());
+ group_ = scoped_refptr<ContextGroup>(
+ new ContextGroup(NULL, NULL, NULL, NULL, kBindGeneratesResource));
+ }
+
+ scoped_ptr<MockGLES2Decoder> decoder_;
+ scoped_refptr<ContextGroup> group_;
+};
+
+TEST_F(ContextGroupTest, Basic) {
+ // Test it starts off uninitialized.
+ EXPECT_EQ(0u, group_->max_vertex_attribs());
+ EXPECT_EQ(0u, group_->max_texture_units());
+ EXPECT_EQ(0u, group_->max_texture_image_units());
+ EXPECT_EQ(0u, group_->max_vertex_texture_image_units());
+ EXPECT_EQ(0u, group_->max_fragment_uniform_vectors());
+ EXPECT_EQ(0u, group_->max_varying_vectors());
+ EXPECT_EQ(0u, group_->max_vertex_uniform_vectors());
+ EXPECT_TRUE(group_->buffer_manager() == NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() == NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() == NULL);
+ EXPECT_TRUE(group_->texture_manager() == NULL);
+ EXPECT_TRUE(group_->program_manager() == NULL);
+ EXPECT_TRUE(group_->shader_manager() == NULL);
+}
+
+TEST_F(ContextGroupTest, InitializeNoExtensions) {
+ TestHelper::SetupContextGroupInitExpectations(
+ gl_.get(), DisallowedFeatures(), "", "", kBindGeneratesResource);
+ group_->Initialize(decoder_.get(), DisallowedFeatures());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kNumVertexAttribs),
+ group_->max_vertex_attribs());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kNumTextureUnits),
+ group_->max_texture_units());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kMaxTextureImageUnits),
+ group_->max_texture_image_units());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kMaxVertexTextureImageUnits),
+ group_->max_vertex_texture_image_units());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kMaxFragmentUniformVectors),
+ group_->max_fragment_uniform_vectors());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kMaxVaryingVectors),
+ group_->max_varying_vectors());
+ EXPECT_EQ(static_cast<uint32>(TestHelper::kMaxVertexUniformVectors),
+ group_->max_vertex_uniform_vectors());
+ EXPECT_TRUE(group_->buffer_manager() != NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() != NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() != NULL);
+ EXPECT_TRUE(group_->texture_manager() != NULL);
+ EXPECT_TRUE(group_->program_manager() != NULL);
+ EXPECT_TRUE(group_->shader_manager() != NULL);
+
+ group_->Destroy(decoder_.get(), false);
+ EXPECT_TRUE(group_->buffer_manager() == NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() == NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() == NULL);
+ EXPECT_TRUE(group_->texture_manager() == NULL);
+ EXPECT_TRUE(group_->program_manager() == NULL);
+ EXPECT_TRUE(group_->shader_manager() == NULL);
+}
+
+TEST_F(ContextGroupTest, MultipleContexts) {
+ scoped_ptr<MockGLES2Decoder> decoder2_(new MockGLES2Decoder());
+ TestHelper::SetupContextGroupInitExpectations(
+ gl_.get(), DisallowedFeatures(), "", "", kBindGeneratesResource);
+ group_->Initialize(decoder_.get(), DisallowedFeatures());
+ group_->Initialize(decoder2_.get(), DisallowedFeatures());
+
+ EXPECT_TRUE(group_->buffer_manager() != NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() != NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() != NULL);
+ EXPECT_TRUE(group_->texture_manager() != NULL);
+ EXPECT_TRUE(group_->program_manager() != NULL);
+ EXPECT_TRUE(group_->shader_manager() != NULL);
+
+ group_->Destroy(decoder_.get(), false);
+
+ EXPECT_TRUE(group_->buffer_manager() != NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() != NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() != NULL);
+ EXPECT_TRUE(group_->texture_manager() != NULL);
+ EXPECT_TRUE(group_->program_manager() != NULL);
+ EXPECT_TRUE(group_->shader_manager() != NULL);
+
+ group_->Destroy(decoder2_.get(), false);
+
+ EXPECT_TRUE(group_->buffer_manager() == NULL);
+ EXPECT_TRUE(group_->framebuffer_manager() == NULL);
+ EXPECT_TRUE(group_->renderbuffer_manager() == NULL);
+ EXPECT_TRUE(group_->texture_manager() == NULL);
+ EXPECT_TRUE(group_->program_manager() == NULL);
+ EXPECT_TRUE(group_->shader_manager() == NULL);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/context_state.cc b/gpu/command_buffer/service/context_state.cc
new file mode 100644
index 0000000..eb7fc49
--- /dev/null
+++ b/gpu/command_buffer/service/context_state.cc
@@ -0,0 +1,302 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/context_state.h"
+
+#include <cmath>
+
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+static void EnableDisable(GLenum pname, bool enable) {
+ if (enable) {
+ glEnable(pname);
+ } else {
+ glDisable(pname);
+ }
+}
+
+GLuint Get2dServiceId(const TextureUnit& unit) {
+ return unit.bound_texture_2d.get()
+ ? unit.bound_texture_2d->service_id() : 0;
+}
+
+GLuint GetCubeServiceId(const TextureUnit& unit) {
+ return unit.bound_texture_cube_map.get()
+ ? unit.bound_texture_cube_map->service_id() : 0;
+}
+
+GLuint GetOesServiceId(const TextureUnit& unit) {
+ return unit.bound_texture_external_oes.get()
+ ? unit.bound_texture_external_oes->service_id() : 0;
+}
+
+GLuint GetArbServiceId(const TextureUnit& unit) {
+ return unit.bound_texture_rectangle_arb.get()
+ ? unit.bound_texture_rectangle_arb->service_id() : 0;
+}
+
+GLuint GetServiceId(const TextureUnit& unit, GLuint target) {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return Get2dServiceId(unit);
+ case GL_TEXTURE_CUBE_MAP:
+ return GetCubeServiceId(unit);
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return GetArbServiceId(unit);
+ case GL_TEXTURE_EXTERNAL_OES:
+ return GetOesServiceId(unit);
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
+bool TargetIsSupported(const FeatureInfo* feature_info, GLuint target) {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return true;
+ case GL_TEXTURE_CUBE_MAP:
+ return true;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return feature_info->feature_flags().arb_texture_rectangle;
+ case GL_TEXTURE_EXTERNAL_OES:
+ return feature_info->feature_flags().oes_egl_image_external;
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+} // anonymous namespace.
+
+TextureUnit::TextureUnit()
+ : bind_target(GL_TEXTURE_2D) {
+}
+
+TextureUnit::~TextureUnit() {
+}
+
+ContextState::ContextState(FeatureInfo* feature_info,
+ ErrorStateClient* error_state_client,
+ Logger* logger)
+ : active_texture_unit(0),
+ bound_renderbuffer_valid(false),
+ pack_reverse_row_order(false),
+ ignore_cached_state(false),
+ fbo_binding_for_scissor_workaround_dirty_(false),
+ feature_info_(feature_info),
+ error_state_(ErrorState::Create(error_state_client, logger)) {
+ Initialize();
+}
+
+ContextState::~ContextState() {
+}
+
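+// Rebinds the textures for |unit|. When |prev_state| is given, targets whose
+// service ids already match the previous state are skipped to avoid redundant
+// bind calls.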
+void ContextState::RestoreTextureUnitBindings(
+ GLuint unit, const ContextState* prev_state) const {
+ DCHECK_LT(unit, texture_units.size());
+ const TextureUnit& texture_unit = texture_units[unit];
+ GLuint service_id_2d = Get2dServiceId(texture_unit);
+ GLuint service_id_cube = GetCubeServiceId(texture_unit);
+ GLuint service_id_oes = GetOesServiceId(texture_unit);
+ GLuint service_id_arb = GetArbServiceId(texture_unit);
+
+ bool bind_texture_2d = true;
+ bool bind_texture_cube = true;
+ bool bind_texture_oes = feature_info_->feature_flags().oes_egl_image_external;
+ bool bind_texture_arb = feature_info_->feature_flags().arb_texture_rectangle;
+
+ if (prev_state) {
+ const TextureUnit& prev_unit = prev_state->texture_units[unit];
+ bind_texture_2d = service_id_2d != Get2dServiceId(prev_unit);
+ bind_texture_cube = service_id_cube != GetCubeServiceId(prev_unit);
+ bind_texture_oes =
+ bind_texture_oes && service_id_oes != GetOesServiceId(prev_unit);
+ bind_texture_arb =
+ bind_texture_arb && service_id_arb != GetArbServiceId(prev_unit);
+ }
+
+ // Early-out if nothing has changed from the previous state.
+ if (!bind_texture_2d && !bind_texture_cube
+ && !bind_texture_oes && !bind_texture_arb) {
+ return;
+ }
+
+ glActiveTexture(GL_TEXTURE0 + unit);
+ if (bind_texture_2d) {
+ glBindTexture(GL_TEXTURE_2D, service_id_2d);
+ }
+ if (bind_texture_cube) {
+ glBindTexture(GL_TEXTURE_CUBE_MAP, service_id_cube);
+ }
+ if (bind_texture_oes) {
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, service_id_oes);
+ }
+ if (bind_texture_arb) {
+ glBindTexture(GL_TEXTURE_RECTANGLE_ARB, service_id_arb);
+ }
+}
+
+void ContextState::RestoreBufferBindings() const {
+ if (vertex_attrib_manager.get()) {
+ Buffer* element_array_buffer =
+ vertex_attrib_manager->element_array_buffer();
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,
+ element_array_buffer ? element_array_buffer->service_id() : 0);
+ }
+ glBindBuffer(GL_ARRAY_BUFFER,
+ bound_array_buffer.get() ? bound_array_buffer->service_id() : 0);
+}
+
+void ContextState::RestoreRenderbufferBindings() {
+ // Require Renderbuffer rebind.
+ bound_renderbuffer_valid = false;
+}
+
+void ContextState::RestoreProgramBindings() const {
+ glUseProgram(current_program.get() ? current_program->service_id() : 0);
+}
+
+void ContextState::RestoreActiveTexture() const {
+ glActiveTexture(GL_TEXTURE0 + active_texture_unit);
+}
+
+void ContextState::RestoreAllTextureUnitBindings(
+ const ContextState* prev_state) const {
+ // Restore Texture state.
+ for (size_t ii = 0; ii < texture_units.size(); ++ii) {
+ RestoreTextureUnitBindings(ii, prev_state);
+ }
+ RestoreActiveTexture();
+}
+
+void ContextState::RestoreActiveTextureUnitBinding(unsigned int target) const {
+ DCHECK_LT(active_texture_unit, texture_units.size());
+ const TextureUnit& texture_unit = texture_units[active_texture_unit];
+ if (TargetIsSupported(feature_info_, target))
+ glBindTexture(target, GetServiceId(texture_unit, target));
+}
+
+void ContextState::RestoreVertexAttribValues() const {
+ for (size_t attrib = 0; attrib < vertex_attrib_manager->num_attribs();
+ ++attrib) {
+ glVertexAttrib4fv(attrib, attrib_values[attrib].v);
+ }
+}
+
+void ContextState::RestoreVertexAttribArrays(
+ const scoped_refptr<VertexAttribManager> attrib_manager) const {
+  // This is expected to be called only for a VAO with service_id 0, either
+  // to restore the default VAO or a virtual VAO whose service_id is 0.
+ GLuint vao_service_id = attrib_manager->service_id();
+ DCHECK(vao_service_id == 0);
+
+ // Bind VAO if supported.
+ if (feature_info_->feature_flags().native_vertex_array_object)
+ glBindVertexArrayOES(vao_service_id);
+
+ // Restore vertex attrib arrays.
+ for (size_t attrib_index = 0; attrib_index < attrib_manager->num_attribs();
+ ++attrib_index) {
+ const VertexAttrib* attrib = attrib_manager->GetVertexAttrib(attrib_index);
+
+ // Restore vertex array.
+ Buffer* buffer = attrib->buffer();
+ GLuint buffer_service_id = buffer ? buffer->service_id() : 0;
+ glBindBuffer(GL_ARRAY_BUFFER, buffer_service_id);
+ const void* ptr = reinterpret_cast<const void*>(attrib->offset());
+ glVertexAttribPointer(attrib_index,
+ attrib->size(),
+ attrib->type(),
+ attrib->normalized(),
+ attrib->gl_stride(),
+ ptr);
+
+ // Restore attrib divisor if supported.
+ if (feature_info_->feature_flags().angle_instanced_arrays)
+ glVertexAttribDivisorANGLE(attrib_index, attrib->divisor());
+
+ // Never touch vertex attribute 0's state (in particular, never
+ // disable it) when running on desktop GL because it will never be
+ // re-enabled.
+ if (attrib_index != 0 ||
+ gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ if (attrib->enabled()) {
+ glEnableVertexAttribArray(attrib_index);
+ } else {
+ glDisableVertexAttribArray(attrib_index);
+ }
+ }
+ }
+}
+
+void ContextState::RestoreVertexAttribs() const {
+ // Restore Vertex Attrib Arrays
+  // TODO: This check should not be needed. RestoreState is getting called
+  // before GLES2Decoder::Initialize, which is a bug.
+ if (vertex_attrib_manager.get()) {
+ // Restore VAOs.
+ if (feature_info_->feature_flags().native_vertex_array_object) {
+ // If default VAO is still using shared id 0 instead of unique ids
+ // per-context, default VAO state must be restored.
+ GLuint default_vao_service_id =
+ default_vertex_attrib_manager->service_id();
+ if (default_vao_service_id == 0)
+ RestoreVertexAttribArrays(default_vertex_attrib_manager);
+
+ // Restore the current VAO binding, unless it's the same as the
+ // default above.
+ GLuint curr_vao_service_id = vertex_attrib_manager->service_id();
+ if (curr_vao_service_id != 0)
+ glBindVertexArrayOES(curr_vao_service_id);
+ } else {
+ // If native VAO isn't supported, emulated VAOs are used.
+ // Restore to the currently bound VAO.
+ RestoreVertexAttribArrays(vertex_attrib_manager);
+ }
+ }
+
+  // Values set with glVertexAttrib4fv aren't part of VAO state and must be
+  // restored separately.
+ RestoreVertexAttribValues();
+}
+
+void ContextState::RestoreGlobalState(const ContextState* prev_state) const {
+ InitCapabilities(prev_state);
+ InitState(prev_state);
+}
+
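+// Note on ordering (informal): RestoreVertexAttribs() rebinds GL_ARRAY_BUFFER
+// while walking the attribs, so RestoreBufferBindings() runs after it to put
+// the tracked array buffer binding back; the global capability/state restore
+// comes last.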
+void ContextState::RestoreState(const ContextState* prev_state) {
+ RestoreAllTextureUnitBindings(prev_state);
+ RestoreVertexAttribs();
+ RestoreBufferBindings();
+ RestoreRenderbufferBindings();
+ RestoreProgramBindings();
+ RestoreGlobalState(prev_state);
+}
+
+ErrorState* ContextState::GetErrorState() {
+ return error_state_.get();
+}
+
+// Include the auto-generated part of this file. It is split out so that the
+// non-auto-generated parts can be edited right here in this file instead of
+// editing a template or the code generator.
+#include "gpu/command_buffer/service/context_state_impl_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/context_state.h b/gpu/command_buffer/service/context_state.h
new file mode 100644
index 0000000..7488f57
--- /dev/null
+++ b/gpu/command_buffer/service/context_state.h
@@ -0,0 +1,221 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the ContextState class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_H_
+
+#include <vector>
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class Buffer;
+class ErrorState;
+class ErrorStateClient;
+class FeatureInfo;
+class Framebuffer;
+class Program;
+class Renderbuffer;
+
+// State associated with each texture unit.
+struct GPU_EXPORT TextureUnit {
+ TextureUnit();
+ ~TextureUnit();
+
+ // The last target that was bound to this texture unit.
+ GLenum bind_target;
+
+ // texture currently bound to this unit's GL_TEXTURE_2D with glBindTexture
+ scoped_refptr<TextureRef> bound_texture_2d;
+
+ // texture currently bound to this unit's GL_TEXTURE_CUBE_MAP with
+ // glBindTexture
+ scoped_refptr<TextureRef> bound_texture_cube_map;
+
+ // texture currently bound to this unit's GL_TEXTURE_EXTERNAL_OES with
+ // glBindTexture
+ scoped_refptr<TextureRef> bound_texture_external_oes;
+
+ // texture currently bound to this unit's GL_TEXTURE_RECTANGLE_ARB with
+ // glBindTexture
+ scoped_refptr<TextureRef> bound_texture_rectangle_arb;
+
+ scoped_refptr<TextureRef> GetInfoForSamplerType(
+ GLenum type) {
+ DCHECK(type == GL_SAMPLER_2D || type == GL_SAMPLER_CUBE ||
+ type == GL_SAMPLER_EXTERNAL_OES || type == GL_SAMPLER_2D_RECT_ARB);
+ switch (type) {
+ case GL_SAMPLER_2D:
+ return bound_texture_2d;
+ case GL_SAMPLER_CUBE:
+ return bound_texture_cube_map;
+ case GL_SAMPLER_EXTERNAL_OES:
+ return bound_texture_external_oes;
+ case GL_SAMPLER_2D_RECT_ARB:
+ return bound_texture_rectangle_arb;
+ }
+
+ NOTREACHED();
+ return NULL;
+ }
+
+ void Unbind(TextureRef* texture) {
+ if (bound_texture_2d.get() == texture) {
+ bound_texture_2d = NULL;
+ }
+ if (bound_texture_cube_map.get() == texture) {
+ bound_texture_cube_map = NULL;
+ }
+ if (bound_texture_external_oes.get() == texture) {
+ bound_texture_external_oes = NULL;
+ }
+ }
+};
+
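+// Default-constructs to (0, 0, 0, 1), which matches the initial value of a
+// generic vertex attribute in OpenGL ES.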
+struct Vec4 {
+ Vec4() {
+ v[0] = 0.0f;
+ v[1] = 0.0f;
+ v[2] = 0.0f;
+ v[3] = 1.0f;
+ }
+ float v[4];
+};
+
+struct GPU_EXPORT ContextState {
+ ContextState(FeatureInfo* feature_info,
+ ErrorStateClient* error_state_client,
+ Logger* logger);
+ ~ContextState();
+
+ void Initialize();
+
+ void SetIgnoreCachedStateForTest(bool ignore) {
+ ignore_cached_state = ignore;
+ }
+
+ void RestoreState(const ContextState* prev_state);
+ void InitCapabilities(const ContextState* prev_state) const;
+ void InitState(const ContextState* prev_state) const;
+
+ void RestoreActiveTexture() const;
+ void RestoreAllTextureUnitBindings(const ContextState* prev_state) const;
+ void RestoreActiveTextureUnitBinding(unsigned int target) const;
+ void RestoreVertexAttribValues() const;
+ void RestoreVertexAttribArrays(
+ const scoped_refptr<VertexAttribManager> attrib_manager) const;
+ void RestoreVertexAttribs() const;
+ void RestoreBufferBindings() const;
+ void RestoreGlobalState(const ContextState* prev_state) const;
+ void RestoreProgramBindings() const;
+ void RestoreRenderbufferBindings();
+ void RestoreTextureUnitBindings(
+ GLuint unit, const ContextState* prev_state) const;
+
+ // Helper for getting cached state.
+ bool GetStateAsGLint(
+ GLenum pname, GLint* params, GLsizei* num_written) const;
+ bool GetStateAsGLfloat(
+ GLenum pname, GLfloat* params, GLsizei* num_written) const;
+ bool GetEnabled(GLenum cap) const;
+
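+  // The SetDevice* helpers below follow one pattern (sketched informally):
+  // each cached_* field mirrors the value last pushed to the driver, so a
+  // redundant GL call is skipped unless ignore_cached_state is set (as it is
+  // by SetIgnoreCachedStateForTest).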
+ inline void SetDeviceColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ if (cached_color_mask_red == red && cached_color_mask_green == green &&
+ cached_color_mask_blue == blue && cached_color_mask_alpha == alpha &&
+ !ignore_cached_state)
+ return;
+ cached_color_mask_red = red;
+ cached_color_mask_green = green;
+ cached_color_mask_blue = blue;
+ cached_color_mask_alpha = alpha;
+ glColorMask(red, green, blue, alpha);
+ }
+
+ inline void SetDeviceDepthMask(GLboolean mask) {
+ if (cached_depth_mask == mask && !ignore_cached_state)
+ return;
+ cached_depth_mask = mask;
+ glDepthMask(mask);
+ }
+
+ inline void SetDeviceStencilMaskSeparate(GLenum op, GLuint mask) {
+ if (op == GL_FRONT) {
+ if (cached_stencil_front_writemask == mask && !ignore_cached_state)
+ return;
+ cached_stencil_front_writemask = mask;
+ } else if (op == GL_BACK) {
+ if (cached_stencil_back_writemask == mask && !ignore_cached_state)
+ return;
+ cached_stencil_back_writemask = mask;
+ } else {
+ NOTREACHED();
+ return;
+ }
+ glStencilMaskSeparate(op, mask);
+ }
+
+ ErrorState* GetErrorState();
+
+ #include "gpu/command_buffer/service/context_state_autogen.h"
+
+ EnableFlags enable_flags;
+
+  // The currently active texture unit, as a zero-based index.
+ // In other words, if we call glActiveTexture(GL_TEXTURE2) this value would
+ // be 2.
+ GLuint active_texture_unit;
+
+ // The currently bound array buffer. If this is 0 it is illegal to call
+ // glVertexAttribPointer.
+ scoped_refptr<Buffer> bound_array_buffer;
+
+ // Which textures are bound to texture units through glActiveTexture.
+ std::vector<TextureUnit> texture_units;
+
+ // The values for each attrib.
+ std::vector<Vec4> attrib_values;
+
+ // Class that manages vertex attribs.
+ scoped_refptr<VertexAttribManager> vertex_attrib_manager;
+ scoped_refptr<VertexAttribManager> default_vertex_attrib_manager;
+
+ // The program in use by glUseProgram
+ scoped_refptr<Program> current_program;
+
+ // The currently bound renderbuffer
+ scoped_refptr<Renderbuffer> bound_renderbuffer;
+ bool bound_renderbuffer_valid;
+
+  // A map of target -> Query for the currently active queries.
+ typedef std::map<GLuint, scoped_refptr<QueryManager::Query> > QueryMap;
+ QueryMap current_queries;
+
+ bool pack_reverse_row_order;
+ bool ignore_cached_state;
+
+ mutable bool fbo_binding_for_scissor_workaround_dirty_;
+ FeatureInfo* feature_info_;
+
+ private:
+ scoped_ptr<ErrorState> error_state_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_H_
+
diff --git a/gpu/command_buffer/service/context_state_autogen.h b/gpu/command_buffer/service/context_state_autogen.h
new file mode 100644
index 0000000..fcae244
--- /dev/null
+++ b/gpu/command_buffer/service/context_state_autogen.h
@@ -0,0 +1,162 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by context_state.h
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_AUTOGEN_H_
+
+struct EnableFlags {
+ EnableFlags();
+ bool blend;
+ bool cached_blend;
+ bool cull_face;
+ bool cached_cull_face;
+ bool depth_test;
+ bool cached_depth_test;
+ bool dither;
+ bool cached_dither;
+ bool polygon_offset_fill;
+ bool cached_polygon_offset_fill;
+ bool sample_alpha_to_coverage;
+ bool cached_sample_alpha_to_coverage;
+ bool sample_coverage;
+ bool cached_sample_coverage;
+ bool scissor_test;
+ bool cached_scissor_test;
+ bool stencil_test;
+ bool cached_stencil_test;
+};
+
+GLfloat blend_color_red;
+GLfloat blend_color_green;
+GLfloat blend_color_blue;
+GLfloat blend_color_alpha;
+GLenum blend_equation_rgb;
+GLenum blend_equation_alpha;
+GLenum blend_source_rgb;
+GLenum blend_dest_rgb;
+GLenum blend_source_alpha;
+GLenum blend_dest_alpha;
+GLfloat color_clear_red;
+GLfloat color_clear_green;
+GLfloat color_clear_blue;
+GLfloat color_clear_alpha;
+GLclampf depth_clear;
+GLint stencil_clear;
+GLboolean color_mask_red;
+GLboolean cached_color_mask_red;
+GLboolean color_mask_green;
+GLboolean cached_color_mask_green;
+GLboolean color_mask_blue;
+GLboolean cached_color_mask_blue;
+GLboolean color_mask_alpha;
+GLboolean cached_color_mask_alpha;
+GLenum cull_mode;
+GLenum depth_func;
+GLboolean depth_mask;
+GLboolean cached_depth_mask;
+GLclampf z_near;
+GLclampf z_far;
+GLenum front_face;
+GLenum hint_generate_mipmap;
+GLenum hint_fragment_shader_derivative;
+GLfloat line_width;
+GLfloat modelview_matrix[16];
+GLfloat projection_matrix[16];
+GLint pack_alignment;
+GLint unpack_alignment;
+GLfloat polygon_offset_factor;
+GLfloat polygon_offset_units;
+GLclampf sample_coverage_value;
+GLboolean sample_coverage_invert;
+GLint scissor_x;
+GLint scissor_y;
+GLsizei scissor_width;
+GLsizei scissor_height;
+GLenum stencil_front_func;
+GLint stencil_front_ref;
+GLuint stencil_front_mask;
+GLenum stencil_back_func;
+GLint stencil_back_ref;
+GLuint stencil_back_mask;
+GLuint stencil_front_writemask;
+GLuint cached_stencil_front_writemask;
+GLuint stencil_back_writemask;
+GLuint cached_stencil_back_writemask;
+GLenum stencil_front_fail_op;
+GLenum stencil_front_z_fail_op;
+GLenum stencil_front_z_pass_op;
+GLenum stencil_back_fail_op;
+GLenum stencil_back_z_fail_op;
+GLenum stencil_back_z_pass_op;
+GLint viewport_x;
+GLint viewport_y;
+GLsizei viewport_width;
+GLsizei viewport_height;
+
+inline void SetDeviceCapabilityState(GLenum cap, bool enable) {
+ switch (cap) {
+ case GL_BLEND:
+ if (enable_flags.cached_blend == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_blend = enable;
+ break;
+ case GL_CULL_FACE:
+ if (enable_flags.cached_cull_face == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_cull_face = enable;
+ break;
+ case GL_DEPTH_TEST:
+ if (enable_flags.cached_depth_test == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_depth_test = enable;
+ break;
+ case GL_DITHER:
+ if (enable_flags.cached_dither == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_dither = enable;
+ break;
+ case GL_POLYGON_OFFSET_FILL:
+ if (enable_flags.cached_polygon_offset_fill == enable &&
+ !ignore_cached_state)
+ return;
+ enable_flags.cached_polygon_offset_fill = enable;
+ break;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ if (enable_flags.cached_sample_alpha_to_coverage == enable &&
+ !ignore_cached_state)
+ return;
+ enable_flags.cached_sample_alpha_to_coverage = enable;
+ break;
+ case GL_SAMPLE_COVERAGE:
+ if (enable_flags.cached_sample_coverage == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_sample_coverage = enable;
+ break;
+ case GL_SCISSOR_TEST:
+ if (enable_flags.cached_scissor_test == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_scissor_test = enable;
+ break;
+ case GL_STENCIL_TEST:
+ if (enable_flags.cached_stencil_test == enable && !ignore_cached_state)
+ return;
+ enable_flags.cached_stencil_test = enable;
+ break;
+ default:
+ NOTREACHED();
+ return;
+ }
+ if (enable)
+ glEnable(cap);
+ else
+ glDisable(cap);
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/context_state_impl_autogen.h b/gpu/command_buffer/service/context_state_impl_autogen.h
new file mode 100644
index 0000000..1b1e5fe
--- /dev/null
+++ b/gpu/command_buffer/service/context_state_impl_autogen.h
@@ -0,0 +1,1068 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by context_state.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_IMPL_AUTOGEN_H_
+
+ContextState::EnableFlags::EnableFlags()
+ : blend(false),
+ cached_blend(false),
+ cull_face(false),
+ cached_cull_face(false),
+ depth_test(false),
+ cached_depth_test(false),
+ dither(true),
+ cached_dither(true),
+ polygon_offset_fill(false),
+ cached_polygon_offset_fill(false),
+ sample_alpha_to_coverage(false),
+ cached_sample_alpha_to_coverage(false),
+ sample_coverage(false),
+ cached_sample_coverage(false),
+ scissor_test(false),
+ cached_scissor_test(false),
+ stencil_test(false),
+ cached_stencil_test(false) {
+}
+
+void ContextState::Initialize() {
+ blend_color_red = 0.0f;
+ blend_color_green = 0.0f;
+ blend_color_blue = 0.0f;
+ blend_color_alpha = 0.0f;
+ blend_equation_rgb = GL_FUNC_ADD;
+ blend_equation_alpha = GL_FUNC_ADD;
+ blend_source_rgb = GL_ONE;
+ blend_dest_rgb = GL_ZERO;
+ blend_source_alpha = GL_ONE;
+ blend_dest_alpha = GL_ZERO;
+ color_clear_red = 0.0f;
+ color_clear_green = 0.0f;
+ color_clear_blue = 0.0f;
+ color_clear_alpha = 0.0f;
+ depth_clear = 1.0f;
+ stencil_clear = 0;
+ color_mask_red = true;
+ cached_color_mask_red = true;
+ color_mask_green = true;
+ cached_color_mask_green = true;
+ color_mask_blue = true;
+ cached_color_mask_blue = true;
+ color_mask_alpha = true;
+ cached_color_mask_alpha = true;
+ cull_mode = GL_BACK;
+ depth_func = GL_LESS;
+ depth_mask = true;
+ cached_depth_mask = true;
+ z_near = 0.0f;
+ z_far = 1.0f;
+ front_face = GL_CCW;
+ hint_generate_mipmap = GL_DONT_CARE;
+ hint_fragment_shader_derivative = GL_DONT_CARE;
+ line_width = 1.0f;
+ modelview_matrix[0] = 1.0f;
+ modelview_matrix[1] = 0.0f;
+ modelview_matrix[2] = 0.0f;
+ modelview_matrix[3] = 0.0f;
+ modelview_matrix[4] = 0.0f;
+ modelview_matrix[5] = 1.0f;
+ modelview_matrix[6] = 0.0f;
+ modelview_matrix[7] = 0.0f;
+ modelview_matrix[8] = 0.0f;
+ modelview_matrix[9] = 0.0f;
+ modelview_matrix[10] = 1.0f;
+ modelview_matrix[11] = 0.0f;
+ modelview_matrix[12] = 0.0f;
+ modelview_matrix[13] = 0.0f;
+ modelview_matrix[14] = 0.0f;
+ modelview_matrix[15] = 1.0f;
+ projection_matrix[0] = 1.0f;
+ projection_matrix[1] = 0.0f;
+ projection_matrix[2] = 0.0f;
+ projection_matrix[3] = 0.0f;
+ projection_matrix[4] = 0.0f;
+ projection_matrix[5] = 1.0f;
+ projection_matrix[6] = 0.0f;
+ projection_matrix[7] = 0.0f;
+ projection_matrix[8] = 0.0f;
+ projection_matrix[9] = 0.0f;
+ projection_matrix[10] = 1.0f;
+ projection_matrix[11] = 0.0f;
+ projection_matrix[12] = 0.0f;
+ projection_matrix[13] = 0.0f;
+ projection_matrix[14] = 0.0f;
+ projection_matrix[15] = 1.0f;
+ pack_alignment = 4;
+ unpack_alignment = 4;
+ polygon_offset_factor = 0.0f;
+ polygon_offset_units = 0.0f;
+ sample_coverage_value = 1.0f;
+ sample_coverage_invert = false;
+ scissor_x = 0;
+ scissor_y = 0;
+ scissor_width = 1;
+ scissor_height = 1;
+ stencil_front_func = GL_ALWAYS;
+ stencil_front_ref = 0;
+ stencil_front_mask = 0xFFFFFFFFU;
+ stencil_back_func = GL_ALWAYS;
+ stencil_back_ref = 0;
+ stencil_back_mask = 0xFFFFFFFFU;
+ stencil_front_writemask = 0xFFFFFFFFU;
+ cached_stencil_front_writemask = 0xFFFFFFFFU;
+ stencil_back_writemask = 0xFFFFFFFFU;
+ cached_stencil_back_writemask = 0xFFFFFFFFU;
+ stencil_front_fail_op = GL_KEEP;
+ stencil_front_z_fail_op = GL_KEEP;
+ stencil_front_z_pass_op = GL_KEEP;
+ stencil_back_fail_op = GL_KEEP;
+ stencil_back_z_fail_op = GL_KEEP;
+ stencil_back_z_pass_op = GL_KEEP;
+ viewport_x = 0;
+ viewport_y = 0;
+ viewport_width = 1;
+ viewport_height = 1;
+}
+
+void ContextState::InitCapabilities(const ContextState* prev_state) const {
+ if (prev_state) {
+ if (prev_state->enable_flags.cached_blend != enable_flags.cached_blend)
+ EnableDisable(GL_BLEND, enable_flags.cached_blend);
+ if (prev_state->enable_flags.cached_cull_face !=
+ enable_flags.cached_cull_face)
+ EnableDisable(GL_CULL_FACE, enable_flags.cached_cull_face);
+ if (prev_state->enable_flags.cached_depth_test !=
+ enable_flags.cached_depth_test)
+ EnableDisable(GL_DEPTH_TEST, enable_flags.cached_depth_test);
+ if (prev_state->enable_flags.cached_dither != enable_flags.cached_dither)
+ EnableDisable(GL_DITHER, enable_flags.cached_dither);
+ if (prev_state->enable_flags.cached_polygon_offset_fill !=
+ enable_flags.cached_polygon_offset_fill)
+ EnableDisable(GL_POLYGON_OFFSET_FILL,
+ enable_flags.cached_polygon_offset_fill);
+ if (prev_state->enable_flags.cached_sample_alpha_to_coverage !=
+ enable_flags.cached_sample_alpha_to_coverage)
+ EnableDisable(GL_SAMPLE_ALPHA_TO_COVERAGE,
+ enable_flags.cached_sample_alpha_to_coverage);
+ if (prev_state->enable_flags.cached_sample_coverage !=
+ enable_flags.cached_sample_coverage)
+ EnableDisable(GL_SAMPLE_COVERAGE, enable_flags.cached_sample_coverage);
+ if (prev_state->enable_flags.cached_scissor_test !=
+ enable_flags.cached_scissor_test)
+ EnableDisable(GL_SCISSOR_TEST, enable_flags.cached_scissor_test);
+ if (prev_state->enable_flags.cached_stencil_test !=
+ enable_flags.cached_stencil_test)
+ EnableDisable(GL_STENCIL_TEST, enable_flags.cached_stencil_test);
+ } else {
+ EnableDisable(GL_BLEND, enable_flags.cached_blend);
+ EnableDisable(GL_CULL_FACE, enable_flags.cached_cull_face);
+ EnableDisable(GL_DEPTH_TEST, enable_flags.cached_depth_test);
+ EnableDisable(GL_DITHER, enable_flags.cached_dither);
+ EnableDisable(GL_POLYGON_OFFSET_FILL,
+ enable_flags.cached_polygon_offset_fill);
+ EnableDisable(GL_SAMPLE_ALPHA_TO_COVERAGE,
+ enable_flags.cached_sample_alpha_to_coverage);
+ EnableDisable(GL_SAMPLE_COVERAGE, enable_flags.cached_sample_coverage);
+ EnableDisable(GL_SCISSOR_TEST, enable_flags.cached_scissor_test);
+ EnableDisable(GL_STENCIL_TEST, enable_flags.cached_stencil_test);
+ }
+}
+
+void ContextState::InitState(const ContextState* prev_state) const {
+ if (prev_state) {
+ if ((blend_color_red != prev_state->blend_color_red) ||
+ (blend_color_green != prev_state->blend_color_green) ||
+ (blend_color_blue != prev_state->blend_color_blue) ||
+ (blend_color_alpha != prev_state->blend_color_alpha))
+ glBlendColor(blend_color_red,
+ blend_color_green,
+ blend_color_blue,
+ blend_color_alpha);
+ if ((blend_equation_rgb != prev_state->blend_equation_rgb) ||
+ (blend_equation_alpha != prev_state->blend_equation_alpha))
+ glBlendEquationSeparate(blend_equation_rgb, blend_equation_alpha);
+ if ((blend_source_rgb != prev_state->blend_source_rgb) ||
+ (blend_dest_rgb != prev_state->blend_dest_rgb) ||
+ (blend_source_alpha != prev_state->blend_source_alpha) ||
+ (blend_dest_alpha != prev_state->blend_dest_alpha))
+ glBlendFuncSeparate(blend_source_rgb,
+ blend_dest_rgb,
+ blend_source_alpha,
+ blend_dest_alpha);
+ if ((color_clear_red != prev_state->color_clear_red) ||
+ (color_clear_green != prev_state->color_clear_green) ||
+ (color_clear_blue != prev_state->color_clear_blue) ||
+ (color_clear_alpha != prev_state->color_clear_alpha))
+ glClearColor(color_clear_red,
+ color_clear_green,
+ color_clear_blue,
+ color_clear_alpha);
+ if ((depth_clear != prev_state->depth_clear))
+ glClearDepth(depth_clear);
+ if ((stencil_clear != prev_state->stencil_clear))
+ glClearStencil(stencil_clear);
+ if ((cached_color_mask_red != prev_state->cached_color_mask_red) ||
+ (cached_color_mask_green != prev_state->cached_color_mask_green) ||
+ (cached_color_mask_blue != prev_state->cached_color_mask_blue) ||
+ (cached_color_mask_alpha != prev_state->cached_color_mask_alpha))
+ glColorMask(cached_color_mask_red,
+ cached_color_mask_green,
+ cached_color_mask_blue,
+ cached_color_mask_alpha);
+ if ((cull_mode != prev_state->cull_mode))
+ glCullFace(cull_mode);
+ if ((depth_func != prev_state->depth_func))
+ glDepthFunc(depth_func);
+ if ((cached_depth_mask != prev_state->cached_depth_mask))
+ glDepthMask(cached_depth_mask);
+ if ((z_near != prev_state->z_near) || (z_far != prev_state->z_far))
+ glDepthRange(z_near, z_far);
+ if ((front_face != prev_state->front_face))
+ glFrontFace(front_face);
+ if (prev_state->hint_generate_mipmap != hint_generate_mipmap) {
+ glHint(GL_GENERATE_MIPMAP_HINT, hint_generate_mipmap);
+ }
+ if (feature_info_->feature_flags().oes_standard_derivatives) {
+ if (prev_state->hint_fragment_shader_derivative !=
+ hint_fragment_shader_derivative) {
+ glHint(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES,
+ hint_fragment_shader_derivative);
+ }
+ }
+ if ((line_width != prev_state->line_width))
+ glLineWidth(line_width);
+ if (feature_info_->feature_flags().chromium_path_rendering) {
+ if (memcmp(prev_state->modelview_matrix,
+ modelview_matrix,
+ sizeof(GLfloat) * 16)) {
+ glMatrixLoadfEXT(GL_PATH_MODELVIEW_CHROMIUM, modelview_matrix);
+ }
+ }
+ if (feature_info_->feature_flags().chromium_path_rendering) {
+ if (memcmp(prev_state->projection_matrix,
+ projection_matrix,
+ sizeof(GLfloat) * 16)) {
+ glMatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM, projection_matrix);
+ }
+ }
+ if (prev_state->pack_alignment != pack_alignment) {
+ glPixelStorei(GL_PACK_ALIGNMENT, pack_alignment);
+ }
+ if (prev_state->unpack_alignment != unpack_alignment) {
+ glPixelStorei(GL_UNPACK_ALIGNMENT, unpack_alignment);
+ }
+ if ((polygon_offset_factor != prev_state->polygon_offset_factor) ||
+ (polygon_offset_units != prev_state->polygon_offset_units))
+ glPolygonOffset(polygon_offset_factor, polygon_offset_units);
+ if ((sample_coverage_value != prev_state->sample_coverage_value) ||
+ (sample_coverage_invert != prev_state->sample_coverage_invert))
+ glSampleCoverage(sample_coverage_value, sample_coverage_invert);
+ if ((scissor_x != prev_state->scissor_x) ||
+ (scissor_y != prev_state->scissor_y) ||
+ (scissor_width != prev_state->scissor_width) ||
+ (scissor_height != prev_state->scissor_height))
+ glScissor(scissor_x, scissor_y, scissor_width, scissor_height);
+ if ((stencil_front_func != prev_state->stencil_front_func) ||
+ (stencil_front_ref != prev_state->stencil_front_ref) ||
+ (stencil_front_mask != prev_state->stencil_front_mask))
+ glStencilFuncSeparate(
+ GL_FRONT, stencil_front_func, stencil_front_ref, stencil_front_mask);
+ if ((stencil_back_func != prev_state->stencil_back_func) ||
+ (stencil_back_ref != prev_state->stencil_back_ref) ||
+ (stencil_back_mask != prev_state->stencil_back_mask))
+ glStencilFuncSeparate(
+ GL_BACK, stencil_back_func, stencil_back_ref, stencil_back_mask);
+ if ((cached_stencil_front_writemask !=
+ prev_state->cached_stencil_front_writemask))
+ glStencilMaskSeparate(GL_FRONT, cached_stencil_front_writemask);
+ if ((cached_stencil_back_writemask !=
+ prev_state->cached_stencil_back_writemask))
+ glStencilMaskSeparate(GL_BACK, cached_stencil_back_writemask);
+ if ((stencil_front_fail_op != prev_state->stencil_front_fail_op) ||
+ (stencil_front_z_fail_op != prev_state->stencil_front_z_fail_op) ||
+ (stencil_front_z_pass_op != prev_state->stencil_front_z_pass_op))
+ glStencilOpSeparate(GL_FRONT,
+ stencil_front_fail_op,
+ stencil_front_z_fail_op,
+ stencil_front_z_pass_op);
+ if ((stencil_back_fail_op != prev_state->stencil_back_fail_op) ||
+ (stencil_back_z_fail_op != prev_state->stencil_back_z_fail_op) ||
+ (stencil_back_z_pass_op != prev_state->stencil_back_z_pass_op))
+ glStencilOpSeparate(GL_BACK,
+ stencil_back_fail_op,
+ stencil_back_z_fail_op,
+ stencil_back_z_pass_op);
+ if ((viewport_x != prev_state->viewport_x) ||
+ (viewport_y != prev_state->viewport_y) ||
+ (viewport_width != prev_state->viewport_width) ||
+ (viewport_height != prev_state->viewport_height))
+ glViewport(viewport_x, viewport_y, viewport_width, viewport_height);
+ } else {
+ glBlendColor(blend_color_red,
+ blend_color_green,
+ blend_color_blue,
+ blend_color_alpha);
+ glBlendEquationSeparate(blend_equation_rgb, blend_equation_alpha);
+ glBlendFuncSeparate(
+ blend_source_rgb, blend_dest_rgb, blend_source_alpha, blend_dest_alpha);
+ glClearColor(color_clear_red,
+ color_clear_green,
+ color_clear_blue,
+ color_clear_alpha);
+ glClearDepth(depth_clear);
+ glClearStencil(stencil_clear);
+ glColorMask(cached_color_mask_red,
+ cached_color_mask_green,
+ cached_color_mask_blue,
+ cached_color_mask_alpha);
+ glCullFace(cull_mode);
+ glDepthFunc(depth_func);
+ glDepthMask(cached_depth_mask);
+ glDepthRange(z_near, z_far);
+ glFrontFace(front_face);
+ glHint(GL_GENERATE_MIPMAP_HINT, hint_generate_mipmap);
+ if (feature_info_->feature_flags().oes_standard_derivatives) {
+ glHint(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES,
+ hint_fragment_shader_derivative);
+ }
+ glLineWidth(line_width);
+ if (feature_info_->feature_flags().chromium_path_rendering) {
+ glMatrixLoadfEXT(GL_PATH_MODELVIEW_CHROMIUM, modelview_matrix);
+ }
+ if (feature_info_->feature_flags().chromium_path_rendering) {
+ glMatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM, projection_matrix);
+ }
+ glPixelStorei(GL_PACK_ALIGNMENT, pack_alignment);
+ glPixelStorei(GL_UNPACK_ALIGNMENT, unpack_alignment);
+ glPolygonOffset(polygon_offset_factor, polygon_offset_units);
+ glSampleCoverage(sample_coverage_value, sample_coverage_invert);
+ glScissor(scissor_x, scissor_y, scissor_width, scissor_height);
+ glStencilFuncSeparate(
+ GL_FRONT, stencil_front_func, stencil_front_ref, stencil_front_mask);
+ glStencilFuncSeparate(
+ GL_BACK, stencil_back_func, stencil_back_ref, stencil_back_mask);
+ glStencilMaskSeparate(GL_FRONT, cached_stencil_front_writemask);
+ glStencilMaskSeparate(GL_BACK, cached_stencil_back_writemask);
+ glStencilOpSeparate(GL_FRONT,
+ stencil_front_fail_op,
+ stencil_front_z_fail_op,
+ stencil_front_z_pass_op);
+ glStencilOpSeparate(GL_BACK,
+ stencil_back_fail_op,
+ stencil_back_z_fail_op,
+ stencil_back_z_pass_op);
+ glViewport(viewport_x, viewport_y, viewport_width, viewport_height);
+ }
+}
+bool ContextState::GetEnabled(GLenum cap) const {
+ switch (cap) {
+ case GL_BLEND:
+ return enable_flags.blend;
+ case GL_CULL_FACE:
+ return enable_flags.cull_face;
+ case GL_DEPTH_TEST:
+ return enable_flags.depth_test;
+ case GL_DITHER:
+ return enable_flags.dither;
+ case GL_POLYGON_OFFSET_FILL:
+ return enable_flags.polygon_offset_fill;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ return enable_flags.sample_alpha_to_coverage;
+ case GL_SAMPLE_COVERAGE:
+ return enable_flags.sample_coverage;
+ case GL_SCISSOR_TEST:
+ return enable_flags.scissor_test;
+ case GL_STENCIL_TEST:
+ return enable_flags.stencil_test;
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+bool ContextState::GetStateAsGLint(GLenum pname,
+ GLint* params,
+ GLsizei* num_written) const {
+ switch (pname) {
+ case GL_BLEND_COLOR:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_color_red);
+ params[1] = static_cast<GLint>(blend_color_green);
+ params[2] = static_cast<GLint>(blend_color_blue);
+ params[3] = static_cast<GLint>(blend_color_alpha);
+ }
+ return true;
+ case GL_BLEND_EQUATION_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_equation_rgb);
+ }
+ return true;
+ case GL_BLEND_EQUATION_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_equation_alpha);
+ }
+ return true;
+ case GL_BLEND_SRC_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_source_rgb);
+ }
+ return true;
+ case GL_BLEND_DST_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_dest_rgb);
+ }
+ return true;
+ case GL_BLEND_SRC_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_source_alpha);
+ }
+ return true;
+ case GL_BLEND_DST_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(blend_dest_alpha);
+ }
+ return true;
+ case GL_COLOR_CLEAR_VALUE:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLint>(color_clear_red);
+ params[1] = static_cast<GLint>(color_clear_green);
+ params[2] = static_cast<GLint>(color_clear_blue);
+ params[3] = static_cast<GLint>(color_clear_alpha);
+ }
+ return true;
+ case GL_DEPTH_CLEAR_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(depth_clear);
+ }
+ return true;
+ case GL_STENCIL_CLEAR_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_clear);
+ }
+ return true;
+ case GL_COLOR_WRITEMASK:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLint>(color_mask_red);
+ params[1] = static_cast<GLint>(color_mask_green);
+ params[2] = static_cast<GLint>(color_mask_blue);
+ params[3] = static_cast<GLint>(color_mask_alpha);
+ }
+ return true;
+ case GL_CULL_FACE_MODE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(cull_mode);
+ }
+ return true;
+ case GL_DEPTH_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(depth_func);
+ }
+ return true;
+ case GL_DEPTH_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(depth_mask);
+ }
+ return true;
+ case GL_DEPTH_RANGE:
+ *num_written = 2;
+ if (params) {
+ params[0] = static_cast<GLint>(z_near);
+ params[1] = static_cast<GLint>(z_far);
+ }
+ return true;
+ case GL_FRONT_FACE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(front_face);
+ }
+ return true;
+ case GL_GENERATE_MIPMAP_HINT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(hint_generate_mipmap);
+ }
+ return true;
+ case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(hint_fragment_shader_derivative);
+ }
+ return true;
+ case GL_LINE_WIDTH:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(line_width);
+ }
+ return true;
+ case GL_PATH_MODELVIEW_MATRIX_CHROMIUM:
+ *num_written = 16;
+ if (params) {
+ for (size_t i = 0; i < 16; ++i) {
+ params[i] = static_cast<GLint>(round(modelview_matrix[i]));
+ }
+ }
+ return true;
+ case GL_PATH_PROJECTION_MATRIX_CHROMIUM:
+ *num_written = 16;
+ if (params) {
+ for (size_t i = 0; i < 16; ++i) {
+ params[i] = static_cast<GLint>(round(projection_matrix[i]));
+ }
+ }
+ return true;
+ case GL_PACK_ALIGNMENT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(pack_alignment);
+ }
+ return true;
+ case GL_UNPACK_ALIGNMENT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(unpack_alignment);
+ }
+ return true;
+ case GL_POLYGON_OFFSET_FACTOR:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(round(polygon_offset_factor));
+ }
+ return true;
+ case GL_POLYGON_OFFSET_UNITS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(round(polygon_offset_units));
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(sample_coverage_value);
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE_INVERT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(sample_coverage_invert);
+ }
+ return true;
+ case GL_SCISSOR_BOX:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLint>(scissor_x);
+ params[1] = static_cast<GLint>(scissor_y);
+ params[2] = static_cast<GLint>(scissor_width);
+ params[3] = static_cast<GLint>(scissor_height);
+ }
+ return true;
+ case GL_STENCIL_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_func);
+ }
+ return true;
+ case GL_STENCIL_REF:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_ref);
+ }
+ return true;
+ case GL_STENCIL_VALUE_MASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_mask);
+ }
+ return true;
+ case GL_STENCIL_BACK_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_func);
+ }
+ return true;
+ case GL_STENCIL_BACK_REF:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_ref);
+ }
+ return true;
+ case GL_STENCIL_BACK_VALUE_MASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_mask);
+ }
+ return true;
+ case GL_STENCIL_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_writemask);
+ }
+ return true;
+ case GL_STENCIL_BACK_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_writemask);
+ }
+ return true;
+ case GL_STENCIL_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_fail_op);
+ }
+ return true;
+ case GL_STENCIL_PASS_DEPTH_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_z_fail_op);
+ }
+ return true;
+ case GL_STENCIL_PASS_DEPTH_PASS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_front_z_pass_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_fail_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_PASS_DEPTH_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_z_fail_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_PASS_DEPTH_PASS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(stencil_back_z_pass_op);
+ }
+ return true;
+ case GL_VIEWPORT:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLint>(viewport_x);
+ params[1] = static_cast<GLint>(viewport_y);
+ params[2] = static_cast<GLint>(viewport_width);
+ params[3] = static_cast<GLint>(viewport_height);
+ }
+ return true;
+ case GL_BLEND:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.blend);
+ }
+ return true;
+ case GL_CULL_FACE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.cull_face);
+ }
+ return true;
+ case GL_DEPTH_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.depth_test);
+ }
+ return true;
+ case GL_DITHER:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.dither);
+ }
+ return true;
+ case GL_POLYGON_OFFSET_FILL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.polygon_offset_fill);
+ }
+ return true;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.sample_alpha_to_coverage);
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.sample_coverage);
+ }
+ return true;
+ case GL_SCISSOR_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.scissor_test);
+ }
+ return true;
+ case GL_STENCIL_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.stencil_test);
+ }
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool ContextState::GetStateAsGLfloat(GLenum pname,
+ GLfloat* params,
+ GLsizei* num_written) const {
+ switch (pname) {
+ case GL_BLEND_COLOR:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_color_red);
+ params[1] = static_cast<GLfloat>(blend_color_green);
+ params[2] = static_cast<GLfloat>(blend_color_blue);
+ params[3] = static_cast<GLfloat>(blend_color_alpha);
+ }
+ return true;
+ case GL_BLEND_EQUATION_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_equation_rgb);
+ }
+ return true;
+ case GL_BLEND_EQUATION_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_equation_alpha);
+ }
+ return true;
+ case GL_BLEND_SRC_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_source_rgb);
+ }
+ return true;
+ case GL_BLEND_DST_RGB:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_dest_rgb);
+ }
+ return true;
+ case GL_BLEND_SRC_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_source_alpha);
+ }
+ return true;
+ case GL_BLEND_DST_ALPHA:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(blend_dest_alpha);
+ }
+ return true;
+ case GL_COLOR_CLEAR_VALUE:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLfloat>(color_clear_red);
+ params[1] = static_cast<GLfloat>(color_clear_green);
+ params[2] = static_cast<GLfloat>(color_clear_blue);
+ params[3] = static_cast<GLfloat>(color_clear_alpha);
+ }
+ return true;
+ case GL_DEPTH_CLEAR_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(depth_clear);
+ }
+ return true;
+ case GL_STENCIL_CLEAR_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_clear);
+ }
+ return true;
+ case GL_COLOR_WRITEMASK:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLfloat>(color_mask_red);
+ params[1] = static_cast<GLfloat>(color_mask_green);
+ params[2] = static_cast<GLfloat>(color_mask_blue);
+ params[3] = static_cast<GLfloat>(color_mask_alpha);
+ }
+ return true;
+ case GL_CULL_FACE_MODE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(cull_mode);
+ }
+ return true;
+ case GL_DEPTH_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(depth_func);
+ }
+ return true;
+ case GL_DEPTH_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(depth_mask);
+ }
+ return true;
+ case GL_DEPTH_RANGE:
+ *num_written = 2;
+ if (params) {
+ params[0] = static_cast<GLfloat>(z_near);
+ params[1] = static_cast<GLfloat>(z_far);
+ }
+ return true;
+ case GL_FRONT_FACE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(front_face);
+ }
+ return true;
+ case GL_GENERATE_MIPMAP_HINT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(hint_generate_mipmap);
+ }
+ return true;
+ case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(hint_fragment_shader_derivative);
+ }
+ return true;
+ case GL_LINE_WIDTH:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(line_width);
+ }
+ return true;
+ case GL_PATH_MODELVIEW_MATRIX_CHROMIUM:
+ *num_written = 16;
+ if (params) {
+ memcpy(params, modelview_matrix, sizeof(GLfloat) * 16);
+ }
+ return true;
+ case GL_PATH_PROJECTION_MATRIX_CHROMIUM:
+ *num_written = 16;
+ if (params) {
+ memcpy(params, projection_matrix, sizeof(GLfloat) * 16);
+ }
+ return true;
+ case GL_PACK_ALIGNMENT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(pack_alignment);
+ }
+ return true;
+ case GL_UNPACK_ALIGNMENT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(unpack_alignment);
+ }
+ return true;
+ case GL_POLYGON_OFFSET_FACTOR:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(polygon_offset_factor);
+ }
+ return true;
+ case GL_POLYGON_OFFSET_UNITS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(polygon_offset_units);
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE_VALUE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(sample_coverage_value);
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE_INVERT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(sample_coverage_invert);
+ }
+ return true;
+ case GL_SCISSOR_BOX:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLfloat>(scissor_x);
+ params[1] = static_cast<GLfloat>(scissor_y);
+ params[2] = static_cast<GLfloat>(scissor_width);
+ params[3] = static_cast<GLfloat>(scissor_height);
+ }
+ return true;
+ case GL_STENCIL_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_func);
+ }
+ return true;
+ case GL_STENCIL_REF:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_ref);
+ }
+ return true;
+ case GL_STENCIL_VALUE_MASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_mask);
+ }
+ return true;
+ case GL_STENCIL_BACK_FUNC:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_func);
+ }
+ return true;
+ case GL_STENCIL_BACK_REF:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_ref);
+ }
+ return true;
+ case GL_STENCIL_BACK_VALUE_MASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_mask);
+ }
+ return true;
+ case GL_STENCIL_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_writemask);
+ }
+ return true;
+ case GL_STENCIL_BACK_WRITEMASK:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_writemask);
+ }
+ return true;
+ case GL_STENCIL_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_fail_op);
+ }
+ return true;
+ case GL_STENCIL_PASS_DEPTH_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_z_fail_op);
+ }
+ return true;
+ case GL_STENCIL_PASS_DEPTH_PASS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_front_z_pass_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_fail_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_PASS_DEPTH_FAIL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_z_fail_op);
+ }
+ return true;
+ case GL_STENCIL_BACK_PASS_DEPTH_PASS:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(stencil_back_z_pass_op);
+ }
+ return true;
+ case GL_VIEWPORT:
+ *num_written = 4;
+ if (params) {
+ params[0] = static_cast<GLfloat>(viewport_x);
+ params[1] = static_cast<GLfloat>(viewport_y);
+ params[2] = static_cast<GLfloat>(viewport_width);
+ params[3] = static_cast<GLfloat>(viewport_height);
+ }
+ return true;
+ case GL_BLEND:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.blend);
+ }
+ return true;
+ case GL_CULL_FACE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.cull_face);
+ }
+ return true;
+ case GL_DEPTH_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.depth_test);
+ }
+ return true;
+ case GL_DITHER:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.dither);
+ }
+ return true;
+ case GL_POLYGON_OFFSET_FILL:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.polygon_offset_fill);
+ }
+ return true;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.sample_alpha_to_coverage);
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.sample_coverage);
+ }
+ return true;
+ case GL_SCISSOR_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.scissor_test);
+ }
+ return true;
+ case GL_STENCIL_TEST:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.stencil_test);
+ }
+ return true;
+ default:
+ return false;
+ }
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_IMPL_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/disk_cache_proto.proto b/gpu/command_buffer/service/disk_cache_proto.proto
new file mode 100644
index 0000000..5a55943
--- /dev/null
+++ b/gpu/command_buffer/service/disk_cache_proto.proto
@@ -0,0 +1,26 @@
+option optimize_for = LITE_RUNTIME;
+
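+// Informal summary: these messages define the serialized format of the GPU
+// program disk cache. GpuProgramProto pairs a program binary (program/format)
+// with the translated shader metadata needed to re-establish attribute,
+// uniform and varying bindings when an entry is reloaded.
+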
+message ShaderInfoProto {
+ optional int32 type = 1;
+ optional int32 size = 2;
+ optional string name = 3;
+ optional string key = 4;
+ optional int32 precision = 5;
+ optional int32 static_use = 6;
+}
+
+message ShaderProto {
+ optional bytes sha = 1;
+ repeated ShaderInfoProto attribs = 2;
+ repeated ShaderInfoProto uniforms = 3;
+ repeated ShaderInfoProto varyings = 4;
+}
+
+message GpuProgramProto {
+ optional bytes sha = 1;
+ optional int32 format = 2;
+ optional bytes program = 3;
+
+ optional ShaderProto vertex_shader = 4;
+ optional ShaderProto fragment_shader = 5;
+}
diff --git a/gpu/command_buffer/service/error_state.cc b/gpu/command_buffer/service/error_state.cc
new file mode 100644
index 0000000..ce65aa1
--- /dev/null
+++ b/gpu/command_buffer/service/error_state.cc
@@ -0,0 +1,205 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/error_state.h"
+
+#include <string>
+
+#include "base/strings/stringprintf.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/logger.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+
+class ErrorStateImpl : public ErrorState {
+ public:
+ explicit ErrorStateImpl(ErrorStateClient* client, Logger* logger);
+ virtual ~ErrorStateImpl();
+
+ virtual uint32 GetGLError() OVERRIDE;
+
+ virtual void SetGLError(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ const char* msg) OVERRIDE;
+ virtual void SetGLErrorInvalidEnum(
+ const char* filename,
+ int line,
+ const char* function_name,
+ unsigned int value,
+ const char* label) OVERRIDE;
+ virtual void SetGLErrorInvalidParami(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname,
+ int param) OVERRIDE;
+ virtual void SetGLErrorInvalidParamf(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname,
+ float param) OVERRIDE;
+
+ virtual unsigned int PeekGLError(
+ const char* filename, int line, const char* function_name) OVERRIDE;
+
+ virtual void CopyRealGLErrorsToWrapper(
+ const char* filename, int line, const char* function_name) OVERRIDE;
+
+ virtual void ClearRealGLErrors(
+ const char* filename, int line, const char* function_name) OVERRIDE;
+
+ private:
+ // The last error message set.
+ std::string last_error_;
+ // Current GL error bits.
+ uint32 error_bits_;
+
+ ErrorStateClient* client_;
+ Logger* logger_;
+
+ DISALLOW_COPY_AND_ASSIGN(ErrorStateImpl);
+};
+
+ErrorState::ErrorState() {}
+
+ErrorState::~ErrorState() {}
+
+ErrorState* ErrorState::Create(ErrorStateClient* client, Logger* logger) {
+ return new ErrorStateImpl(client, logger);
+}
+
+ErrorStateImpl::ErrorStateImpl(ErrorStateClient* client, Logger* logger)
+ : error_bits_(0), client_(client), logger_(logger) {}
+
+ErrorStateImpl::~ErrorStateImpl() {}
+
+uint32 ErrorStateImpl::GetGLError() {
+ // Check the GL error first, then our wrapped error.
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR && error_bits_ != 0) {
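+    // error_bits_ stores synthesized errors as a bitmask (one bit per error
+    // enum, see GLES2Util::GLErrorToErrorBit); report the lowest set bit here
+    // and clear it below so each synthesized error is returned exactly once.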
+ for (uint32 mask = 1; mask != 0; mask = mask << 1) {
+ if ((error_bits_ & mask) != 0) {
+ error = GLES2Util::GLErrorBitToGLError(mask);
+ break;
+ }
+ }
+ }
+
+ if (error != GL_NO_ERROR) {
+ // There was an error, clear the corresponding wrapped error.
+ error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
+ }
+ return error;
+}
+
+unsigned int ErrorStateImpl::PeekGLError(
+ const char* filename, int line, const char* function_name) {
+ GLenum error = glGetError();
+ if (error != GL_NO_ERROR) {
+ SetGLError(filename, line, error, function_name, "");
+ }
+ return error;
+}
+
+void ErrorStateImpl::SetGLError(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ const char* msg) {
+ if (msg) {
+ last_error_ = msg;
+ logger_->LogMessage(
+ filename, line,
+ std::string("GL ERROR :") +
+ GLES2Util::GetStringEnum(error) + " : " +
+ function_name + ": " + msg);
+ }
+ error_bits_ |= GLES2Util::GLErrorToErrorBit(error);
+ if (error == GL_OUT_OF_MEMORY)
+ client_->OnOutOfMemoryError();
+}
+
+void ErrorStateImpl::SetGLErrorInvalidEnum(
+ const char* filename,
+ int line,
+ const char* function_name,
+ unsigned int value,
+ const char* label) {
+ SetGLError(filename, line, GL_INVALID_ENUM, function_name,
+ (std::string(label) + " was " +
+ GLES2Util::GetStringEnum(value)).c_str());
+}
+
+void ErrorStateImpl::SetGLErrorInvalidParami(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname, int param) {
+ if (error == GL_INVALID_ENUM) {
+ SetGLError(
+ filename, line, GL_INVALID_ENUM, function_name,
+ (std::string("trying to set ") +
+ GLES2Util::GetStringEnum(pname) + " to " +
+ GLES2Util::GetStringEnum(param)).c_str());
+ } else {
+ SetGLError(
+ filename, line, error, function_name,
+ (std::string("trying to set ") +
+ GLES2Util::GetStringEnum(pname) + " to " +
+ base::StringPrintf("%d", param)).c_str());
+ }
+}
+
+void ErrorStateImpl::SetGLErrorInvalidParamf(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname, float param) {
+ SetGLError(
+ filename, line, error, function_name,
+ (std::string("trying to set ") +
+ GLES2Util::GetStringEnum(pname) + " to " +
+ base::StringPrintf("%G", param)).c_str());
+}
+
+void ErrorStateImpl::CopyRealGLErrorsToWrapper(
+ const char* filename, int line, const char* function_name) {
+ GLenum error;
+ while ((error = glGetError()) != GL_NO_ERROR) {
+ SetGLError(filename, line, error, function_name,
+ "<- error from previous GL command");
+ }
+}
+
+void ErrorStateImpl::ClearRealGLErrors(
+ const char* filename, int line, const char* function_name) {
+  // Clears and logs all current GL errors.
+ GLenum error;
+ while ((error = glGetError()) != GL_NO_ERROR) {
+ if (error != GL_OUT_OF_MEMORY) {
+ // GL_OUT_OF_MEMORY can legally happen on lost device.
+ logger_->LogMessage(
+ filename, line,
+ std::string("GL ERROR :") +
+ GLES2Util::GetStringEnum(error) + " : " +
+ function_name + ": was unhandled");
+ NOTREACHED() << "GL error " << error << " was unhandled.";
+ }
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/error_state.h b/gpu/command_buffer/service/error_state.h
new file mode 100644
index 0000000..95f118c
--- /dev/null
+++ b/gpu/command_buffer/service/error_state.h
@@ -0,0 +1,128 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the ErrorState class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_H_
+
+#include <stdint.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class Logger;
+
+// Use these macros to synthesize GL errors instead of calling the error_state
+// functions directly, as they propagate __FILE__ and __LINE__.
+
+// Use to synthesize a GL error on the error_state.
+#define ERRORSTATE_SET_GL_ERROR(error_state, error, function_name, msg) \
+ error_state->SetGLError(__FILE__, __LINE__, error, function_name, msg)
+
+// Use to synthesize an INVALID_ENUM GL error on the error_state. Will attempt
+// to expand the enum to a string.
+#define ERRORSTATE_SET_GL_ERROR_INVALID_ENUM( \
+ error_state, function_name, value, label) \
+ error_state->SetGLErrorInvalidEnum( \
+ __FILE__, __LINE__, function_name, value, label)
+
+// Use to synthesize a GL error on the error_state for an invalid enum based
+// integer parameter. Will attempt to expand the parameter to a string.
+#define ERRORSTATE_SET_GL_ERROR_INVALID_PARAMI( \
+ error_state, error, function_name, pname, param) \
+ error_state->SetGLErrorInvalidParami( \
+ __FILE__, __LINE__, error, function_name, pname, param)
+
+// Use to synthesize a GL error on the error_state for an invalid enum based
+// float parameter. Will attempt to expand the parameter to a string.
+#define ERRORSTATE_SET_GL_ERROR_INVALID_PARAMF( \
+ error_state, error, function_name, pname, param) \
+ error_state->SetGLErrorInvalidParamf( \
+ __FILE__, __LINE__, error, function_name, pname, param)
+
+// Use to move all pending errors to the wrapper so that on your next GL call
+// you can see whether that call generates an error.
+#define ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state, function_name) \
+ error_state->CopyRealGLErrorsToWrapper(__FILE__, __LINE__, function_name)
+// Use to look at the real GL error and still pass it on to the user.
+#define ERRORSTATE_PEEK_GL_ERROR(error_state, function_name) \
+ error_state->PeekGLError(__FILE__, __LINE__, function_name)
+// Use to clear all current GL errors. FAILS if there are any.
+#define ERRORSTATE_CLEAR_REAL_GL_ERRORS(error_state, function_name) \
+ error_state->ClearRealGLErrors(__FILE__, __LINE__, function_name)
+
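+// A minimal usage sketch, for illustration only (not part of this change);
+// |error_state_| and the GL arguments below are hypothetical:
+//
+//   ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state_, "glTexImage2D");
+//   glTexImage2D(target, level, internal_format, width, height, 0, format,
+//                type, pixels);
+//   if (ERRORSTATE_PEEK_GL_ERROR(error_state_, "glTexImage2D") ==
+//       GL_OUT_OF_MEMORY) {
+//     // The error is recorded in the wrapper, so the client still sees it.
+//   }
+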
+class GPU_EXPORT ErrorStateClient {
+ public:
+ // GL_OUT_OF_MEMORY can cause side effects such as losing the context.
+ virtual void OnOutOfMemoryError() = 0;
+};
+
+class GPU_EXPORT ErrorState {
+ public:
+ virtual ~ErrorState();
+
+ static ErrorState* Create(ErrorStateClient* client, Logger* logger);
+
+ virtual uint32_t GetGLError() = 0;
+
+ virtual void SetGLError(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ const char* msg) = 0;
+ virtual void SetGLErrorInvalidEnum(
+ const char* filename,
+ int line,
+ const char* function_name,
+ unsigned int value,
+ const char* label) = 0;
+ virtual void SetGLErrorInvalidParami(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname,
+ int param) = 0;
+ virtual void SetGLErrorInvalidParamf(
+ const char* filename,
+ int line,
+ unsigned int error,
+ const char* function_name,
+ unsigned int pname,
+ float param) = 0;
+
+ // Gets the GLError and stores it in our wrapper. Effectively
+ // this lets us peek at the error without losing it.
+ virtual unsigned int PeekGLError(
+ const char* filename, int line, const char* function_name) = 0;
+
+ // Copies the real GL errors to the wrapper. This is so we can
+ // make sure there are no native GL errors before calling some GL function
+ // so that on return we know any error generated was for that specific
+ // command.
+ virtual void CopyRealGLErrorsToWrapper(
+ const char* filename, int line, const char* function_name) = 0;
+
+ // Clear all real GL errors. This is to prevent the client from seeing any
+ // errors caused by GL calls that it was not responsible for issuing.
+ virtual void ClearRealGLErrors(
+ const char* filename, int line, const char* function_name) = 0;
+
+ protected:
+ ErrorState();
+
+ DISALLOW_COPY_AND_ASSIGN(ErrorState);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_H_
+
diff --git a/gpu/command_buffer/service/error_state_mock.cc b/gpu/command_buffer/service/error_state_mock.cc
new file mode 100644
index 0000000..f3925d7
--- /dev/null
+++ b/gpu/command_buffer/service/error_state_mock.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/error_state_mock.h"
+
+namespace gpu {
+namespace gles2 {
+
+MockErrorState::MockErrorState()
+ : ErrorState() {}
+
+MockErrorState::~MockErrorState() {}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/error_state_mock.h b/gpu/command_buffer/service/error_state_mock.h
new file mode 100644
index 0000000..eb056f3
--- /dev/null
+++ b/gpu/command_buffer/service/error_state_mock.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the mock ErrorState class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_MOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_MOCK_H_
+
+#include "gpu/command_buffer/service/error_state.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+namespace gles2 {
+
+class MockErrorState : public ErrorState {
+ public:
+ MockErrorState();
+ virtual ~MockErrorState();
+
+ MOCK_METHOD0(GetGLError, uint32_t());
+ MOCK_METHOD5(SetGLError, void(
+ const char* filename, int line,
+ unsigned error, const char* function_name, const char* msg));
+ MOCK_METHOD5(SetGLErrorInvalidEnum, void(
+ const char* filename, int line,
+ const char* function_name, unsigned value, const char* label));
+ MOCK_METHOD6(SetGLErrorInvalidParami, void(
+ const char* filename,
+ int line,
+ unsigned error,
+ const char* function_name,
+ unsigned pname,
+ int param));
+ MOCK_METHOD6(SetGLErrorInvalidParamf, void(
+ const char* filename,
+ int line,
+ unsigned error,
+ const char* function_name,
+ unsigned pname,
+ float param));
+  MOCK_METHOD3(PeekGLError, unsigned(
+      const char* filename, int line, const char* function_name));
+  MOCK_METHOD3(CopyRealGLErrorsToWrapper, void(
+      const char* filename, int line, const char* function_name));
+  MOCK_METHOD3(ClearRealGLErrors, void(
+      const char* filename, int line, const char* function_name));
+
+ DISALLOW_COPY_AND_ASSIGN(MockErrorState);
+};
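+
+// A minimal usage sketch, for illustration only (not part of this change):
+// a test expecting one synthesized GL_INVALID_VALUE error could write
+//
+//   MockErrorState error_state;
+//   EXPECT_CALL(error_state, SetGLError(_, _, GL_INVALID_VALUE, _, _))
+//       .Times(1);
+//
+// using the standard gmock wildcard matcher ::testing::_.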
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ERROR_STATE_MOCK_H_
+
diff --git a/gpu/command_buffer/service/feature_info.cc b/gpu/command_buffer/service/feature_info.cc
new file mode 100644
index 0000000..e853d9b
--- /dev/null
+++ b/gpu/command_buffer/service/feature_info.cc
@@ -0,0 +1,881 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/feature_info.h"
+
+#include <set>
+
+#include "base/command_line.h"
+#include "base/macros.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+struct FormatInfo {
+ GLenum format;
+ const GLenum* types;
+ size_t count;
+};
+
+class StringSet {
+ public:
+ StringSet() {}
+
+ StringSet(const char* s) {
+ Init(s);
+ }
+
+ StringSet(const std::string& str) {
+ Init(str);
+ }
+
+ void Init(const char* s) {
+ std::string str(s ? s : "");
+ Init(str);
+ }
+
+ void Init(const std::string& str) {
+ std::vector<std::string> tokens;
+ Tokenize(str, " ", &tokens);
+ string_set_.insert(tokens.begin(), tokens.end());
+ }
+
+ bool Contains(const char* s) {
+ return string_set_.find(s) != string_set_.end();
+ }
+
+ bool Contains(const std::string& s) {
+ return string_set_.find(s) != string_set_.end();
+ }
+
+ private:
+ std::set<std::string> string_set_;
+};
+
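+// For illustration only (not part of this change): StringSet splits the
+// space-separated extension string into whole tokens, so, hypothetically,
+//
+//   StringSet extensions("GL_OES_depth24 GL_OES_rgb8_rgba8");
+//   extensions.Contains("GL_OES_depth24");  // true
+//   extensions.Contains("GL_OES_depth");    // false, exact tokens only
+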
+// Process a string of workaround type IDs (separated by ',') and set up
+// the corresponding Workaround flags.
+void StringToWorkarounds(
+ const std::string& types, FeatureInfo::Workarounds* workarounds) {
+ DCHECK(workarounds);
+ std::vector<std::string> pieces;
+ base::SplitString(types, ',', &pieces);
+ for (size_t i = 0; i < pieces.size(); ++i) {
+ int number = 0;
+ bool succeed = base::StringToInt(pieces[i], &number);
+ DCHECK(succeed);
+ switch (number) {
+#define GPU_OP(type, name) \
+ case gpu::type: \
+ workarounds->name = true; \
+ break;
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+ default:
+ NOTIMPLEMENTED();
+ }
+ }
+ if (workarounds->max_texture_size_limit_4096)
+ workarounds->max_texture_size = 4096;
+ if (workarounds->max_cube_map_texture_size_limit_4096)
+ workarounds->max_cube_map_texture_size = 4096;
+ if (workarounds->max_cube_map_texture_size_limit_1024)
+ workarounds->max_cube_map_texture_size = 1024;
+ if (workarounds->max_cube_map_texture_size_limit_512)
+ workarounds->max_cube_map_texture_size = 512;
+
+ if (workarounds->max_fragment_uniform_vectors_32)
+ workarounds->max_fragment_uniform_vectors = 32;
+ if (workarounds->max_varying_vectors_16)
+ workarounds->max_varying_vectors = 16;
+ if (workarounds->max_vertex_uniform_vectors_256)
+ workarounds->max_vertex_uniform_vectors = 256;
+}
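+
+// For illustration only (not part of this change): the switch value is a
+// comma-separated list of numeric GPU_DRIVER_BUG_WORKAROUNDS IDs, so a
+// hypothetical "--gpu-driver-bug-workarounds=3,7" would set the two
+// corresponding boolean flags on the Workarounds struct and then apply any
+// derived limits (e.g. max_texture_size) as above.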
+
+} // anonymous namespace.
+
+FeatureInfo::FeatureFlags::FeatureFlags()
+ : chromium_color_buffer_float_rgba(false),
+ chromium_color_buffer_float_rgb(false),
+ chromium_framebuffer_multisample(false),
+ chromium_sync_query(false),
+ use_core_framebuffer_multisample(false),
+ multisampled_render_to_texture(false),
+ use_img_for_multisampled_render_to_texture(false),
+ oes_standard_derivatives(false),
+ oes_egl_image_external(false),
+ oes_depth24(false),
+ oes_compressed_etc1_rgb8_texture(false),
+ packed_depth24_stencil8(false),
+ npot_ok(false),
+ enable_texture_float_linear(false),
+ enable_texture_half_float_linear(false),
+ angle_translated_shader_source(false),
+ angle_pack_reverse_row_order(false),
+ arb_texture_rectangle(false),
+ angle_instanced_arrays(false),
+ occlusion_query_boolean(false),
+ use_arb_occlusion_query2_for_occlusion_query_boolean(false),
+ use_arb_occlusion_query_for_occlusion_query_boolean(false),
+ native_vertex_array_object(false),
+ ext_texture_format_bgra8888(false),
+ enable_shader_name_hashing(false),
+ enable_samplers(false),
+ ext_draw_buffers(false),
+ ext_frag_depth(false),
+ ext_shader_texture_lod(false),
+ use_async_readpixels(false),
+ map_buffer_range(false),
+ ext_discard_framebuffer(false),
+ angle_depth_texture(false),
+ is_angle(false),
+ is_swiftshader(false),
+ angle_texture_usage(false),
+ ext_texture_storage(false),
+ chromium_path_rendering(false) {
+}
+
+FeatureInfo::Workarounds::Workarounds() :
+#define GPU_OP(type, name) name(false),
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+ max_texture_size(0),
+ max_cube_map_texture_size(0),
+ max_fragment_uniform_vectors(0),
+ max_varying_vectors(0),
+ max_vertex_uniform_vectors(0) {
+}
+
+FeatureInfo::FeatureInfo() {
+ InitializeBasicState(*CommandLine::ForCurrentProcess());
+}
+
+FeatureInfo::FeatureInfo(const CommandLine& command_line) {
+ InitializeBasicState(command_line);
+}
+
+void FeatureInfo::InitializeBasicState(const CommandLine& command_line) {
+ if (command_line.HasSwitch(switches::kGpuDriverBugWorkarounds)) {
+ std::string types = command_line.GetSwitchValueASCII(
+ switches::kGpuDriverBugWorkarounds);
+ StringToWorkarounds(types, &workarounds_);
+ }
+ feature_flags_.enable_shader_name_hashing =
+ !command_line.HasSwitch(switches::kDisableShaderNameHashing);
+
+ feature_flags_.is_swiftshader =
+ (command_line.GetSwitchValueASCII(switches::kUseGL) == "swiftshader");
+
+ static const GLenum kAlphaTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const GLenum kRGBTypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_5_6_5,
+ };
+ static const GLenum kRGBATypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_UNSIGNED_SHORT_5_5_5_1,
+ };
+ static const GLenum kLuminanceTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const GLenum kLuminanceAlphaTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const FormatInfo kFormatTypes[] = {
+ { GL_ALPHA, kAlphaTypes, arraysize(kAlphaTypes), },
+ { GL_RGB, kRGBTypes, arraysize(kRGBTypes), },
+ { GL_RGBA, kRGBATypes, arraysize(kRGBATypes), },
+ { GL_LUMINANCE, kLuminanceTypes, arraysize(kLuminanceTypes), },
+ { GL_LUMINANCE_ALPHA, kLuminanceAlphaTypes,
+ arraysize(kLuminanceAlphaTypes), } ,
+ };
+ for (size_t ii = 0; ii < arraysize(kFormatTypes); ++ii) {
+ const FormatInfo& info = kFormatTypes[ii];
+ ValueValidator<GLenum>& validator = texture_format_validators_[info.format];
+ for (size_t jj = 0; jj < info.count; ++jj) {
+ validator.AddValue(info.types[jj]);
+ }
+ }
+}
+
+bool FeatureInfo::Initialize() {
+ disallowed_features_ = DisallowedFeatures();
+ InitializeFeatures();
+ return true;
+}
+
+bool FeatureInfo::Initialize(const DisallowedFeatures& disallowed_features) {
+ disallowed_features_ = disallowed_features;
+ InitializeFeatures();
+ return true;
+}
+
+void FeatureInfo::InitializeFeatures() {
+ // Figure out what extensions to turn on.
+ StringSet extensions(
+ reinterpret_cast<const char*>(glGetString(GL_EXTENSIONS)));
+
+ const char* renderer_str =
+ reinterpret_cast<const char*>(glGetString(GL_RENDERER));
+ if (renderer_str) {
+ feature_flags_.is_angle = StartsWithASCII(renderer_str, "ANGLE", true);
+ }
+
+ bool is_es3 = false;
+ const char* version_str =
+ reinterpret_cast<const char*>(glGetString(GL_VERSION));
+ if (version_str) {
+ std::string lstr(base::StringToLowerASCII(std::string(version_str)));
+ is_es3 = (lstr.substr(0, 12) == "opengl es 3.");
+ }
+
+ AddExtensionString("GL_ANGLE_translated_shader_source");
+ AddExtensionString("GL_CHROMIUM_async_pixel_transfers");
+ AddExtensionString("GL_CHROMIUM_bind_uniform_location");
+ AddExtensionString("GL_CHROMIUM_command_buffer_query");
+ AddExtensionString("GL_CHROMIUM_command_buffer_latency_query");
+ AddExtensionString("GL_CHROMIUM_copy_texture");
+ AddExtensionString("GL_CHROMIUM_get_error_query");
+ AddExtensionString("GL_CHROMIUM_lose_context");
+ AddExtensionString("GL_CHROMIUM_pixel_transfer_buffer_object");
+ AddExtensionString("GL_CHROMIUM_rate_limit_offscreen_context");
+ AddExtensionString("GL_CHROMIUM_resize");
+ AddExtensionString("GL_CHROMIUM_resource_safe");
+ AddExtensionString("GL_CHROMIUM_strict_attribs");
+ AddExtensionString("GL_CHROMIUM_texture_mailbox");
+ AddExtensionString("GL_EXT_debug_marker");
+
+ // OES_vertex_array_object is emulated if not present natively,
+ // so the extension string is always exposed.
+ AddExtensionString("GL_OES_vertex_array_object");
+
+ if (!disallowed_features_.gpu_memory_manager)
+ AddExtensionString("GL_CHROMIUM_gpu_memory_manager");
+
+ if (extensions.Contains("GL_ANGLE_translated_shader_source")) {
+ feature_flags_.angle_translated_shader_source = true;
+ }
+
+ // Check if we should allow GL_EXT_texture_compression_dxt1 and
+ // GL_EXT_texture_compression_s3tc.
+ bool enable_dxt1 = false;
+ bool enable_dxt3 = false;
+ bool enable_dxt5 = false;
+ bool have_s3tc = extensions.Contains("GL_EXT_texture_compression_s3tc");
+ bool have_dxt3 =
+ have_s3tc || extensions.Contains("GL_ANGLE_texture_compression_dxt3");
+ bool have_dxt5 =
+ have_s3tc || extensions.Contains("GL_ANGLE_texture_compression_dxt5");
+
+ if (extensions.Contains("GL_EXT_texture_compression_dxt1") || have_s3tc) {
+ enable_dxt1 = true;
+ }
+ if (have_dxt3) {
+ enable_dxt3 = true;
+ }
+ if (have_dxt5) {
+ enable_dxt5 = true;
+ }
+
+ if (enable_dxt1) {
+ AddExtensionString("GL_EXT_texture_compression_dxt1");
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT);
+ }
+
+ if (enable_dxt3) {
+ // The difference between GL_EXT_texture_compression_s3tc and
+ // GL_CHROMIUM_texture_compression_dxt3 is that the former
+ // requires on the fly compression. The latter does not.
+ AddExtensionString("GL_CHROMIUM_texture_compression_dxt3");
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT);
+ }
+
+ if (enable_dxt5) {
+ // The difference between GL_EXT_texture_compression_s3tc and
+ // GL_CHROMIUM_texture_compression_dxt5 is that the former
+ // requires on the fly compression. The latter does not.
+ AddExtensionString("GL_CHROMIUM_texture_compression_dxt5");
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT);
+ }
+
+ // Check if we should enable GL_EXT_texture_filter_anisotropic.
+ if (extensions.Contains("GL_EXT_texture_filter_anisotropic")) {
+ AddExtensionString("GL_EXT_texture_filter_anisotropic");
+ validators_.texture_parameter.AddValue(
+ GL_TEXTURE_MAX_ANISOTROPY_EXT);
+ validators_.g_l_state.AddValue(
+ GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT);
+ }
+
+ // Check if we should support GL_OES_packed_depth_stencil and/or
+ // GL_GOOGLE_depth_texture / GL_CHROMIUM_depth_texture.
+ //
+ // NOTE: GL_OES_depth_texture requires support for depth cubemaps.
+ // GL_ARB_depth_texture requires other features that
+ // GL_OES_packed_depth_stencil does not provide.
+ //
+ // Therefore we made up GL_GOOGLE_depth_texture / GL_CHROMIUM_depth_texture.
+ //
+  // GL_GOOGLE_depth_texture is legacy. Since we exposed it to NaCl, we can't
+  // get rid of it.
+ //
+ bool enable_depth_texture = false;
+ if (!workarounds_.disable_depth_texture &&
+ (extensions.Contains("GL_ARB_depth_texture") ||
+ extensions.Contains("GL_OES_depth_texture") ||
+ extensions.Contains("GL_ANGLE_depth_texture") || is_es3)) {
+ enable_depth_texture = true;
+ feature_flags_.angle_depth_texture =
+ extensions.Contains("GL_ANGLE_depth_texture");
+ }
+
+ if (enable_depth_texture) {
+ AddExtensionString("GL_CHROMIUM_depth_texture");
+ AddExtensionString("GL_GOOGLE_depth_texture");
+ texture_format_validators_[GL_DEPTH_COMPONENT].AddValue(GL_UNSIGNED_SHORT);
+ texture_format_validators_[GL_DEPTH_COMPONENT].AddValue(GL_UNSIGNED_INT);
+ validators_.texture_internal_format.AddValue(GL_DEPTH_COMPONENT);
+ validators_.texture_format.AddValue(GL_DEPTH_COMPONENT);
+ validators_.pixel_type.AddValue(GL_UNSIGNED_SHORT);
+ validators_.pixel_type.AddValue(GL_UNSIGNED_INT);
+ }
+
+ if (extensions.Contains("GL_EXT_packed_depth_stencil") ||
+ extensions.Contains("GL_OES_packed_depth_stencil") || is_es3) {
+ AddExtensionString("GL_OES_packed_depth_stencil");
+ feature_flags_.packed_depth24_stencil8 = true;
+ if (enable_depth_texture) {
+ texture_format_validators_[GL_DEPTH_STENCIL]
+ .AddValue(GL_UNSIGNED_INT_24_8);
+ validators_.texture_internal_format.AddValue(GL_DEPTH_STENCIL);
+ validators_.texture_format.AddValue(GL_DEPTH_STENCIL);
+ validators_.pixel_type.AddValue(GL_UNSIGNED_INT_24_8);
+ }
+ validators_.render_buffer_format.AddValue(GL_DEPTH24_STENCIL8);
+ }
+
+ if (is_es3 || extensions.Contains("GL_OES_vertex_array_object") ||
+ extensions.Contains("GL_ARB_vertex_array_object") ||
+ extensions.Contains("GL_APPLE_vertex_array_object")) {
+ feature_flags_.native_vertex_array_object = true;
+ }
+
+ // If we're using client_side_arrays we have to emulate
+ // vertex array objects since vertex array objects do not work
+ // with client side arrays.
+ if (workarounds_.use_client_side_arrays_for_stream_buffers) {
+ feature_flags_.native_vertex_array_object = false;
+ }
+
+ if (is_es3 || extensions.Contains("GL_OES_element_index_uint") ||
+ gfx::HasDesktopGLFeatures()) {
+ AddExtensionString("GL_OES_element_index_uint");
+ validators_.index_type.AddValue(GL_UNSIGNED_INT);
+ }
+
+ bool enable_texture_format_bgra8888 = false;
+ bool enable_read_format_bgra = false;
+ bool enable_render_buffer_bgra = false;
+ bool enable_immutable_texture_format_bgra_on_es3 =
+ extensions.Contains("GL_APPLE_texture_format_BGRA8888");
+
+ // Check if we should allow GL_EXT_texture_format_BGRA8888
+ if (extensions.Contains("GL_EXT_texture_format_BGRA8888") ||
+ enable_immutable_texture_format_bgra_on_es3 ||
+ extensions.Contains("GL_EXT_bgra")) {
+ enable_texture_format_bgra8888 = true;
+ }
+
+ if (extensions.Contains("GL_EXT_bgra")) {
+ enable_render_buffer_bgra = true;
+ }
+
+ if (extensions.Contains("GL_EXT_read_format_bgra") ||
+ extensions.Contains("GL_EXT_bgra")) {
+ enable_read_format_bgra = true;
+ }
+
+ if (enable_texture_format_bgra8888) {
+ feature_flags_.ext_texture_format_bgra8888 = true;
+ AddExtensionString("GL_EXT_texture_format_BGRA8888");
+ texture_format_validators_[GL_BGRA_EXT].AddValue(GL_UNSIGNED_BYTE);
+ validators_.texture_internal_format.AddValue(GL_BGRA_EXT);
+ validators_.texture_format.AddValue(GL_BGRA_EXT);
+ }
+
+ if (enable_read_format_bgra) {
+ AddExtensionString("GL_EXT_read_format_bgra");
+ validators_.read_pixel_format.AddValue(GL_BGRA_EXT);
+ }
+
+ if (enable_render_buffer_bgra) {
+ AddExtensionString("GL_CHROMIUM_renderbuffer_format_BGRA8888");
+ validators_.render_buffer_format.AddValue(GL_BGRA8_EXT);
+ }
+
+ if (extensions.Contains("GL_OES_rgb8_rgba8") || gfx::HasDesktopGLFeatures()) {
+ AddExtensionString("GL_OES_rgb8_rgba8");
+ validators_.render_buffer_format.AddValue(GL_RGB8_OES);
+ validators_.render_buffer_format.AddValue(GL_RGBA8_OES);
+ }
+
+ // Check if we should allow GL_OES_texture_npot
+ if (is_es3 || extensions.Contains("GL_ARB_texture_non_power_of_two") ||
+ extensions.Contains("GL_OES_texture_npot")) {
+ AddExtensionString("GL_OES_texture_npot");
+ feature_flags_.npot_ok = true;
+ }
+
+ // Check if we should allow GL_OES_texture_float, GL_OES_texture_half_float,
+ // GL_OES_texture_float_linear, GL_OES_texture_half_float_linear
+ bool enable_texture_float = false;
+ bool enable_texture_float_linear = false;
+ bool enable_texture_half_float = false;
+ bool enable_texture_half_float_linear = false;
+
+ bool may_enable_chromium_color_buffer_float = false;
+
+ if (extensions.Contains("GL_ARB_texture_float")) {
+ enable_texture_float = true;
+ enable_texture_float_linear = true;
+ enable_texture_half_float = true;
+ enable_texture_half_float_linear = true;
+ may_enable_chromium_color_buffer_float = true;
+ } else {
+ if (is_es3 || extensions.Contains("GL_OES_texture_float")) {
+ enable_texture_float = true;
+ if (extensions.Contains("GL_OES_texture_float_linear")) {
+ enable_texture_float_linear = true;
+ }
+ if ((is_es3 && extensions.Contains("GL_EXT_color_buffer_float")) ||
+ feature_flags_.is_angle) {
+ may_enable_chromium_color_buffer_float = true;
+ }
+ }
+ // TODO(dshwang): GLES3 supports half float by default but GL_HALF_FLOAT_OES
+ // isn't equal to GL_HALF_FLOAT.
+ if (extensions.Contains("GL_OES_texture_half_float")) {
+ enable_texture_half_float = true;
+ if (extensions.Contains("GL_OES_texture_half_float_linear")) {
+ enable_texture_half_float_linear = true;
+ }
+ }
+ }
+
+ if (enable_texture_float) {
+ texture_format_validators_[GL_ALPHA].AddValue(GL_FLOAT);
+ texture_format_validators_[GL_RGB].AddValue(GL_FLOAT);
+ texture_format_validators_[GL_RGBA].AddValue(GL_FLOAT);
+ texture_format_validators_[GL_LUMINANCE].AddValue(GL_FLOAT);
+ texture_format_validators_[GL_LUMINANCE_ALPHA].AddValue(GL_FLOAT);
+ validators_.pixel_type.AddValue(GL_FLOAT);
+ validators_.read_pixel_type.AddValue(GL_FLOAT);
+ AddExtensionString("GL_OES_texture_float");
+ if (enable_texture_float_linear) {
+ AddExtensionString("GL_OES_texture_float_linear");
+ }
+ }
+
+ if (enable_texture_half_float) {
+ texture_format_validators_[GL_ALPHA].AddValue(GL_HALF_FLOAT_OES);
+ texture_format_validators_[GL_RGB].AddValue(GL_HALF_FLOAT_OES);
+ texture_format_validators_[GL_RGBA].AddValue(GL_HALF_FLOAT_OES);
+ texture_format_validators_[GL_LUMINANCE].AddValue(GL_HALF_FLOAT_OES);
+ texture_format_validators_[GL_LUMINANCE_ALPHA].AddValue(GL_HALF_FLOAT_OES);
+ validators_.pixel_type.AddValue(GL_HALF_FLOAT_OES);
+ validators_.read_pixel_type.AddValue(GL_HALF_FLOAT_OES);
+ AddExtensionString("GL_OES_texture_half_float");
+ if (enable_texture_half_float_linear) {
+ AddExtensionString("GL_OES_texture_half_float_linear");
+ }
+ }
+
+ if (may_enable_chromium_color_buffer_float) {
+ COMPILE_ASSERT(GL_RGBA32F_ARB == GL_RGBA32F &&
+ GL_RGBA32F_EXT == GL_RGBA32F &&
+ GL_RGB32F_ARB == GL_RGB32F &&
+ GL_RGB32F_EXT == GL_RGB32F,
+ sized_float_internal_format_variations_must_match);
+ // We don't check extension support beyond ARB_texture_float on desktop GL,
+    // and format support varies between GL configurations. For example, the
+    // spec prior to OpenGL 3.0 mandates framebuffer support only for one
+ // implementation-chosen format, and ES3.0 EXT_color_buffer_float does not
+ // support rendering to RGB32F. Check for framebuffer completeness with
+ // formats that the extensions expose, and only enable an extension when a
+ // framebuffer created with its texture format is reported as complete.
+ GLint fb_binding = 0;
+ GLint tex_binding = 0;
+ glGetIntegerv(GL_FRAMEBUFFER_BINDING, &fb_binding);
+ glGetIntegerv(GL_TEXTURE_BINDING_2D, &tex_binding);
+
+ GLuint tex_id = 0;
+ GLuint fb_id = 0;
+ GLsizei width = 16;
+
+ glGenTextures(1, &tex_id);
+ glGenFramebuffersEXT(1, &fb_id);
+ glBindTexture(GL_TEXTURE_2D, tex_id);
+ // Nearest filter needed for framebuffer completeness on some drivers.
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, width, 0, GL_RGBA,
+ GL_FLOAT, NULL);
+ glBindFramebufferEXT(GL_FRAMEBUFFER, fb_id);
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, tex_id, 0);
+ GLenum statusRGBA = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, width, width, 0, GL_RGB,
+ GL_FLOAT, NULL);
+ GLenum statusRGB = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
+ glDeleteFramebuffersEXT(1, &fb_id);
+ glDeleteTextures(1, &tex_id);
+
+ glBindFramebufferEXT(GL_FRAMEBUFFER, static_cast<GLuint>(fb_binding));
+ glBindTexture(GL_TEXTURE_2D, static_cast<GLuint>(tex_binding));
+
+ DCHECK(glGetError() == GL_NO_ERROR);
+
+ if (statusRGBA == GL_FRAMEBUFFER_COMPLETE) {
+ validators_.texture_internal_format.AddValue(GL_RGBA32F);
+ feature_flags_.chromium_color_buffer_float_rgba = true;
+ AddExtensionString("GL_CHROMIUM_color_buffer_float_rgba");
+ }
+ if (statusRGB == GL_FRAMEBUFFER_COMPLETE) {
+ validators_.texture_internal_format.AddValue(GL_RGB32F);
+ feature_flags_.chromium_color_buffer_float_rgb = true;
+ AddExtensionString("GL_CHROMIUM_color_buffer_float_rgb");
+ }
+ }
+
+ // Check for multisample support
+ if (!workarounds_.disable_multisampling) {
+ bool ext_has_multisample =
+ extensions.Contains("GL_EXT_framebuffer_multisample") || is_es3;
+ if (feature_flags_.is_angle) {
+ ext_has_multisample |=
+ extensions.Contains("GL_ANGLE_framebuffer_multisample");
+ }
+ feature_flags_.use_core_framebuffer_multisample = is_es3;
+ if (ext_has_multisample) {
+ feature_flags_.chromium_framebuffer_multisample = true;
+ validators_.frame_buffer_target.AddValue(GL_READ_FRAMEBUFFER_EXT);
+ validators_.frame_buffer_target.AddValue(GL_DRAW_FRAMEBUFFER_EXT);
+ validators_.g_l_state.AddValue(GL_READ_FRAMEBUFFER_BINDING_EXT);
+ validators_.g_l_state.AddValue(GL_MAX_SAMPLES_EXT);
+ validators_.render_buffer_parameter.AddValue(GL_RENDERBUFFER_SAMPLES_EXT);
+ AddExtensionString("GL_CHROMIUM_framebuffer_multisample");
+ }
+ if (extensions.Contains("GL_EXT_multisampled_render_to_texture")) {
+ feature_flags_.multisampled_render_to_texture = true;
+ } else if (extensions.Contains("GL_IMG_multisampled_render_to_texture")) {
+ feature_flags_.multisampled_render_to_texture = true;
+ feature_flags_.use_img_for_multisampled_render_to_texture = true;
+ }
+ if (feature_flags_.multisampled_render_to_texture) {
+ validators_.render_buffer_parameter.AddValue(
+ GL_RENDERBUFFER_SAMPLES_EXT);
+ validators_.g_l_state.AddValue(GL_MAX_SAMPLES_EXT);
+ validators_.frame_buffer_parameter.AddValue(
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT);
+ AddExtensionString("GL_EXT_multisampled_render_to_texture");
+ }
+ }
+
+ if (extensions.Contains("GL_OES_depth24") || gfx::HasDesktopGLFeatures() ||
+ is_es3) {
+ AddExtensionString("GL_OES_depth24");
+ feature_flags_.oes_depth24 = true;
+ validators_.render_buffer_format.AddValue(GL_DEPTH_COMPONENT24);
+ }
+
+ if (!workarounds_.disable_oes_standard_derivatives &&
+ (is_es3 || extensions.Contains("GL_OES_standard_derivatives") ||
+ gfx::HasDesktopGLFeatures())) {
+ AddExtensionString("GL_OES_standard_derivatives");
+ feature_flags_.oes_standard_derivatives = true;
+ validators_.hint_target.AddValue(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES);
+ validators_.g_l_state.AddValue(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES);
+ }
+
+ if (extensions.Contains("GL_OES_EGL_image_external")) {
+ AddExtensionString("GL_OES_EGL_image_external");
+ feature_flags_.oes_egl_image_external = true;
+ validators_.texture_bind_target.AddValue(GL_TEXTURE_EXTERNAL_OES);
+ validators_.get_tex_param_target.AddValue(GL_TEXTURE_EXTERNAL_OES);
+ validators_.texture_parameter.AddValue(GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES);
+ validators_.g_l_state.AddValue(GL_TEXTURE_BINDING_EXTERNAL_OES);
+ }
+
+ if (extensions.Contains("GL_OES_compressed_ETC1_RGB8_texture")) {
+ AddExtensionString("GL_OES_compressed_ETC1_RGB8_texture");
+ feature_flags_.oes_compressed_etc1_rgb8_texture = true;
+ validators_.compressed_texture_format.AddValue(GL_ETC1_RGB8_OES);
+ }
+
+ if (extensions.Contains("GL_AMD_compressed_ATC_texture")) {
+ AddExtensionString("GL_AMD_compressed_ATC_texture");
+ validators_.compressed_texture_format.AddValue(
+ GL_ATC_RGB_AMD);
+ validators_.compressed_texture_format.AddValue(
+ GL_ATC_RGBA_EXPLICIT_ALPHA_AMD);
+ validators_.compressed_texture_format.AddValue(
+ GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD);
+ }
+
+ if (extensions.Contains("GL_IMG_texture_compression_pvrtc")) {
+ AddExtensionString("GL_IMG_texture_compression_pvrtc");
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG);
+ }
+
+ // Ideally we would only expose this extension on Mac OS X, to
+ // support GL_CHROMIUM_iosurface and the compositor. We don't want
+ // applications to start using it; they should use ordinary non-
+ // power-of-two textures. However, for unit testing purposes we
+ // expose it on all supported platforms.
+ if (extensions.Contains("GL_ARB_texture_rectangle")) {
+ AddExtensionString("GL_ARB_texture_rectangle");
+ feature_flags_.arb_texture_rectangle = true;
+ validators_.texture_bind_target.AddValue(GL_TEXTURE_RECTANGLE_ARB);
+ // For the moment we don't add this enum to the texture_target
+ // validator. This implies that the only way to get image data into a
+ // rectangular texture is via glTexImageIOSurface2DCHROMIUM, which is
+ // just fine since again we don't want applications depending on this
+ // extension.
+ validators_.get_tex_param_target.AddValue(GL_TEXTURE_RECTANGLE_ARB);
+ validators_.g_l_state.AddValue(GL_TEXTURE_BINDING_RECTANGLE_ARB);
+ }
+
+#if defined(OS_MACOSX)
+ AddExtensionString("GL_CHROMIUM_iosurface");
+#endif
+
+ // TODO(gman): Add support for these extensions.
+ // GL_OES_depth32
+
+ feature_flags_.enable_texture_float_linear |= enable_texture_float_linear;
+ feature_flags_.enable_texture_half_float_linear |=
+ enable_texture_half_float_linear;
+
+ if (extensions.Contains("GL_ANGLE_pack_reverse_row_order")) {
+ AddExtensionString("GL_ANGLE_pack_reverse_row_order");
+ feature_flags_.angle_pack_reverse_row_order = true;
+ validators_.pixel_store.AddValue(GL_PACK_REVERSE_ROW_ORDER_ANGLE);
+ validators_.g_l_state.AddValue(GL_PACK_REVERSE_ROW_ORDER_ANGLE);
+ }
+
+ if (extensions.Contains("GL_ANGLE_texture_usage")) {
+ feature_flags_.angle_texture_usage = true;
+ AddExtensionString("GL_ANGLE_texture_usage");
+ validators_.texture_parameter.AddValue(GL_TEXTURE_USAGE_ANGLE);
+ }
+
+  // Note: Only the APPLE_texture_format_BGRA8888 extension allows BGRA8_EXT
+  // in ES3's glTexStorage2D. We prefer supporting BGRA over texture storage,
+  // so we don't expose GL_EXT_texture_storage when we have ES3 plus
+  // GL_EXT_texture_format_BGRA8888, because we would fail the GL_BGRA8
+  // requirement. However, we do expose GL_EXT_texture_storage with plain ES3
+  // because then we don't claim to handle GL_BGRA8.
+ bool support_texture_storage_on_es3 =
+ (is_es3 && enable_immutable_texture_format_bgra_on_es3) ||
+ (is_es3 && !enable_texture_format_bgra8888);
+ if (extensions.Contains("GL_EXT_texture_storage") ||
+ extensions.Contains("GL_ARB_texture_storage") ||
+ support_texture_storage_on_es3) {
+ feature_flags_.ext_texture_storage = true;
+ AddExtensionString("GL_EXT_texture_storage");
+ validators_.texture_parameter.AddValue(GL_TEXTURE_IMMUTABLE_FORMAT_EXT);
+ if (enable_texture_format_bgra8888)
+ validators_.texture_internal_format_storage.AddValue(GL_BGRA8_EXT);
+ if (enable_texture_float) {
+ validators_.texture_internal_format_storage.AddValue(GL_RGBA32F_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RGB32F_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_ALPHA32F_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_LUMINANCE32F_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_LUMINANCE_ALPHA32F_EXT);
+ }
+ if (enable_texture_half_float) {
+ validators_.texture_internal_format_storage.AddValue(GL_RGBA16F_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RGB16F_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_ALPHA16F_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_LUMINANCE16F_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_LUMINANCE_ALPHA16F_EXT);
+ }
+ }
+
+ bool have_ext_occlusion_query_boolean =
+ extensions.Contains("GL_EXT_occlusion_query_boolean");
+ bool have_arb_occlusion_query2 =
+ extensions.Contains("GL_ARB_occlusion_query2");
+ bool have_arb_occlusion_query =
+ extensions.Contains("GL_ARB_occlusion_query");
+
+ if (!workarounds_.disable_ext_occlusion_query &&
+ (have_ext_occlusion_query_boolean ||
+ have_arb_occlusion_query2 ||
+ have_arb_occlusion_query)) {
+ AddExtensionString("GL_EXT_occlusion_query_boolean");
+ feature_flags_.occlusion_query_boolean = true;
+ feature_flags_.use_arb_occlusion_query2_for_occlusion_query_boolean =
+ !have_ext_occlusion_query_boolean && have_arb_occlusion_query2;
+ feature_flags_.use_arb_occlusion_query_for_occlusion_query_boolean =
+ !have_ext_occlusion_query_boolean && have_arb_occlusion_query &&
+ !have_arb_occlusion_query2;
+ }
+
+ if (!workarounds_.disable_angle_instanced_arrays &&
+ (extensions.Contains("GL_ANGLE_instanced_arrays") ||
+ (extensions.Contains("GL_ARB_instanced_arrays") &&
+ extensions.Contains("GL_ARB_draw_instanced")) ||
+ is_es3)) {
+ AddExtensionString("GL_ANGLE_instanced_arrays");
+ feature_flags_.angle_instanced_arrays = true;
+ validators_.vertex_attribute.AddValue(GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE);
+ }
+
+ if (!workarounds_.disable_ext_draw_buffers &&
+ (extensions.Contains("GL_ARB_draw_buffers") ||
+ extensions.Contains("GL_EXT_draw_buffers"))) {
+ AddExtensionString("GL_EXT_draw_buffers");
+ feature_flags_.ext_draw_buffers = true;
+
+ GLint max_color_attachments = 0;
+ glGetIntegerv(GL_MAX_COLOR_ATTACHMENTS_EXT, &max_color_attachments);
+ for (GLenum i = GL_COLOR_ATTACHMENT1_EXT;
+ i < static_cast<GLenum>(GL_COLOR_ATTACHMENT0 + max_color_attachments);
+ ++i) {
+ validators_.attachment.AddValue(i);
+ }
+ COMPILE_ASSERT(GL_COLOR_ATTACHMENT0_EXT == GL_COLOR_ATTACHMENT0,
+ color_attachment0_variation_must_match);
+
+ validators_.g_l_state.AddValue(GL_MAX_COLOR_ATTACHMENTS_EXT);
+ validators_.g_l_state.AddValue(GL_MAX_DRAW_BUFFERS_ARB);
+ GLint max_draw_buffers = 0;
+ glGetIntegerv(GL_MAX_DRAW_BUFFERS_ARB, &max_draw_buffers);
+ for (GLenum i = GL_DRAW_BUFFER0_ARB;
+ i < static_cast<GLenum>(GL_DRAW_BUFFER0_ARB + max_draw_buffers);
+ ++i) {
+ validators_.g_l_state.AddValue(i);
+ }
+ }
+
+ if (is_es3 || extensions.Contains("GL_EXT_blend_minmax") ||
+ gfx::HasDesktopGLFeatures()) {
+ AddExtensionString("GL_EXT_blend_minmax");
+ validators_.equation.AddValue(GL_MIN_EXT);
+ validators_.equation.AddValue(GL_MAX_EXT);
+ COMPILE_ASSERT(GL_MIN_EXT == GL_MIN && GL_MAX_EXT == GL_MAX,
+ min_max_variations_must_match);
+ }
+
+ // TODO(dshwang): GLES3 supports gl_FragDepth, not gl_FragDepthEXT.
+ if (extensions.Contains("GL_EXT_frag_depth") || gfx::HasDesktopGLFeatures()) {
+ AddExtensionString("GL_EXT_frag_depth");
+ feature_flags_.ext_frag_depth = true;
+ }
+
+ if (extensions.Contains("GL_EXT_shader_texture_lod") ||
+ gfx::HasDesktopGLFeatures()) {
+ AddExtensionString("GL_EXT_shader_texture_lod");
+ feature_flags_.ext_shader_texture_lod = true;
+ }
+
+#if !defined(OS_MACOSX)
+ if (workarounds_.disable_egl_khr_fence_sync) {
+ gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync = false;
+ }
+#endif
+ if (workarounds_.disable_arb_sync)
+ gfx::g_driver_gl.ext.b_GL_ARB_sync = false;
+ bool ui_gl_fence_works = gfx::GLFence::IsSupported();
+ UMA_HISTOGRAM_BOOLEAN("GPU.FenceSupport", ui_gl_fence_works);
+
+ feature_flags_.map_buffer_range =
+ is_es3 || extensions.Contains("GL_ARB_map_buffer_range");
+
+ // Really it's part of core OpenGL 2.1 and up, but let's assume the
+ // extension is still advertised.
+ bool has_pixel_buffers =
+ is_es3 || extensions.Contains("GL_ARB_pixel_buffer_object");
+
+ // We will use either glMapBuffer() or glMapBufferRange() for async readbacks.
+ if (has_pixel_buffers && ui_gl_fence_works &&
+ !workarounds_.disable_async_readpixels) {
+ feature_flags_.use_async_readpixels = true;
+ }
+
+ if (is_es3 || extensions.Contains("GL_ARB_sampler_objects")) {
+ feature_flags_.enable_samplers = true;
+ // TODO(dsinclair): Add AddExtensionString("GL_CHROMIUM_sampler_objects")
+ // when available.
+ }
+
+ if ((is_es3 || extensions.Contains("GL_EXT_discard_framebuffer")) &&
+ !workarounds_.disable_ext_discard_framebuffer) {
+ // DiscardFramebufferEXT is automatically bound to InvalidateFramebuffer.
+ AddExtensionString("GL_EXT_discard_framebuffer");
+ feature_flags_.ext_discard_framebuffer = true;
+ }
+
+ if (ui_gl_fence_works) {
+ AddExtensionString("GL_CHROMIUM_sync_query");
+ feature_flags_.chromium_sync_query = true;
+ }
+
+ if (extensions.Contains("GL_NV_path_rendering")) {
+ if (extensions.Contains("GL_EXT_direct_state_access") || is_es3) {
+ AddExtensionString("GL_CHROMIUM_path_rendering");
+ feature_flags_.chromium_path_rendering = true;
+ validators_.g_l_state.AddValue(GL_PATH_MODELVIEW_MATRIX_CHROMIUM);
+ validators_.g_l_state.AddValue(GL_PATH_PROJECTION_MATRIX_CHROMIUM);
+ }
+ }
+}
+
+void FeatureInfo::AddExtensionString(const char* s) {
+ std::string str(s);
+ size_t pos = extensions_.find(str);
+ while (pos != std::string::npos &&
+ pos + str.length() < extensions_.length() &&
+ extensions_.substr(pos + str.length(), 1) != " ") {
+ // This extension name is a substring of another.
+ pos = extensions_.find(str, pos + str.length());
+ }
+ if (pos == std::string::npos) {
+ extensions_ += (extensions_.empty() ? "" : " ") + str;
+ }
+}
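+
+// For illustration only (not part of this change): the loop in
+// AddExtensionString guards against substring collisions. Hypothetically, if
+// extensions_ already contains "GL_EXT_blend_minmax_foo", adding
+// "GL_EXT_blend_minmax" still appends the shorter name, because the match
+// found is not followed by a space or the end of the string.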
+
+FeatureInfo::~FeatureInfo() {
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/feature_info.h b/gpu/command_buffer/service/feature_info.h
new file mode 100644
index 0000000..740b833
--- /dev/null
+++ b/gpu/command_buffer/service/feature_info.h
@@ -0,0 +1,152 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_FEATURE_INFO_H_
+#define GPU_COMMAND_BUFFER_SERVICE_FEATURE_INFO_H_
+
+#include <set>
+#include <string>
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/sys_info.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_validation.h"
+#include "gpu/config/gpu_driver_bug_workaround_type.h"
+#include "gpu/gpu_export.h"
+
+namespace base {
+class CommandLine;
+}
+
+namespace gpu {
+namespace gles2 {
+
+// FeatureInfo records the features that are available for a ContextGroup.
+class GPU_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
+ public:
+ struct FeatureFlags {
+ FeatureFlags();
+
+ bool chromium_color_buffer_float_rgba;
+ bool chromium_color_buffer_float_rgb;
+ bool chromium_framebuffer_multisample;
+ bool chromium_sync_query;
+ // Use glBlitFramebuffer() and glRenderbufferStorageMultisample() with
+ // GL_EXT_framebuffer_multisample-style semantics, since they are exposed
+ // as core GL functions on this implementation.
+ bool use_core_framebuffer_multisample;
+ bool multisampled_render_to_texture;
+ // Use the IMG GLenum values and functions rather than EXT.
+ bool use_img_for_multisampled_render_to_texture;
+ bool oes_standard_derivatives;
+ bool oes_egl_image_external;
+ bool oes_depth24;
+ bool oes_compressed_etc1_rgb8_texture;
+ bool packed_depth24_stencil8;
+ bool npot_ok;
+ bool enable_texture_float_linear;
+ bool enable_texture_half_float_linear;
+ bool angle_translated_shader_source;
+ bool angle_pack_reverse_row_order;
+ bool arb_texture_rectangle;
+ bool angle_instanced_arrays;
+ bool occlusion_query_boolean;
+ bool use_arb_occlusion_query2_for_occlusion_query_boolean;
+ bool use_arb_occlusion_query_for_occlusion_query_boolean;
+ bool native_vertex_array_object;
+ bool ext_texture_format_bgra8888;
+ bool enable_shader_name_hashing;
+ bool enable_samplers;
+ bool ext_draw_buffers;
+ bool ext_frag_depth;
+ bool ext_shader_texture_lod;
+ bool use_async_readpixels;
+ bool map_buffer_range;
+ bool ext_discard_framebuffer;
+ bool angle_depth_texture;
+ bool is_angle;
+ bool is_swiftshader;
+ bool angle_texture_usage;
+ bool ext_texture_storage;
+ bool chromium_path_rendering;
+ };
+
+ struct Workarounds {
+ Workarounds();
+
+#define GPU_OP(type, name) bool name;
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+
+ // Note: 0 here means use driver limit.
+ GLint max_texture_size;
+ GLint max_cube_map_texture_size;
+ GLint max_fragment_uniform_vectors;
+ GLint max_varying_vectors;
+ GLint max_vertex_uniform_vectors;
+ };
+
+ // Constructor with workarounds taken from the current process's CommandLine
+ FeatureInfo();
+
+ // Constructor with workarounds taken from |command_line|
+ FeatureInfo(const base::CommandLine& command_line);
+
+ // Initializes the feature information. Needs a current GL context.
+ bool Initialize();
+ bool Initialize(const DisallowedFeatures& disallowed_features);
+
+ const Validators* validators() const {
+ return &validators_;
+ }
+
+ const ValueValidator<GLenum>& GetTextureFormatValidator(GLenum format) {
+ return texture_format_validators_[format];
+ }
+
+ const std::string& extensions() const {
+ return extensions_;
+ }
+
+ const FeatureFlags& feature_flags() const {
+ return feature_flags_;
+ }
+
+ const Workarounds& workarounds() const {
+ return workarounds_;
+ }
+
+ private:
+ friend class base::RefCounted<FeatureInfo>;
+ friend class BufferManagerClientSideArraysTest;
+
+ typedef base::hash_map<GLenum, ValueValidator<GLenum> > ValidatorMap;
+ ValidatorMap texture_format_validators_;
+
+ ~FeatureInfo();
+
+ void AddExtensionString(const char* s);
+ void InitializeBasicState(const base::CommandLine& command_line);
+ void InitializeFeatures();
+
+ Validators validators_;
+
+ DisallowedFeatures disallowed_features_;
+
+ // The extensions string returned by glGetString(GL_EXTENSIONS);
+ std::string extensions_;
+
+ // Flags for some features
+ FeatureFlags feature_flags_;
+
+ // Flags for Workarounds.
+ Workarounds workarounds_;
+
+ DISALLOW_COPY_AND_ASSIGN(FeatureInfo);
+};
+
+} // namespace gles2
+} // namespace gpu
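+
+// A minimal usage sketch, for illustration only (not part of this change),
+// on a thread with a current GL context:
+//
+//   scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+//   if (feature_info->Initialize() &&
+//       feature_info->feature_flags().oes_depth24) {
+//     // GL_DEPTH_COMPONENT24 renderbuffers may be used.
+//   }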
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_FEATURE_INFO_H_
diff --git a/gpu/command_buffer/service/feature_info_unittest.cc b/gpu/command_buffer/service/feature_info_unittest.cc
new file mode 100644
index 0000000..937dd1e
--- /dev/null
+++ b/gpu/command_buffer/service/feature_info_unittest.cc
@@ -0,0 +1,1296 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/feature_info.h"
+
+#include "base/command_line.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/config/gpu_driver_bug_workaround_type.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::HasSubstr;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Not;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+const char kGLRendererStringANGLE[] = "ANGLE (some renderer)";
+} // anonymous namespace
+
+class FeatureInfoTest : public GpuServiceTest {
+ public:
+ FeatureInfoTest() {
+ }
+
+ void SetupInitExpectations(const char* extensions) {
+ SetupInitExpectationsWithGLVersion(extensions, "", "");
+ }
+
+ void SetupInitExpectationsWithGLVersion(
+ const char* extensions, const char* renderer, const char* version) {
+ GpuServiceTest::SetUpWithGLVersion(version, extensions);
+ TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
+ gl_.get(), extensions, renderer, version);
+ info_ = new FeatureInfo();
+ info_->Initialize();
+ }
+
+ void SetupWithCommandLine(const CommandLine& command_line) {
+ GpuServiceTest::SetUp();
+ info_ = new FeatureInfo(command_line);
+ }
+
+ void SetupInitExpectationsWithCommandLine(
+ const char* extensions, const CommandLine& command_line) {
+ GpuServiceTest::SetUpWithGLVersion("2.0", extensions);
+ TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
+ gl_.get(), extensions, "", "");
+ info_ = new FeatureInfo(command_line);
+ info_->Initialize();
+ }
+
+ void SetupWithoutInit() {
+ GpuServiceTest::SetUp();
+ info_ = new FeatureInfo();
+ }
+
+ protected:
+ virtual void SetUp() OVERRIDE {
+ // Do nothing here, since we are using the explicit Setup*() functions.
+ }
+
+ virtual void TearDown() OVERRIDE {
+ info_ = NULL;
+ GpuServiceTest::TearDown();
+ }
+
+ scoped_refptr<FeatureInfo> info_;
+};
+
+namespace {
+
+struct FormatInfo {
+ GLenum format;
+ const GLenum* types;
+ size_t count;
+};
+
+} // anonymous namespace.
+
+TEST_F(FeatureInfoTest, Basic) {
+ SetupWithoutInit();
+ // Test it starts off uninitialized.
+ EXPECT_FALSE(info_->feature_flags().chromium_framebuffer_multisample);
+ EXPECT_FALSE(info_->feature_flags().use_core_framebuffer_multisample);
+ EXPECT_FALSE(info_->feature_flags().multisampled_render_to_texture);
+ EXPECT_FALSE(info_->feature_flags(
+ ).use_img_for_multisampled_render_to_texture);
+ EXPECT_FALSE(info_->feature_flags().oes_standard_derivatives);
+ EXPECT_FALSE(info_->feature_flags().npot_ok);
+ EXPECT_FALSE(info_->feature_flags().enable_texture_float_linear);
+ EXPECT_FALSE(info_->feature_flags().enable_texture_half_float_linear);
+ EXPECT_FALSE(info_->feature_flags().oes_egl_image_external);
+ EXPECT_FALSE(info_->feature_flags().oes_depth24);
+ EXPECT_FALSE(info_->feature_flags().packed_depth24_stencil8);
+ EXPECT_FALSE(info_->feature_flags().angle_translated_shader_source);
+ EXPECT_FALSE(info_->feature_flags().angle_pack_reverse_row_order);
+ EXPECT_FALSE(info_->feature_flags().arb_texture_rectangle);
+ EXPECT_FALSE(info_->feature_flags().angle_instanced_arrays);
+ EXPECT_FALSE(info_->feature_flags().occlusion_query_boolean);
+ EXPECT_FALSE(info_->feature_flags(
+ ).use_arb_occlusion_query2_for_occlusion_query_boolean);
+ EXPECT_FALSE(info_->feature_flags(
+ ).use_arb_occlusion_query_for_occlusion_query_boolean);
+ EXPECT_FALSE(info_->feature_flags().native_vertex_array_object);
+ EXPECT_FALSE(info_->feature_flags().map_buffer_range);
+ EXPECT_FALSE(info_->feature_flags().use_async_readpixels);
+ EXPECT_FALSE(info_->feature_flags().ext_discard_framebuffer);
+ EXPECT_FALSE(info_->feature_flags().angle_depth_texture);
+ EXPECT_FALSE(info_->feature_flags().is_angle);
+
+#define GPU_OP(type, name) EXPECT_FALSE(info_->workarounds().name);
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+ EXPECT_EQ(0, info_->workarounds().max_texture_size);
+ EXPECT_EQ(0, info_->workarounds().max_cube_map_texture_size);
+
+ // Test good types.
+ {
+ static const GLenum kAlphaTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const GLenum kRGBTypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_5_6_5,
+ };
+ static const GLenum kRGBATypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_UNSIGNED_SHORT_5_5_5_1,
+ };
+ static const GLenum kLuminanceTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const GLenum kLuminanceAlphaTypes[] = {
+ GL_UNSIGNED_BYTE,
+ };
+ static const FormatInfo kFormatTypes[] = {
+ { GL_ALPHA, kAlphaTypes, arraysize(kAlphaTypes), },
+ { GL_RGB, kRGBTypes, arraysize(kRGBTypes), },
+ { GL_RGBA, kRGBATypes, arraysize(kRGBATypes), },
+ { GL_LUMINANCE, kLuminanceTypes, arraysize(kLuminanceTypes), },
+ { GL_LUMINANCE_ALPHA, kLuminanceAlphaTypes,
+ arraysize(kLuminanceAlphaTypes), } ,
+ };
+ for (size_t ii = 0; ii < arraysize(kFormatTypes); ++ii) {
+ const FormatInfo& info = kFormatTypes[ii];
+ const ValueValidator<GLenum>& validator =
+ info_->GetTextureFormatValidator(info.format);
+ for (size_t jj = 0; jj < info.count; ++jj) {
+ EXPECT_TRUE(validator.IsValid(info.types[jj]));
+ }
+ }
+ }
+
+ // Test some bad types
+ {
+ static const GLenum kAlphaTypes[] = {
+ GL_UNSIGNED_SHORT_5_5_5_1,
+ GL_FLOAT,
+ };
+ static const GLenum kRGBTypes[] = {
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_FLOAT,
+ };
+ static const GLenum kRGBATypes[] = {
+ GL_UNSIGNED_SHORT_5_6_5,
+ GL_FLOAT,
+ };
+ static const GLenum kLuminanceTypes[] = {
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_FLOAT,
+ };
+ static const GLenum kLuminanceAlphaTypes[] = {
+ GL_UNSIGNED_SHORT_5_5_5_1,
+ GL_FLOAT,
+ };
+ static const GLenum kBGRATypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_5_6_5,
+ GL_FLOAT,
+ };
+ static const GLenum kDepthTypes[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT,
+ GL_UNSIGNED_INT,
+ GL_FLOAT,
+ };
+ static const FormatInfo kFormatTypes[] = {
+ { GL_ALPHA, kAlphaTypes, arraysize(kAlphaTypes), },
+ { GL_RGB, kRGBTypes, arraysize(kRGBTypes), },
+ { GL_RGBA, kRGBATypes, arraysize(kRGBATypes), },
+ { GL_LUMINANCE, kLuminanceTypes, arraysize(kLuminanceTypes), },
+ { GL_LUMINANCE_ALPHA, kLuminanceAlphaTypes,
+ arraysize(kLuminanceAlphaTypes), } ,
+ { GL_BGRA_EXT, kBGRATypes, arraysize(kBGRATypes), },
+ { GL_DEPTH_COMPONENT, kDepthTypes, arraysize(kDepthTypes), },
+ };
+ for (size_t ii = 0; ii < arraysize(kFormatTypes); ++ii) {
+ const FormatInfo& info = kFormatTypes[ii];
+ const ValueValidator<GLenum>& validator =
+ info_->GetTextureFormatValidator(info.format);
+ for (size_t jj = 0; jj < info.count; ++jj) {
+ EXPECT_FALSE(validator.IsValid(info.types[jj]));
+ }
+ }
+ }
+}
+
+TEST_F(FeatureInfoTest, InitializeNoExtensions) {
+ SetupInitExpectations("");
+ // Check default extensions are there
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_resource_safe"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_strict_attribs"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_ANGLE_translated_shader_source"));
+
+ // Check a couple of random extensions that should not be there.
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_OES_texture_npot")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_EXT_texture_compression_dxt1")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_CHROMIUM_texture_compression_dxt3")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_CHROMIUM_texture_compression_dxt5")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_ANGLE_texture_usage")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_EXT_texture_storage")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_compressed_ETC1_RGB8_texture")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_AMD_compressed_ATC_texture")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_IMG_texture_compression_pvrtc")));
+ EXPECT_FALSE(info_->feature_flags().npot_ok);
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ETC1_RGB8_OES));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGB_AMD));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGBA_EXPLICIT_ALPHA_AMD));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG));
+ EXPECT_FALSE(info_->validators()->read_pixel_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_FALSE(info_->validators()->texture_parameter.IsValid(
+ GL_TEXTURE_MAX_ANISOTROPY_EXT));
+ EXPECT_FALSE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT));
+ EXPECT_FALSE(info_->validators()->frame_buffer_target.IsValid(
+ GL_READ_FRAMEBUFFER_EXT));
+ EXPECT_FALSE(info_->validators()->frame_buffer_target.IsValid(
+ GL_DRAW_FRAMEBUFFER_EXT));
+ EXPECT_FALSE(info_->validators()->g_l_state.IsValid(
+ GL_READ_FRAMEBUFFER_BINDING_EXT));
+ EXPECT_FALSE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+ EXPECT_FALSE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH24_STENCIL8));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_STENCIL));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_RGBA32F));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_RGB32F));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(
+ GL_DEPTH_STENCIL));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(
+ GL_UNSIGNED_INT_24_8));
+ EXPECT_FALSE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH_COMPONENT24));
+ EXPECT_FALSE(info_->validators()->texture_parameter.IsValid(
+ GL_TEXTURE_USAGE_ANGLE));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH_COMPONENT16));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH_COMPONENT32_OES));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH24_STENCIL8_OES));
+ EXPECT_FALSE(info_->validators()->equation.IsValid(GL_MIN_EXT));
+ EXPECT_FALSE(info_->validators()->equation.IsValid(GL_MAX_EXT));
+ EXPECT_FALSE(info_->feature_flags().chromium_sync_query);
+}
+
+TEST_F(FeatureInfoTest, InitializeWithANGLE) {
+ SetupInitExpectationsWithGLVersion("", kGLRendererStringANGLE, "");
+ EXPECT_TRUE(info_->feature_flags().is_angle);
+}
+
+TEST_F(FeatureInfoTest, InitializeNPOTExtensionGLES) {
+ SetupInitExpectations("GL_OES_texture_npot");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_npot"));
+ EXPECT_TRUE(info_->feature_flags().npot_ok);
+}
+
+TEST_F(FeatureInfoTest, InitializeNPOTExtensionGL) {
+ SetupInitExpectations("GL_ARB_texture_non_power_of_two");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_npot"));
+ EXPECT_TRUE(info_->feature_flags().npot_ok);
+}
+
+TEST_F(FeatureInfoTest, InitializeDXTExtensionGLES2) {
+ SetupInitExpectations("GL_EXT_texture_compression_dxt1");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_compression_dxt1"));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT));
+ EXPECT_FALSE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeDXTExtensionGL) {
+ SetupInitExpectations("GL_EXT_texture_compression_s3tc");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_compression_dxt1"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_texture_compression_dxt3"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_texture_compression_dxt5"));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_format_BGRA8888GLES2) {
+ SetupInitExpectations("GL_EXT_texture_format_BGRA8888");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_format_BGRA8888"));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_BGRA_EXT).IsValid(
+ GL_UNSIGNED_BYTE));
+ EXPECT_FALSE(info_->validators()->render_buffer_format.IsValid(
+ GL_BGRA8_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_format_BGRA8888GL) {
+ SetupInitExpectations("GL_EXT_bgra");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_format_BGRA8888"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_read_format_bgra"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_renderbuffer_format_BGRA8888"));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->validators()->read_pixel_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_BGRA_EXT).IsValid(
+ GL_UNSIGNED_BYTE));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_BGRA8_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_format_BGRA8888Apple) {
+ SetupInitExpectations("GL_APPLE_texture_format_BGRA8888");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_format_BGRA8888"));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_BGRA_EXT).IsValid(
+ GL_UNSIGNED_BYTE));
+ EXPECT_FALSE(info_->validators()->render_buffer_format.IsValid(
+ GL_BGRA8_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_read_format_bgra) {
+ SetupInitExpectations("GL_EXT_read_format_bgra");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_read_format_bgra"));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_TRUE(info_->validators()->read_pixel_format.IsValid(
+ GL_BGRA_EXT));
+ EXPECT_FALSE(info_->validators()->render_buffer_format.IsValid(
+ GL_BGRA8_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_storage) {
+ SetupInitExpectations("GL_EXT_texture_storage");
+ EXPECT_TRUE(info_->feature_flags().ext_texture_storage);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_TRUE(info_->validators()->texture_parameter.IsValid(
+ GL_TEXTURE_IMMUTABLE_FORMAT_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_BGRA8_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGBA32F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGB32F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_ALPHA32F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE32F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE_ALPHA32F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGBA16F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGB16F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_ALPHA16F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE16F_EXT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE_ALPHA16F_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_texture_storage) {
+ SetupInitExpectations("GL_ARB_texture_storage");
+ EXPECT_TRUE(info_->feature_flags().ext_texture_storage);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_TRUE(info_->validators()->texture_parameter.IsValid(
+ GL_TEXTURE_IMMUTABLE_FORMAT_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_storage_BGRA) {
+ SetupInitExpectations("GL_EXT_texture_storage GL_EXT_bgra");
+ EXPECT_TRUE(info_->feature_flags().ext_texture_storage);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_BGRA8_EXT));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_texture_storage_BGRA) {
+ SetupInitExpectations("GL_ARB_texture_storage GL_EXT_bgra");
+ EXPECT_TRUE(info_->feature_flags().ext_texture_storage);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_BGRA8_EXT));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_storage_BGRA8888) {
+ SetupInitExpectations(
+ "GL_EXT_texture_storage GL_EXT_texture_format_BGRA8888");
+ EXPECT_TRUE(info_->feature_flags().ext_texture_storage);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_BGRA8_EXT));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_storage_float) {
+ SetupInitExpectations("GL_EXT_texture_storage GL_OES_texture_float");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_float"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGBA32F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGB32F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_ALPHA32F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE32F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE_ALPHA32F_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_storage_half_float) {
+ SetupInitExpectations("GL_EXT_texture_storage GL_OES_texture_half_float");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_half_float"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGBA16F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGB16F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_ALPHA16F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE16F_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_LUMINANCE_ALPHA16F_EXT));
+}
+
+// Check how the combination of ES version, texture_storage and BGRA support
+// is handled; 8 tests follow.
+
+// 1- ES2 + GL_EXT_texture_storage -> GL_EXT_texture_storage (and no
+// GL_EXT_texture_format_BGRA8888 - we don't claim to handle GL_BGRA8 in
+// glTexStorage2DEXT)
+TEST_F(FeatureInfoTest, InitializeGLES2_texture_storage) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_EXT_texture_storage", "", "OpenGL ES 2.0");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_EXT_texture_format_BGRA8888")));
+}
+
+// 2- ES2 + GL_EXT_texture_storage + (GL_EXT_texture_format_BGRA8888 or
+// GL_APPLE_texture_format_bgra8888)
+TEST_F(FeatureInfoTest, InitializeGLES2_texture_storage_BGRA) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_EXT_texture_storage GL_EXT_texture_format_BGRA8888",
+ "",
+ "OpenGL ES 2.0");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+// 3- ES2 + GL_EXT_texture_format_BGRA8888 or GL_APPLE_texture_format_bgra8888
+TEST_F(FeatureInfoTest, InitializeGLES2_texture_format_BGRA) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_EXT_texture_format_BGRA8888", "", "OpenGL ES 2.0");
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_EXT_texture_storage")));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+// 4- ES2 (neither GL_EXT_texture_storage nor GL_EXT_texture_format_BGRA8888) ->
+// nothing
+TEST_F(FeatureInfoTest, InitializeGLES2_neither_texture_storage_nor_BGRA) {
+ SetupInitExpectationsWithGLVersion("", "", "OpenGL ES 2.0");
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_EXT_texture_storage")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_EXT_texture_format_BGRA8888")));
+}
+
+// 5- ES3 + GL_EXT_texture_format_BGRA8888 -> GL_EXT_texture_format_BGRA8888
+// (we can't expose GL_EXT_texture_storage because we fail the GL_BGRA8
+// requirement)
+TEST_F(FeatureInfoTest, InitializeGLES3_texture_storage_EXT_BGRA) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_EXT_texture_format_BGRA8888", "", "OpenGL ES 3.0");
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_EXT_texture_storage")));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+// 6- ES3 + GL_APPLE_texture_format_bgra8888 -> GL_EXT_texture_storage +
+// GL_EXT_texture_format_BGRA8888 (driver promises to handle GL_BGRA8 by
+// exposing GL_APPLE_texture_format_bgra8888)
+TEST_F(FeatureInfoTest, InitializeGLES3_texture_storage_APPLE_BGRA) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_APPLE_texture_format_BGRA8888", "", "OpenGL ES 3.0");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+// 7- ES3 + GL_EXT_texture_storage + GL_EXT_texture_format_BGRA8888 ->
+// GL_EXT_texture_storage + GL_EXT_texture_format_BGRA8888 (driver promises to
+// handle GL_BGRA8 by exposing GL_EXT_texture_storage)
+TEST_F(FeatureInfoTest, InitializeGLES3_EXT_texture_storage_EXT_BGRA) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_EXT_texture_storage GL_EXT_texture_format_BGRA8888",
+ "",
+ "OpenGL ES 3.0");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_format_BGRA8888"));
+}
+
+// 8- ES3 + none of the above -> GL_EXT_texture_storage (and no
+// GL_EXT_texture_format_BGRA8888 - we don't claim to handle GL_BGRA8)
+TEST_F(FeatureInfoTest, InitializeGLES3_texture_storage) {
+ SetupInitExpectationsWithGLVersion("", "", "OpenGL ES 3.0");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_texture_storage"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_EXT_texture_format_BGRA8888")));
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_texture_float) {
+ SetupInitExpectations("GL_ARB_texture_float");
+ EXPECT_TRUE(info_->feature_flags().chromium_color_buffer_float_rgba);
+ EXPECT_TRUE(info_->feature_flags().chromium_color_buffer_float_rgb);
+ std::string extensions = info_->extensions() + " ";
+ EXPECT_THAT(extensions, HasSubstr("GL_CHROMIUM_color_buffer_float_rgb "));
+ EXPECT_THAT(extensions, HasSubstr("GL_CHROMIUM_color_buffer_float_rgba"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_RGBA32F));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_RGB32F));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_texture_floatGLES2) {
+ SetupInitExpectations("GL_OES_texture_float");
+ EXPECT_FALSE(info_->feature_flags().enable_texture_float_linear);
+ EXPECT_FALSE(info_->feature_flags().enable_texture_half_float_linear);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_float"));
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_OES_texture_half_float")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_float_linear")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_half_float_linear")));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_FLOAT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_texture_float_linearGLES2) {
+ SetupInitExpectations("GL_OES_texture_float GL_OES_texture_float_linear");
+ EXPECT_TRUE(info_->feature_flags().enable_texture_float_linear);
+ EXPECT_FALSE(info_->feature_flags().enable_texture_half_float_linear);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_float"));
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_OES_texture_half_float")));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_float_linear"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_half_float_linear")));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_FLOAT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_texture_half_floatGLES2) {
+ SetupInitExpectations("GL_OES_texture_half_float");
+ EXPECT_FALSE(info_->feature_flags().enable_texture_float_linear);
+ EXPECT_FALSE(info_->feature_flags().enable_texture_half_float_linear);
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_OES_texture_float")));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_half_float"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_float_linear")));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_half_float_linear")));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_FLOAT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_texture_half_float_linearGLES2) {
+ SetupInitExpectations(
+ "GL_OES_texture_half_float GL_OES_texture_half_float_linear");
+ EXPECT_FALSE(info_->feature_flags().enable_texture_float_linear);
+ EXPECT_TRUE(info_->feature_flags().enable_texture_half_float_linear);
+ EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_OES_texture_float")));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_texture_half_float"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_OES_texture_float_linear")));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_texture_half_float_linear"));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_FLOAT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_HALF_FLOAT_OES));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_FLOAT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_FLOAT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGB).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_RGBA).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE).IsValid(
+ GL_HALF_FLOAT_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_LUMINANCE_ALPHA).IsValid(
+ GL_HALF_FLOAT_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_framebuffer_multisample) {
+ SetupInitExpectations("GL_EXT_framebuffer_multisample");
+ EXPECT_TRUE(info_->feature_flags().chromium_framebuffer_multisample);
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_framebuffer_multisample"));
+ EXPECT_TRUE(info_->validators()->frame_buffer_target.IsValid(
+ GL_READ_FRAMEBUFFER_EXT));
+ EXPECT_TRUE(info_->validators()->frame_buffer_target.IsValid(
+ GL_DRAW_FRAMEBUFFER_EXT));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_READ_FRAMEBUFFER_BINDING_EXT));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_RENDERBUFFER_SAMPLES_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeANGLE_framebuffer_multisample) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_ANGLE_framebuffer_multisample", kGLRendererStringANGLE, "");
+ EXPECT_TRUE(info_->feature_flags().chromium_framebuffer_multisample);
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_framebuffer_multisample"));
+ EXPECT_TRUE(info_->validators()->frame_buffer_target.IsValid(
+ GL_READ_FRAMEBUFFER_EXT));
+ EXPECT_TRUE(info_->validators()->frame_buffer_target.IsValid(
+ GL_DRAW_FRAMEBUFFER_EXT));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_READ_FRAMEBUFFER_BINDING_EXT));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_RENDERBUFFER_SAMPLES_EXT));
+}
+
+// We don't allow ANGLE_framebuffer_multisample on non-ANGLE implementations,
+// because we wouldn't be choosing the right driver entry point and because the
+// extension was falsely advertised on some Android devices (crbug.com/165736).
+TEST_F(FeatureInfoTest, InitializeANGLE_framebuffer_multisampleWithoutANGLE) {
+ SetupInitExpectations("GL_ANGLE_framebuffer_multisample");
+ EXPECT_FALSE(info_->feature_flags().chromium_framebuffer_multisample);
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_CHROMIUM_framebuffer_multisample")));
+ EXPECT_FALSE(info_->validators()->frame_buffer_target.IsValid(
+ GL_READ_FRAMEBUFFER_EXT));
+ EXPECT_FALSE(info_->validators()->frame_buffer_target.IsValid(
+ GL_DRAW_FRAMEBUFFER_EXT));
+ EXPECT_FALSE(info_->validators()->g_l_state.IsValid(
+ GL_READ_FRAMEBUFFER_BINDING_EXT));
+ EXPECT_FALSE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_FALSE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_RENDERBUFFER_SAMPLES_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_multisampled_render_to_texture) {
+ SetupInitExpectations("GL_EXT_multisampled_render_to_texture");
+  EXPECT_TRUE(
+      info_->feature_flags().multisampled_render_to_texture);
+  EXPECT_FALSE(
+      info_->feature_flags().use_img_for_multisampled_render_to_texture);
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_multisampled_render_to_texture"));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_RENDERBUFFER_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->frame_buffer_parameter.IsValid(
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeIMG_multisampled_render_to_texture) {
+ SetupInitExpectations("GL_IMG_multisampled_render_to_texture");
+  EXPECT_TRUE(
+      info_->feature_flags().multisampled_render_to_texture);
+  EXPECT_TRUE(
+      info_->feature_flags().use_img_for_multisampled_render_to_texture);
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_multisampled_render_to_texture"));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_parameter.IsValid(
+ GL_RENDERBUFFER_SAMPLES_EXT));
+ EXPECT_TRUE(info_->validators()->frame_buffer_parameter.IsValid(
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_texture_filter_anisotropic) {
+ SetupInitExpectations("GL_EXT_texture_filter_anisotropic");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_texture_filter_anisotropic"));
+ EXPECT_TRUE(info_->validators()->texture_parameter.IsValid(
+ GL_TEXTURE_MAX_ANISOTROPY_EXT));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_ARB_depth_texture) {
+ SetupInitExpectations("GL_ARB_depth_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_GOOGLE_depth_texture"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_depth_texture"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_INT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_DEPTH_STENCIL).IsValid(
+ GL_UNSIGNED_INT_24_8));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_ARB_depth_texture) {
+ SetupInitExpectations("GL_OES_depth_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_GOOGLE_depth_texture"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_depth_texture"));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_INT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_DEPTH_STENCIL).IsValid(
+ GL_UNSIGNED_INT_24_8));
+}
+
+TEST_F(FeatureInfoTest, InitializeANGLE_depth_texture) {
+ SetupInitExpectations("GL_ANGLE_depth_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_GOOGLE_depth_texture"));
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_depth_texture"));
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_ANGLE_depth_texture")));
+ EXPECT_TRUE(info_->feature_flags().angle_depth_texture);
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH_COMPONENT16));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH_COMPONENT32_OES));
+ EXPECT_FALSE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_DEPTH24_STENCIL8_OES));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_INT));
+ EXPECT_FALSE(info_->GetTextureFormatValidator(GL_DEPTH_STENCIL).IsValid(
+ GL_UNSIGNED_INT_24_8));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_packed_depth_stencil) {
+ SetupInitExpectations("GL_EXT_packed_depth_stencil");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_packed_depth_stencil"));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH24_STENCIL8));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_packed_depth_stencil) {
+ SetupInitExpectations("GL_OES_packed_depth_stencil");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_packed_depth_stencil"));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH24_STENCIL8));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_FALSE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+}
+
+TEST_F(FeatureInfoTest,
+ InitializeOES_packed_depth_stencil_and_GL_ARB_depth_texture) {
+ SetupInitExpectations("GL_OES_packed_depth_stencil GL_ARB_depth_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_packed_depth_stencil"));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH24_STENCIL8));
+ EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
+ GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(
+ GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(
+ GL_UNSIGNED_INT_24_8));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT).IsValid(
+ GL_UNSIGNED_INT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_STENCIL).IsValid(
+ GL_UNSIGNED_INT_24_8));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_depth24) {
+ SetupInitExpectations("GL_OES_depth24");
+ EXPECT_TRUE(info_->feature_flags().oes_depth24);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_depth24"));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_DEPTH_COMPONENT24));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_standard_derivatives) {
+ SetupInitExpectations("GL_OES_standard_derivatives");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_standard_derivatives"));
+ EXPECT_TRUE(info_->feature_flags().oes_standard_derivatives);
+ EXPECT_TRUE(info_->validators()->hint_target.IsValid(
+ GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_rgb8_rgba8) {
+ SetupInitExpectations("GL_OES_rgb8_rgba8");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_rgb8_rgba8"));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_RGB8_OES));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(
+ GL_RGBA8_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_EGL_image_external) {
+ SetupInitExpectations("GL_OES_EGL_image_external");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_EGL_image_external"));
+ EXPECT_TRUE(info_->feature_flags().oes_egl_image_external);
+ EXPECT_TRUE(info_->validators()->texture_bind_target.IsValid(
+ GL_TEXTURE_EXTERNAL_OES));
+ EXPECT_TRUE(info_->validators()->get_tex_param_target.IsValid(
+ GL_TEXTURE_EXTERNAL_OES));
+ EXPECT_TRUE(info_->validators()->texture_parameter.IsValid(
+ GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES));
+ EXPECT_TRUE(info_->validators()->g_l_state.IsValid(
+ GL_TEXTURE_BINDING_EXTERNAL_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_compressed_ETC1_RGB8_texture) {
+ SetupInitExpectations("GL_OES_compressed_ETC1_RGB8_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_compressed_ETC1_RGB8_texture"));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ETC1_RGB8_OES));
+ EXPECT_FALSE(info_->validators()->texture_internal_format.IsValid(
+ GL_ETC1_RGB8_OES));
+}
+
+TEST_F(FeatureInfoTest, InitializeAMD_compressed_ATC_texture) {
+ SetupInitExpectations("GL_AMD_compressed_ATC_texture");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_AMD_compressed_ATC_texture"));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGB_AMD));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGBA_EXPLICIT_ALPHA_AMD));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD));
+}
+
+TEST_F(FeatureInfoTest, InitializeIMG_texture_compression_pvrtc) {
+ SetupInitExpectations("GL_IMG_texture_compression_pvrtc");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_IMG_texture_compression_pvrtc"));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG));
+ EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
+ GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_occlusion_query_boolean) {
+ SetupInitExpectations("GL_EXT_occlusion_query_boolean");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_occlusion_query_boolean"));
+ EXPECT_TRUE(info_->feature_flags().occlusion_query_boolean);
+  EXPECT_FALSE(info_->feature_flags()
+      .use_arb_occlusion_query2_for_occlusion_query_boolean);
+  EXPECT_FALSE(info_->feature_flags()
+      .use_arb_occlusion_query_for_occlusion_query_boolean);
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_occlusion_query) {
+ SetupInitExpectations("GL_ARB_occlusion_query");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_occlusion_query_boolean"));
+ EXPECT_TRUE(info_->feature_flags().occlusion_query_boolean);
+  EXPECT_FALSE(info_->feature_flags()
+      .use_arb_occlusion_query2_for_occlusion_query_boolean);
+  EXPECT_TRUE(info_->feature_flags()
+      .use_arb_occlusion_query_for_occlusion_query_boolean);
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_occlusion_query2) {
+ SetupInitExpectations("GL_ARB_occlusion_query2 GL_ARB_occlusion_query2");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_EXT_occlusion_query_boolean"));
+ EXPECT_TRUE(info_->feature_flags().occlusion_query_boolean);
+  EXPECT_TRUE(info_->feature_flags()
+      .use_arb_occlusion_query2_for_occlusion_query_boolean);
+  EXPECT_FALSE(info_->feature_flags()
+      .use_arb_occlusion_query_for_occlusion_query_boolean);
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_vertex_array_object) {
+ SetupInitExpectations("GL_OES_vertex_array_object");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_vertex_array_object"));
+ EXPECT_TRUE(info_->feature_flags().native_vertex_array_object);
+}
+
+TEST_F(FeatureInfoTest, InitializeARB_vertex_array_object) {
+ SetupInitExpectations("GL_ARB_vertex_array_object");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_vertex_array_object"));
+ EXPECT_TRUE(info_->feature_flags().native_vertex_array_object);
+}
+
+TEST_F(FeatureInfoTest, InitializeAPPLE_vertex_array_object) {
+ SetupInitExpectations("GL_APPLE_vertex_array_object");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_vertex_array_object"));
+ EXPECT_TRUE(info_->feature_flags().native_vertex_array_object);
+}
+
+TEST_F(FeatureInfoTest, InitializeNo_vertex_array_object) {
+ SetupInitExpectations("");
+  // Even if the native extensions are not available, the implementation
+  // may still emulate the GL_OES_vertex_array_object functionality. In this
+  // scenario, native_vertex_array_object must be false.
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_vertex_array_object"));
+ EXPECT_FALSE(info_->feature_flags().native_vertex_array_object);
+}
+
+TEST_F(FeatureInfoTest, InitializeOES_element_index_uint) {
+ SetupInitExpectations("GL_OES_element_index_uint");
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_OES_element_index_uint"));
+ EXPECT_TRUE(info_->validators()->index_type.IsValid(GL_UNSIGNED_INT));
+}
+
+TEST_F(FeatureInfoTest, InitializeVAOsWithClientSideArrays) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::USE_CLIENT_SIDE_ARRAYS_FOR_STREAM_BUFFERS));
+ SetupInitExpectationsWithCommandLine("GL_OES_vertex_array_object",
+ command_line);
+ EXPECT_TRUE(info_->workarounds().use_client_side_arrays_for_stream_buffers);
+ EXPECT_FALSE(info_->feature_flags().native_vertex_array_object);
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_blend_minmax) {
+ SetupInitExpectations("GL_EXT_blend_minmax");
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_blend_minmax"));
+ EXPECT_TRUE(info_->validators()->equation.IsValid(GL_MIN_EXT));
+ EXPECT_TRUE(info_->validators()->equation.IsValid(GL_MAX_EXT));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_frag_depth) {
+ SetupInitExpectations("GL_EXT_frag_depth");
+ EXPECT_TRUE(info_->feature_flags().ext_frag_depth);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_frag_depth"));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_shader_texture_lod) {
+ SetupInitExpectations("GL_EXT_shader_texture_lod");
+ EXPECT_TRUE(info_->feature_flags().ext_shader_texture_lod);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_shader_texture_lod"));
+}
+
+TEST_F(FeatureInfoTest, InitializeEXT_discard_framebuffer) {
+ SetupInitExpectations("GL_EXT_discard_framebuffer");
+ EXPECT_TRUE(info_->feature_flags().ext_discard_framebuffer);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_discard_framebuffer"));
+}
+
+TEST_F(FeatureInfoTest, InitializeSamplersWithARBSamplerObjects) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_ARB_sampler_objects", "", "OpenGL 3.0");
+ EXPECT_TRUE(info_->feature_flags().enable_samplers);
+}
+
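+// A bare ES 3.0 context is expected to surface a large set of capabilities
+// without any extension strings: multisampled framebuffers, depth textures,
+// packed depth/stencil, samplers, MapBufferRange and more, as checked below.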
+TEST_F(FeatureInfoTest, InitializeWithES3) {
+ SetupInitExpectationsWithGLVersion("", "", "OpenGL ES 3.0");
+ EXPECT_TRUE(info_->feature_flags().chromium_framebuffer_multisample);
+ EXPECT_TRUE(info_->feature_flags().use_core_framebuffer_multisample);
+ EXPECT_THAT(info_->extensions(),
+ HasSubstr("GL_CHROMIUM_framebuffer_multisample"));
+ EXPECT_TRUE(info_->feature_flags().use_async_readpixels);
+ EXPECT_TRUE(info_->feature_flags().oes_standard_derivatives);
+ EXPECT_TRUE(info_->feature_flags().oes_depth24);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_GOOGLE_depth_texture"));
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_depth_texture"));
+ EXPECT_TRUE(
+ info_->validators()->texture_internal_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_TRUE(
+ info_->validators()->texture_internal_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_COMPONENT));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT));
+ EXPECT_TRUE(info_->validators()->pixel_type.IsValid(GL_UNSIGNED_INT_24_8));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT)
+ .IsValid(GL_UNSIGNED_SHORT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_COMPONENT)
+ .IsValid(GL_UNSIGNED_INT));
+ EXPECT_TRUE(info_->GetTextureFormatValidator(GL_DEPTH_STENCIL)
+ .IsValid(GL_UNSIGNED_INT_24_8));
+ EXPECT_TRUE(info_->feature_flags().packed_depth24_stencil8);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_OES_depth24"));
+ EXPECT_TRUE(
+ info_->validators()->render_buffer_format.IsValid(GL_DEPTH_COMPONENT24));
+ EXPECT_TRUE(
+ info_->validators()->render_buffer_format.IsValid(GL_DEPTH24_STENCIL8));
+ EXPECT_TRUE(
+ info_->validators()->texture_internal_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_DEPTH_STENCIL));
+ EXPECT_TRUE(info_->feature_flags().npot_ok);
+ EXPECT_TRUE(info_->feature_flags().native_vertex_array_object);
+ EXPECT_TRUE(info_->feature_flags().enable_samplers);
+ EXPECT_TRUE(info_->feature_flags().map_buffer_range);
+ EXPECT_TRUE(info_->feature_flags().ext_discard_framebuffer);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_discard_framebuffer"));
+ EXPECT_TRUE(info_->feature_flags().chromium_sync_query);
+ EXPECT_TRUE(gfx::GLFence::IsSupported());
+}
+
+TEST_F(FeatureInfoTest, InitializeWithoutSamplers) {
+ SetupInitExpectationsWithGLVersion("", "", "OpenGL GL 3.0");
+ EXPECT_FALSE(info_->feature_flags().enable_samplers);
+}
+
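+// The driver bug workaround tests pass the kGpuDriverBugWorkarounds switch
+// as a comma-separated list of integer workaround IDs (the gpu::... enum
+// constants used below); parsing must work without creating a GL context.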
+TEST_F(FeatureInfoTest, ParseDriverBugWorkaroundsSingle) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::EXIT_ON_CONTEXT_LOST));
+ // Workarounds should get parsed without the need for a context.
+ SetupWithCommandLine(command_line);
+ EXPECT_TRUE(info_->workarounds().exit_on_context_lost);
+}
+
+TEST_F(FeatureInfoTest, ParseDriverBugWorkaroundsMultiple) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::EXIT_ON_CONTEXT_LOST) + "," +
+ base::IntToString(gpu::MAX_CUBE_MAP_TEXTURE_SIZE_LIMIT_1024) + "," +
+ base::IntToString(gpu::MAX_TEXTURE_SIZE_LIMIT_4096));
+ // Workarounds should get parsed without the need for a context.
+ SetupWithCommandLine(command_line);
+ EXPECT_TRUE(info_->workarounds().exit_on_context_lost);
+ EXPECT_EQ(1024, info_->workarounds().max_cube_map_texture_size);
+ EXPECT_EQ(4096, info_->workarounds().max_texture_size);
+}
+
+TEST_F(FeatureInfoTest, InitializeWithARBSync) {
+ SetupInitExpectations("GL_ARB_sync");
+ EXPECT_TRUE(info_->feature_flags().chromium_sync_query);
+ EXPECT_TRUE(gfx::GLFence::IsSupported());
+}
+
+TEST_F(FeatureInfoTest, InitializeWithNVFence) {
+ SetupInitExpectations("GL_NV_fence");
+ EXPECT_TRUE(info_->feature_flags().chromium_sync_query);
+ EXPECT_TRUE(gfx::GLFence::IsSupported());
+}
+
+TEST_F(FeatureInfoTest, ARBSyncDisabled) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::DISABLE_ARB_SYNC));
+ SetupInitExpectationsWithCommandLine("GL_ARB_sync", command_line);
+ EXPECT_FALSE(info_->feature_flags().chromium_sync_query);
+ EXPECT_FALSE(gfx::GLFence::IsSupported());
+}
+
+TEST_F(FeatureInfoTest, InitializeCHROMIUM_path_rendering) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_NV_path_rendering GL_EXT_direct_state_access", "", "4.3");
+ EXPECT_TRUE(info_->feature_flags().chromium_path_rendering);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_path_rendering"));
+}
+
+TEST_F(FeatureInfoTest, InitializeCHROMIUM_path_rendering2) {
+ SetupInitExpectationsWithGLVersion(
+ "GL_NV_path_rendering", "", "OpenGL ES 3.1");
+ EXPECT_TRUE(info_->feature_flags().chromium_path_rendering);
+ EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_path_rendering"));
+}
+
+TEST_F(FeatureInfoTest, InitializeNoCHROMIUM_path_rendering) {
+ SetupInitExpectationsWithGLVersion("", "", "4.3");
+ EXPECT_FALSE(info_->feature_flags().chromium_path_rendering);
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_CHROMIUM_path_rendering")));
+}
+
+TEST_F(FeatureInfoTest, InitializeNoCHROMIUM_path_rendering2) {
+ SetupInitExpectationsWithGLVersion("GL_NV_path_rendering", "", "4.3");
+ EXPECT_FALSE(info_->feature_flags().chromium_path_rendering);
+ EXPECT_THAT(info_->extensions(),
+ Not(HasSubstr("GL_CHROMIUM_path_rendering")));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/framebuffer_manager.cc b/gpu/command_buffer/service/framebuffer_manager.cc
new file mode 100644
index 0000000..b8026c0
--- /dev/null
+++ b/gpu/command_buffer/service/framebuffer_manager.cc
@@ -0,0 +1,763 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+
+DecoderFramebufferState::DecoderFramebufferState()
+ : clear_state_dirty(false),
+ bound_read_framebuffer(NULL),
+ bound_draw_framebuffer(NULL) {
+}
+
+DecoderFramebufferState::~DecoderFramebufferState() {
+}
+
+Framebuffer::FramebufferComboCompleteMap*
+ Framebuffer::framebuffer_combo_complete_map_;
+
+// Framebuffer completeness is not cacheable on OS X because of dynamic
+// graphics switching.
+// http://crbug.com/180876
+#if defined(OS_MACOSX)
+bool Framebuffer::allow_framebuffer_combo_complete_map_ = false;
+#else
+bool Framebuffer::allow_framebuffer_combo_complete_map_ = true;
+#endif
+
+void Framebuffer::ClearFramebufferCompleteComboMap() {
+ if (framebuffer_combo_complete_map_) {
+ framebuffer_combo_complete_map_->clear();
+ }
+}
+
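+// Attachments are modeled polymorphically: RenderbufferAttachment wraps a
+// Renderbuffer, while TextureAttachment wraps a (texture, target, level,
+// samples) tuple, so the Framebuffer code below can query size, format and
+// clear state uniformly through the Framebuffer::Attachment interface.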
+class RenderbufferAttachment
+ : public Framebuffer::Attachment {
+ public:
+ explicit RenderbufferAttachment(
+ Renderbuffer* renderbuffer)
+ : renderbuffer_(renderbuffer) {
+ }
+
+ virtual GLsizei width() const OVERRIDE {
+ return renderbuffer_->width();
+ }
+
+ virtual GLsizei height() const OVERRIDE {
+ return renderbuffer_->height();
+ }
+
+ virtual GLenum internal_format() const OVERRIDE {
+ return renderbuffer_->internal_format();
+ }
+
+ virtual GLenum texture_type() const OVERRIDE {
+ return 0;
+ }
+
+ virtual GLsizei samples() const OVERRIDE {
+ return renderbuffer_->samples();
+ }
+
+ virtual GLuint object_name() const OVERRIDE {
+ return renderbuffer_->client_id();
+ }
+
+ virtual bool cleared() const OVERRIDE {
+ return renderbuffer_->cleared();
+ }
+
+ virtual void SetCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* /* texture_manager */,
+ bool cleared) OVERRIDE {
+ renderbuffer_manager->SetCleared(renderbuffer_.get(), cleared);
+ }
+
+ virtual bool IsTexture(
+ TextureRef* /* texture */) const OVERRIDE {
+ return false;
+ }
+
+ virtual bool IsRenderbuffer(
+ Renderbuffer* renderbuffer) const OVERRIDE {
+ return renderbuffer_.get() == renderbuffer;
+ }
+
+ virtual bool CanRenderTo() const OVERRIDE {
+ return true;
+ }
+
+ virtual void DetachFromFramebuffer(Framebuffer* framebuffer) const OVERRIDE {
+ // Nothing to do for renderbuffers.
+ }
+
+ virtual bool ValidForAttachmentType(
+ GLenum attachment_type, uint32 max_color_attachments) OVERRIDE {
+ uint32 need = GLES2Util::GetChannelsNeededForAttachmentType(
+ attachment_type, max_color_attachments);
+ uint32 have = GLES2Util::GetChannelsForFormat(internal_format());
+ return (need & have) != 0;
+ }
+
+ Renderbuffer* renderbuffer() const {
+ return renderbuffer_.get();
+ }
+
+ virtual size_t GetSignatureSize(
+ TextureManager* texture_manager) const OVERRIDE {
+ return renderbuffer_->GetSignatureSize();
+ }
+
+ virtual void AddToSignature(
+ TextureManager* texture_manager, std::string* signature) const OVERRIDE {
+ DCHECK(signature);
+ renderbuffer_->AddToSignature(signature);
+ }
+
+ virtual void OnWillRenderTo() const OVERRIDE {}
+ virtual void OnDidRenderTo() const OVERRIDE {}
+
+ protected:
+ virtual ~RenderbufferAttachment() { }
+
+ private:
+ scoped_refptr<Renderbuffer> renderbuffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(RenderbufferAttachment);
+};
+
+class TextureAttachment
+ : public Framebuffer::Attachment {
+ public:
+ TextureAttachment(
+ TextureRef* texture_ref, GLenum target, GLint level, GLsizei samples)
+ : texture_ref_(texture_ref),
+ target_(target),
+ level_(level),
+ samples_(samples) {
+ }
+
+ virtual GLsizei width() const OVERRIDE {
+ GLsizei temp_width = 0;
+ GLsizei temp_height = 0;
+ texture_ref_->texture()->GetLevelSize(
+ target_, level_, &temp_width, &temp_height);
+ return temp_width;
+ }
+
+ virtual GLsizei height() const OVERRIDE {
+ GLsizei temp_width = 0;
+ GLsizei temp_height = 0;
+ texture_ref_->texture()->GetLevelSize(
+ target_, level_, &temp_width, &temp_height);
+ return temp_height;
+ }
+
+ virtual GLenum internal_format() const OVERRIDE {
+ GLenum temp_type = 0;
+ GLenum temp_internal_format = 0;
+ texture_ref_->texture()->GetLevelType(
+ target_, level_, &temp_type, &temp_internal_format);
+ return temp_internal_format;
+ }
+
+ virtual GLenum texture_type() const OVERRIDE {
+ GLenum temp_type = 0;
+ GLenum temp_internal_format = 0;
+ texture_ref_->texture()->GetLevelType(
+ target_, level_, &temp_type, &temp_internal_format);
+ return temp_type;
+ }
+
+ virtual GLsizei samples() const OVERRIDE {
+ return samples_;
+ }
+
+ virtual GLuint object_name() const OVERRIDE {
+ return texture_ref_->client_id();
+ }
+
+ virtual bool cleared() const OVERRIDE {
+ return texture_ref_->texture()->IsLevelCleared(target_, level_);
+ }
+
+ virtual void SetCleared(
+ RenderbufferManager* /* renderbuffer_manager */,
+ TextureManager* texture_manager,
+ bool cleared) OVERRIDE {
+ texture_manager->SetLevelCleared(
+ texture_ref_.get(), target_, level_, cleared);
+ }
+
+ virtual bool IsTexture(TextureRef* texture) const OVERRIDE {
+ return texture == texture_ref_.get();
+ }
+
+ virtual bool IsRenderbuffer(
+ Renderbuffer* /* renderbuffer */)
+ const OVERRIDE {
+ return false;
+ }
+
+ TextureRef* texture() const {
+ return texture_ref_.get();
+ }
+
+ virtual bool CanRenderTo() const OVERRIDE {
+ return texture_ref_->texture()->CanRenderTo();
+ }
+
+ virtual void DetachFromFramebuffer(Framebuffer* framebuffer)
+ const OVERRIDE {
+ texture_ref_->texture()->DetachFromFramebuffer();
+ framebuffer->OnTextureRefDetached(texture_ref_.get());
+ }
+
+ virtual bool ValidForAttachmentType(
+ GLenum attachment_type, uint32 max_color_attachments) OVERRIDE {
+ GLenum type = 0;
+ GLenum internal_format = 0;
+ if (!texture_ref_->texture()->GetLevelType(
+ target_, level_, &type, &internal_format)) {
+ return false;
+ }
+ uint32 need = GLES2Util::GetChannelsNeededForAttachmentType(
+ attachment_type, max_color_attachments);
+ uint32 have = GLES2Util::GetChannelsForFormat(internal_format);
+
+ // Workaround for NVIDIA drivers that incorrectly expose these formats as
+ // renderable:
+ if (internal_format == GL_LUMINANCE || internal_format == GL_ALPHA ||
+ internal_format == GL_LUMINANCE_ALPHA) {
+ return false;
+ }
+ return (need & have) != 0;
+ }
+
+ virtual size_t GetSignatureSize(
+ TextureManager* texture_manager) const OVERRIDE {
+ return texture_manager->GetSignatureSize();
+ }
+
+ virtual void AddToSignature(
+ TextureManager* texture_manager, std::string* signature) const OVERRIDE {
+ DCHECK(signature);
+ texture_manager->AddToSignature(
+ texture_ref_.get(), target_, level_, signature);
+ }
+
+ virtual void OnWillRenderTo() const OVERRIDE {
+ texture_ref_->texture()->OnWillModifyPixels();
+ }
+
+ virtual void OnDidRenderTo() const OVERRIDE {
+ texture_ref_->texture()->OnDidModifyPixels();
+ }
+
+ protected:
+ virtual ~TextureAttachment() {}
+
+ private:
+ scoped_refptr<TextureRef> texture_ref_;
+ GLenum target_;
+ GLint level_;
+ GLsizei samples_;
+
+ DISALLOW_COPY_AND_ASSIGN(TextureAttachment);
+};
+
+FramebufferManager::TextureDetachObserver::TextureDetachObserver() {}
+
+FramebufferManager::TextureDetachObserver::~TextureDetachObserver() {}
+
+FramebufferManager::FramebufferManager(
+ uint32 max_draw_buffers, uint32 max_color_attachments)
+ : framebuffer_state_change_count_(1),
+ framebuffer_count_(0),
+ have_context_(true),
+ max_draw_buffers_(max_draw_buffers),
+ max_color_attachments_(max_color_attachments) {
+ DCHECK_GT(max_draw_buffers_, 0u);
+ DCHECK_GT(max_color_attachments_, 0u);
+}
+
+FramebufferManager::~FramebufferManager() {
+ DCHECK(framebuffers_.empty());
+  // If this triggers, it means something is keeping a reference to a
+  // Framebuffer that belongs to this manager.
+ CHECK_EQ(framebuffer_count_, 0u);
+}
+
+void Framebuffer::MarkAsDeleted() {
+ deleted_ = true;
+ while (!attachments_.empty()) {
+ Attachment* attachment = attachments_.begin()->second.get();
+ attachment->DetachFromFramebuffer(this);
+ attachments_.erase(attachments_.begin());
+ }
+}
+
+void FramebufferManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ framebuffers_.clear();
+}
+
+void FramebufferManager::StartTracking(
+ Framebuffer* /* framebuffer */) {
+ ++framebuffer_count_;
+}
+
+void FramebufferManager::StopTracking(
+ Framebuffer* /* framebuffer */) {
+ --framebuffer_count_;
+}
+
+void FramebufferManager::CreateFramebuffer(
+ GLuint client_id, GLuint service_id) {
+ std::pair<FramebufferMap::iterator, bool> result =
+ framebuffers_.insert(
+ std::make_pair(
+ client_id,
+ scoped_refptr<Framebuffer>(
+ new Framebuffer(this, service_id))));
+ DCHECK(result.second);
+}
+
+Framebuffer::Framebuffer(
+ FramebufferManager* manager, GLuint service_id)
+ : manager_(manager),
+ deleted_(false),
+ service_id_(service_id),
+ has_been_bound_(false),
+ framebuffer_complete_state_count_id_(0) {
+ manager->StartTracking(this);
+ DCHECK_GT(manager->max_draw_buffers_, 0u);
+ draw_buffers_.reset(new GLenum[manager->max_draw_buffers_]);
+ draw_buffers_[0] = GL_COLOR_ATTACHMENT0;
+ for (uint32 i = 1; i < manager->max_draw_buffers_; ++i)
+ draw_buffers_[i] = GL_NONE;
+}
+
+Framebuffer::~Framebuffer() {
+ if (manager_) {
+ if (manager_->have_context_) {
+ GLuint id = service_id();
+ glDeleteFramebuffersEXT(1, &id);
+ }
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+bool Framebuffer::HasUnclearedAttachment(
+ GLenum attachment) const {
+ AttachmentMap::const_iterator it =
+ attachments_.find(attachment);
+ if (it != attachments_.end()) {
+ const Attachment* attachment = it->second.get();
+ return !attachment->cleared();
+ }
+ return false;
+}
+
+bool Framebuffer::HasUnclearedColorAttachments() const {
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ if (it->first >= GL_COLOR_ATTACHMENT0 &&
+ it->first < GL_COLOR_ATTACHMENT0 + manager_->max_draw_buffers_) {
+ const Attachment* attachment = it->second.get();
+ if (!attachment->cleared())
+ return true;
+ }
+ }
+ return false;
+}
+
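+// Computes the draw buffer list implied by the color attachments that are
+// actually present and, if it differs from the client-requested
+// draw_buffers_, switches to it (recover == false) or restores the
+// client-requested list (recover == true) via glDrawBuffersARB.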
+void Framebuffer::ChangeDrawBuffersHelper(bool recover) const {
+ scoped_ptr<GLenum[]> buffers(new GLenum[manager_->max_draw_buffers_]);
+ for (uint32 i = 0; i < manager_->max_draw_buffers_; ++i)
+ buffers[i] = GL_NONE;
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ if (it->first >= GL_COLOR_ATTACHMENT0 &&
+ it->first < GL_COLOR_ATTACHMENT0 + manager_->max_draw_buffers_) {
+ buffers[it->first - GL_COLOR_ATTACHMENT0] = it->first;
+ }
+ }
+ bool different = false;
+ for (uint32 i = 0; i < manager_->max_draw_buffers_; ++i) {
+ if (buffers[i] != draw_buffers_[i]) {
+ different = true;
+ break;
+ }
+ }
+ if (different) {
+ if (recover)
+ glDrawBuffersARB(manager_->max_draw_buffers_, draw_buffers_.get());
+ else
+ glDrawBuffersARB(manager_->max_draw_buffers_, buffers.get());
+ }
+}
+
+void Framebuffer::PrepareDrawBuffersForClear() const {
+ bool recover = false;
+ ChangeDrawBuffersHelper(recover);
+}
+
+void Framebuffer::RestoreDrawBuffersAfterClear() const {
+ bool recover = true;
+ ChangeDrawBuffersHelper(recover);
+}
+
+void Framebuffer::MarkAttachmentAsCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager,
+ GLenum attachment,
+ bool cleared) {
+ AttachmentMap::iterator it = attachments_.find(attachment);
+ if (it != attachments_.end()) {
+ Attachment* a = it->second.get();
+ if (a->cleared() != cleared) {
+ a->SetCleared(renderbuffer_manager,
+ texture_manager,
+ cleared);
+ }
+ }
+}
+
+void Framebuffer::MarkAttachmentsAsCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager,
+ bool cleared) {
+ for (AttachmentMap::iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ if (attachment->cleared() != cleared) {
+ attachment->SetCleared(renderbuffer_manager, texture_manager, cleared);
+ }
+ }
+}
+
+bool Framebuffer::HasDepthAttachment() const {
+ return attachments_.find(GL_DEPTH_STENCIL_ATTACHMENT) != attachments_.end() ||
+ attachments_.find(GL_DEPTH_ATTACHMENT) != attachments_.end();
+}
+
+bool Framebuffer::HasStencilAttachment() const {
+ return attachments_.find(GL_DEPTH_STENCIL_ATTACHMENT) != attachments_.end() ||
+ attachments_.find(GL_STENCIL_ATTACHMENT) != attachments_.end();
+}
+
+GLenum Framebuffer::GetColorAttachmentFormat() const {
+ AttachmentMap::const_iterator it = attachments_.find(GL_COLOR_ATTACHMENT0);
+ if (it == attachments_.end()) {
+ return 0;
+ }
+ const Attachment* attachment = it->second.get();
+ return attachment->internal_format();
+}
+
+GLenum Framebuffer::GetColorAttachmentTextureType() const {
+ AttachmentMap::const_iterator it = attachments_.find(GL_COLOR_ATTACHMENT0);
+ if (it == attachments_.end()) {
+ return 0;
+ }
+ const Attachment* attachment = it->second.get();
+ return attachment->texture_type();
+}
+
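+// A cheap completeness pre-check that does not call into GL: every
+// attachment must be valid for its attachment point, non-empty, renderable
+// and the same size as the others. GL_FRAMEBUFFER_COMPLETE here only means
+// these local checks passed; the driver may still reject the framebuffer.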
+GLenum Framebuffer::IsPossiblyComplete() const {
+ if (attachments_.empty()) {
+ return GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT;
+ }
+
+ GLsizei width = -1;
+ GLsizei height = -1;
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ GLenum attachment_type = it->first;
+ Attachment* attachment = it->second.get();
+ if (!attachment->ValidForAttachmentType(attachment_type,
+ manager_->max_color_attachments_)) {
+ return GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT;
+ }
+ if (width < 0) {
+ width = attachment->width();
+ height = attachment->height();
+ if (width == 0 || height == 0) {
+ return GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT;
+ }
+ } else {
+ if (attachment->width() != width || attachment->height() != height) {
+ return GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT;
+ }
+ }
+
+ if (!attachment->CanRenderTo()) {
+ return GL_FRAMEBUFFER_UNSUPPORTED;
+ }
+ }
+
+ // This does not mean the framebuffer is actually complete. It just means our
+ // checks passed.
+ return GL_FRAMEBUFFER_COMPLETE;
+}
+
+GLenum Framebuffer::GetStatus(
+ TextureManager* texture_manager, GLenum target) const {
+ // Check if we have this combo already.
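+  // The signature encodes |target| plus the identity and state of every
+  // attachment; a combination previously seen as GL_FRAMEBUFFER_COMPLETE is
+  // answered from the cache without calling the driver again.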
+ std::string signature;
+ if (allow_framebuffer_combo_complete_map_) {
+ size_t signature_size = sizeof(target);
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ signature_size += sizeof(it->first) +
+ attachment->GetSignatureSize(texture_manager);
+ }
+
+ signature.reserve(signature_size);
+ signature.append(reinterpret_cast<const char*>(&target), sizeof(target));
+
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ signature.append(reinterpret_cast<const char*>(&it->first),
+ sizeof(it->first));
+ attachment->AddToSignature(texture_manager, &signature);
+ }
+ DCHECK(signature.size() == signature_size);
+
+ if (!framebuffer_combo_complete_map_) {
+ framebuffer_combo_complete_map_ = new FramebufferComboCompleteMap();
+ }
+
+ FramebufferComboCompleteMap::const_iterator it =
+ framebuffer_combo_complete_map_->find(signature);
+ if (it != framebuffer_combo_complete_map_->end()) {
+ return GL_FRAMEBUFFER_COMPLETE;
+ }
+ }
+
+ GLenum result = glCheckFramebufferStatusEXT(target);
+
+ // Insert the new result into the combo map.
+ if (allow_framebuffer_combo_complete_map_ &&
+ result == GL_FRAMEBUFFER_COMPLETE) {
+ framebuffer_combo_complete_map_->insert(std::make_pair(signature, true));
+ }
+
+ return result;
+}
+
+bool Framebuffer::IsCleared() const {
+  // Are all the attachments cleared?
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ if (!attachment->cleared()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+GLenum Framebuffer::GetDrawBuffer(GLenum draw_buffer) const {
+ GLsizei index = static_cast<GLsizei>(
+ draw_buffer - GL_DRAW_BUFFER0_ARB);
+ CHECK(index >= 0 &&
+ index < static_cast<GLsizei>(manager_->max_draw_buffers_));
+ return draw_buffers_[index];
+}
+
+void Framebuffer::SetDrawBuffers(GLsizei n, const GLenum* bufs) {
+ DCHECK(n <= static_cast<GLsizei>(manager_->max_draw_buffers_));
+ for (GLsizei i = 0; i < n; ++i)
+ draw_buffers_[i] = bufs[i];
+}
+
+bool Framebuffer::HasAlphaMRT() const {
+ for (uint32 i = 0; i < manager_->max_draw_buffers_; ++i) {
+ if (draw_buffers_[i] != GL_NONE) {
+ const Attachment* attachment = GetAttachment(draw_buffers_[i]);
+ if (!attachment)
+ continue;
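+      // 0x0008 is the alpha bit in the channel mask returned by
+      // GLES2Util::GetChannelsForFormat().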
+ if ((GLES2Util::GetChannelsForFormat(
+ attachment->internal_format()) & 0x0008) != 0)
+ return true;
+ }
+ }
+ return false;
+}
+
+void Framebuffer::UnbindRenderbuffer(
+ GLenum target, Renderbuffer* renderbuffer) {
+ bool done;
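+  // AttachRenderbuffer() mutates |attachments_| and invalidates iterators, so
+  // restart the scan after each detach until a full pass removes nothing.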
+ do {
+ done = true;
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ if (attachment->IsRenderbuffer(renderbuffer)) {
+ // TODO(gman): manually detach renderbuffer.
+ // glFramebufferRenderbufferEXT(target, it->first, GL_RENDERBUFFER, 0);
+ AttachRenderbuffer(it->first, NULL);
+ done = false;
+ break;
+ }
+ }
+ } while (!done);
+}
+
+void Framebuffer::UnbindTexture(
+ GLenum target, TextureRef* texture_ref) {
+ bool done;
+ do {
+ done = true;
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ Attachment* attachment = it->second.get();
+ if (attachment->IsTexture(texture_ref)) {
+ // TODO(gman): manually detach texture.
+ // glFramebufferTexture2DEXT(target, it->first, GL_TEXTURE_2D, 0, 0);
+ AttachTexture(it->first, NULL, GL_TEXTURE_2D, 0, 0);
+ done = false;
+ break;
+ }
+ }
+ } while (!done);
+}
+
+Framebuffer* FramebufferManager::GetFramebuffer(
+ GLuint client_id) {
+ FramebufferMap::iterator it = framebuffers_.find(client_id);
+ return it != framebuffers_.end() ? it->second.get() : NULL;
+}
+
+void FramebufferManager::RemoveFramebuffer(GLuint client_id) {
+ FramebufferMap::iterator it = framebuffers_.find(client_id);
+ if (it != framebuffers_.end()) {
+ it->second->MarkAsDeleted();
+ framebuffers_.erase(it);
+ }
+}
+
+void Framebuffer::AttachRenderbuffer(
+ GLenum attachment, Renderbuffer* renderbuffer) {
+ const Attachment* a = GetAttachment(attachment);
+ if (a)
+ a->DetachFromFramebuffer(this);
+ if (renderbuffer) {
+ attachments_[attachment] = scoped_refptr<Attachment>(
+ new RenderbufferAttachment(renderbuffer));
+ } else {
+ attachments_.erase(attachment);
+ }
+ framebuffer_complete_state_count_id_ = 0;
+}
+
+void Framebuffer::AttachTexture(
+ GLenum attachment, TextureRef* texture_ref, GLenum target,
+ GLint level, GLsizei samples) {
+ const Attachment* a = GetAttachment(attachment);
+ if (a)
+ a->DetachFromFramebuffer(this);
+ if (texture_ref) {
+ attachments_[attachment] = scoped_refptr<Attachment>(
+ new TextureAttachment(texture_ref, target, level, samples));
+ texture_ref->texture()->AttachToFramebuffer();
+ } else {
+ attachments_.erase(attachment);
+ }
+ framebuffer_complete_state_count_id_ = 0;
+}
+
+const Framebuffer::Attachment*
+ Framebuffer::GetAttachment(
+ GLenum attachment) const {
+ AttachmentMap::const_iterator it = attachments_.find(attachment);
+ if (it != attachments_.end()) {
+ return it->second.get();
+ }
+ return NULL;
+}
+
+void Framebuffer::OnTextureRefDetached(TextureRef* texture) {
+ manager_->OnTextureRefDetached(texture);
+}
+
+void Framebuffer::OnWillRenderTo() const {
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ it->second->OnWillRenderTo();
+ }
+}
+
+void Framebuffer::OnDidRenderTo() const {
+ for (AttachmentMap::const_iterator it = attachments_.begin();
+ it != attachments_.end(); ++it) {
+ it->second->OnDidRenderTo();
+ }
+}
+
+bool FramebufferManager::GetClientId(
+ GLuint service_id, GLuint* client_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (FramebufferMap::const_iterator it = framebuffers_.begin();
+ it != framebuffers_.end(); ++it) {
+ if (it->second->service_id() == service_id) {
+ *client_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+void FramebufferManager::MarkAttachmentsAsCleared(
+ Framebuffer* framebuffer,
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager) {
+ DCHECK(framebuffer);
+ framebuffer->MarkAttachmentsAsCleared(renderbuffer_manager,
+ texture_manager,
+ true);
+ MarkAsComplete(framebuffer);
+}
+
+void FramebufferManager::MarkAsComplete(
+ Framebuffer* framebuffer) {
+ DCHECK(framebuffer);
+ framebuffer->MarkAsComplete(framebuffer_state_change_count_);
+}
+
+bool FramebufferManager::IsComplete(
+ Framebuffer* framebuffer) {
+ DCHECK(framebuffer);
+ return framebuffer->framebuffer_complete_state_count_id() ==
+ framebuffer_state_change_count_;
+}
+
+void FramebufferManager::OnTextureRefDetached(TextureRef* texture) {
+ for (TextureDetachObserverVector::iterator it =
+ texture_detach_observers_.begin();
+ it != texture_detach_observers_.end();
+ ++it) {
+ TextureDetachObserver* observer = *it;
+ observer->OnTextureRefDetachedFromFramebuffer(texture);
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/framebuffer_manager.h b/gpu/command_buffer/service/framebuffer_manager.h
new file mode 100644
index 0000000..96bf7fe
--- /dev/null
+++ b/gpu/command_buffer/service/framebuffer_manager.h
@@ -0,0 +1,317 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_FRAMEBUFFER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_FRAMEBUFFER_MANAGER_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class FramebufferManager;
+class Renderbuffer;
+class RenderbufferManager;
+class Texture;
+class TextureRef;
+class TextureManager;
+
+// Info about a particular Framebuffer.
+class GPU_EXPORT Framebuffer : public base::RefCounted<Framebuffer> {
+ public:
+ class Attachment : public base::RefCounted<Attachment> {
+ public:
+ virtual GLsizei width() const = 0;
+ virtual GLsizei height() const = 0;
+ virtual GLenum internal_format() const = 0;
+ virtual GLenum texture_type() const = 0;
+ virtual GLsizei samples() const = 0;
+ virtual GLuint object_name() const = 0;
+ virtual bool cleared() const = 0;
+ virtual void SetCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager,
+ bool cleared) = 0;
+ virtual bool IsTexture(TextureRef* texture) const = 0;
+ virtual bool IsRenderbuffer(
+ Renderbuffer* renderbuffer) const = 0;
+ virtual bool CanRenderTo() const = 0;
+ virtual void DetachFromFramebuffer(Framebuffer* framebuffer) const = 0;
+ virtual bool ValidForAttachmentType(
+ GLenum attachment_type, uint32 max_color_attachments) = 0;
+ virtual size_t GetSignatureSize(TextureManager* texture_manager) const = 0;
+ virtual void AddToSignature(
+ TextureManager* texture_manager, std::string* signature) const = 0;
+ virtual void OnWillRenderTo() const = 0;
+ virtual void OnDidRenderTo() const = 0;
+
+ protected:
+ friend class base::RefCounted<Attachment>;
+ virtual ~Attachment() {}
+ };
+
+ Framebuffer(FramebufferManager* manager, GLuint service_id);
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ bool HasUnclearedAttachment(GLenum attachment) const;
+ bool HasUnclearedColorAttachments() const;
+
+ void MarkAttachmentAsCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager,
+ GLenum attachment,
+ bool cleared);
+
+  // Attaches a renderbuffer to a particular attachment.
+ // Pass null to detach.
+ void AttachRenderbuffer(
+ GLenum attachment, Renderbuffer* renderbuffer);
+
+  // Attaches a texture to a particular attachment. Pass null to detach.
+ void AttachTexture(
+ GLenum attachment, TextureRef* texture_ref, GLenum target,
+ GLint level, GLsizei samples);
+
+ // Unbinds the given renderbuffer if it is bound.
+ void UnbindRenderbuffer(
+ GLenum target, Renderbuffer* renderbuffer);
+
+ // Unbinds the given texture if it is bound.
+ void UnbindTexture(
+ GLenum target, TextureRef* texture_ref);
+
+ const Attachment* GetAttachment(GLenum attachment) const;
+
+ bool IsDeleted() const {
+ return deleted_;
+ }
+
+ void MarkAsValid() {
+ has_been_bound_ = true;
+ }
+
+ bool IsValid() const {
+ return has_been_bound_ && !IsDeleted();
+ }
+
+ bool HasDepthAttachment() const;
+ bool HasStencilAttachment() const;
+ GLenum GetColorAttachmentFormat() const;
+ // If the color attachment is a texture, returns its type; otherwise,
+ // returns 0.
+ GLenum GetColorAttachmentTextureType() const;
+
+ // Verify all the rules in OpenGL ES 2.0.25 4.4.5 are followed.
+ // Returns GL_FRAMEBUFFER_COMPLETE if there are no reasons we know we can't
+ // use this combination of attachments. Otherwise returns the value
+ // that glCheckFramebufferStatus should return for this set of attachments.
+ // Note that receiving GL_FRAMEBUFFER_COMPLETE from this function does
+ // not mean the real OpenGL will consider it framebuffer complete. It just
+ // means it passed our tests.
+ GLenum IsPossiblyComplete() const;
+
+ // Implements optimized glGetFramebufferStatus.
+ GLenum GetStatus(TextureManager* texture_manager, GLenum target) const;
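+
+  // A typical decoder-side completeness check combines the two calls
+  // (illustrative sketch, not the decoder's exact code):
+  //   if (framebuffer->IsPossiblyComplete() != GL_FRAMEBUFFER_COMPLETE ||
+  //       framebuffer->GetStatus(texture_manager, target) !=
+  //           GL_FRAMEBUFFER_COMPLETE) {
+  //     // Generate GL_INVALID_FRAMEBUFFER_OPERATION.
+  //   }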
+
+ // Check all attachments are cleared
+ bool IsCleared() const;
+
+ GLenum GetDrawBuffer(GLenum draw_buffer) const;
+
+ void SetDrawBuffers(GLsizei n, const GLenum* bufs);
+
+ // If a color buffer is attached to GL_COLOR_ATTACHMENTi, enable that
+ // draw buffer for glClear().
+ void PrepareDrawBuffersForClear() const;
+
+ // Restore draw buffers states that have been changed in
+ // PrepareDrawBuffersForClear().
+ void RestoreDrawBuffersAfterClear() const;
+
+  // Return true if any draw buffer has an alpha channel.
+ bool HasAlphaMRT() const;
+
+ static void ClearFramebufferCompleteComboMap();
+
+ static bool AllowFramebufferComboCompleteMapForTesting() {
+ return allow_framebuffer_combo_complete_map_;
+ }
+
+ void OnTextureRefDetached(TextureRef* texture);
+ void OnWillRenderTo() const;
+ void OnDidRenderTo() const;
+
+ private:
+ friend class FramebufferManager;
+ friend class base::RefCounted<Framebuffer>;
+
+ ~Framebuffer();
+
+ void MarkAsDeleted();
+
+ void MarkAttachmentsAsCleared(
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager,
+ bool cleared);
+
+ void MarkAsComplete(unsigned state_id) {
+ framebuffer_complete_state_count_id_ = state_id;
+ }
+
+ unsigned framebuffer_complete_state_count_id() const {
+ return framebuffer_complete_state_count_id_;
+ }
+
+ // Helper function for PrepareDrawBuffersForClear() and
+ // RestoreDrawBuffersAfterClear().
+ void ChangeDrawBuffersHelper(bool recover) const;
+
+  // The manager that owns this.
+ FramebufferManager* manager_;
+
+ bool deleted_;
+
+ // Service side framebuffer id.
+ GLuint service_id_;
+
+ // Whether this framebuffer has ever been bound.
+ bool has_been_bound_;
+
+  // State count when this framebuffer was last checked for completeness.
+ unsigned framebuffer_complete_state_count_id_;
+
+ // A map of attachments.
+ typedef base::hash_map<GLenum, scoped_refptr<Attachment> > AttachmentMap;
+ AttachmentMap attachments_;
+
+  // A map of successful framebuffer combos. If a combo is in the map,
+  // its status is FRAMEBUFFER_COMPLETE.
+ typedef base::hash_map<std::string, bool> FramebufferComboCompleteMap;
+ static FramebufferComboCompleteMap* framebuffer_combo_complete_map_;
+ static bool allow_framebuffer_combo_complete_map_;
+
+ scoped_ptr<GLenum[]> draw_buffers_;
+
+ DISALLOW_COPY_AND_ASSIGN(Framebuffer);
+};
+
+struct DecoderFramebufferState {
+ DecoderFramebufferState();
+ ~DecoderFramebufferState();
+
+ // State saved for clearing so we can clear render buffers and then
+ // restore to these values.
+ bool clear_state_dirty;
+
+ // The currently bound framebuffers
+ scoped_refptr<Framebuffer> bound_read_framebuffer;
+ scoped_refptr<Framebuffer> bound_draw_framebuffer;
+};
+
+// This class keeps track of the framebuffers and their attached renderbuffers
+// so we can correctly clear them.
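+//
+// Typical use (illustrative sketch; variable names are arbitrary):
+//   FramebufferManager manager(max_draw_buffers, max_color_attachments);
+//   manager.CreateFramebuffer(client_id, service_id);
+//   Framebuffer* framebuffer = manager.GetFramebuffer(client_id);
+//   framebuffer->AttachRenderbuffer(GL_COLOR_ATTACHMENT0, renderbuffer);
+//   ...
+//   manager.Destroy(have_context);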
+class GPU_EXPORT FramebufferManager {
+ public:
+ class GPU_EXPORT TextureDetachObserver {
+ public:
+ TextureDetachObserver();
+ virtual ~TextureDetachObserver();
+
+ virtual void OnTextureRefDetachedFromFramebuffer(TextureRef* texture) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TextureDetachObserver);
+ };
+
+ FramebufferManager(uint32 max_draw_buffers, uint32 max_color_attachments);
+ ~FramebufferManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a Framebuffer for the given framebuffer.
+ void CreateFramebuffer(GLuint client_id, GLuint service_id);
+
+ // Gets the framebuffer info for the given framebuffer.
+ Framebuffer* GetFramebuffer(GLuint client_id);
+
+ // Removes a framebuffer info for the given framebuffer.
+ void RemoveFramebuffer(GLuint client_id);
+
+ // Gets a client id for a given service id.
+ bool GetClientId(GLuint service_id, GLuint* client_id) const;
+
+ void MarkAttachmentsAsCleared(
+ Framebuffer* framebuffer,
+ RenderbufferManager* renderbuffer_manager,
+ TextureManager* texture_manager);
+
+ void MarkAsComplete(Framebuffer* framebuffer);
+
+ bool IsComplete(Framebuffer* framebuffer);
+
+ void IncFramebufferStateChangeCount() {
+    // Make sure this is never 0: OR-ing in the high bit keeps the count
+    // nonzero even after the low bits wrap.
+ framebuffer_state_change_count_ =
+ (framebuffer_state_change_count_ + 1) | 0x80000000U;
+ }
+
+ void AddObserver(TextureDetachObserver* observer) {
+ texture_detach_observers_.push_back(observer);
+ }
+
+ void RemoveObserver(TextureDetachObserver* observer) {
+ texture_detach_observers_.erase(
+ std::remove(texture_detach_observers_.begin(),
+ texture_detach_observers_.end(),
+ observer),
+ texture_detach_observers_.end());
+ }
+
+ private:
+ friend class Framebuffer;
+
+ void StartTracking(Framebuffer* framebuffer);
+ void StopTracking(Framebuffer* framebuffer);
+
+ void OnTextureRefDetached(TextureRef* texture);
+
+ // Info for each framebuffer in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<Framebuffer> >
+ FramebufferMap;
+ FramebufferMap framebuffers_;
+
+  // Incremented anytime anything changes that might affect framebuffer
+  // state.
+ unsigned framebuffer_state_change_count_;
+
+  // Counts the number of Framebuffers allocated with 'this' as their manager.
+  // Allows checking that no Framebuffer outlives this manager.
+ unsigned int framebuffer_count_;
+
+ bool have_context_;
+
+ uint32 max_draw_buffers_;
+ uint32 max_color_attachments_;
+
+ typedef std::vector<TextureDetachObserver*> TextureDetachObserverVector;
+ TextureDetachObserverVector texture_detach_observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(FramebufferManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_FRAMEBUFFER_MANAGER_H_
diff --git a/gpu/command_buffer/service/framebuffer_manager_unittest.cc b/gpu/command_buffer/service/framebuffer_manager_unittest.cc
new file mode 100644
index 0000000..1ded558
--- /dev/null
+++ b/gpu/command_buffer/service/framebuffer_manager_unittest.cc
@@ -0,0 +1,973 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::Return;
+
+namespace gpu {
+namespace gles2 {
+namespace {
+
+const GLint kMaxTextureSize = 64;
+const GLint kMaxCubemapSize = 64;
+const GLint kMaxRenderbufferSize = 64;
+const GLint kMaxSamples = 4;
+const uint32 kMaxDrawBuffers = 16;
+const uint32 kMaxColorAttachments = 16;
+const bool kDepth24Supported = false;
+const bool kUseDefaultTextures = false;
+
+} // namespace
+
+class FramebufferManagerTest : public GpuServiceTest {
+ public:
+ FramebufferManagerTest()
+ : manager_(1, 1),
+ texture_manager_(NULL,
+ new FeatureInfo(),
+ kMaxTextureSize,
+ kMaxCubemapSize,
+ kUseDefaultTextures),
+ renderbuffer_manager_(NULL,
+ kMaxRenderbufferSize,
+ kMaxSamples,
+ kDepth24Supported) {}
+ virtual ~FramebufferManagerTest() {
+ manager_.Destroy(false);
+ texture_manager_.Destroy(false);
+ renderbuffer_manager_.Destroy(false);
+ }
+
+ protected:
+
+ FramebufferManager manager_;
+ TextureManager texture_manager_;
+ RenderbufferManager renderbuffer_manager_;
+};
+
+TEST_F(FramebufferManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+  // Check we can create a framebuffer.
+ manager_.CreateFramebuffer(kClient1Id, kService1Id);
+ // Check framebuffer got created.
+ Framebuffer* framebuffer1 = manager_.GetFramebuffer(kClient1Id);
+ ASSERT_TRUE(framebuffer1 != NULL);
+ EXPECT_FALSE(framebuffer1->IsDeleted());
+ EXPECT_EQ(kService1Id, framebuffer1->service_id());
+ GLuint client_id = 0;
+ EXPECT_TRUE(manager_.GetClientId(framebuffer1->service_id(), &client_id));
+ EXPECT_EQ(kClient1Id, client_id);
+ // Check we get nothing for a non-existent framebuffer.
+ EXPECT_TRUE(manager_.GetFramebuffer(kClient2Id) == NULL);
+  // Check that trying to remove a non-existent framebuffer does not crash.
+ manager_.RemoveFramebuffer(kClient2Id);
+ // Check framebuffer gets deleted when last reference is released.
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Check we can't get the framebuffer after we remove it.
+ manager_.RemoveFramebuffer(kClient1Id);
+ EXPECT_TRUE(manager_.GetFramebuffer(kClient1Id) == NULL);
+}
+
+TEST_F(FramebufferManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+  // Check we can create a framebuffer.
+ manager_.CreateFramebuffer(kClient1Id, kService1Id);
+ // Check framebuffer got created.
+ Framebuffer* framebuffer1 = manager_.GetFramebuffer(kClient1Id);
+ ASSERT_TRUE(framebuffer1 != NULL);
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.Destroy(true);
+ // Check the resources were released.
+ framebuffer1 = manager_.GetFramebuffer(kClient1Id);
+ ASSERT_TRUE(framebuffer1 == NULL);
+}
+
+class FramebufferInfoTest : public GpuServiceTest {
+ public:
+ static const GLuint kClient1Id = 1;
+ static const GLuint kService1Id = 11;
+
+ FramebufferInfoTest()
+ : manager_(kMaxDrawBuffers, kMaxColorAttachments),
+ feature_info_(new FeatureInfo()),
+ renderbuffer_manager_(NULL, kMaxRenderbufferSize, kMaxSamples,
+ kDepth24Supported) {
+ texture_manager_.reset(new TextureManager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubemapSize,
+ kUseDefaultTextures));
+ }
+ virtual ~FramebufferInfoTest() {
+ manager_.Destroy(false);
+ texture_manager_->Destroy(false);
+ renderbuffer_manager_.Destroy(false);
+ }
+
+ protected:
+ virtual void SetUp() {
+ InitializeContext("", "");
+ }
+
+ void InitializeContext(const char* gl_version, const char* extensions) {
+ GpuServiceTest::SetUp();
+ TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(gl_.get(),
+ extensions, "", gl_version);
+ feature_info_->Initialize();
+ manager_.CreateFramebuffer(kClient1Id, kService1Id);
+ error_state_.reset(new ::testing::StrictMock<gles2::MockErrorState>());
+ framebuffer_ = manager_.GetFramebuffer(kClient1Id);
+ ASSERT_TRUE(framebuffer_ != NULL);
+ }
+
+ FramebufferManager manager_;
+ Framebuffer* framebuffer_;
+ scoped_refptr<FeatureInfo> feature_info_;
+ scoped_ptr<TextureManager> texture_manager_;
+ RenderbufferManager renderbuffer_manager_;
+ scoped_ptr<MockErrorState> error_state_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLuint FramebufferInfoTest::kClient1Id;
+const GLuint FramebufferInfoTest::kService1Id;
+#endif
+
+TEST_F(FramebufferInfoTest, Basic) {
+ EXPECT_EQ(kService1Id, framebuffer_->service_id());
+ EXPECT_FALSE(framebuffer_->IsDeleted());
+ EXPECT_TRUE(NULL == framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_TRUE(NULL == framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_TRUE(NULL == framebuffer_->GetAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_TRUE(
+ NULL == framebuffer_->GetAttachment(GL_DEPTH_STENCIL_ATTACHMENT));
+ EXPECT_FALSE(framebuffer_->HasDepthAttachment());
+ EXPECT_FALSE(framebuffer_->HasStencilAttachment());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+ EXPECT_EQ(static_cast<GLenum>(0), framebuffer_->GetColorAttachmentFormat());
+ EXPECT_FALSE(manager_.IsComplete(framebuffer_));
+}
+
+TEST_F(FramebufferInfoTest, AttachRenderbuffer) {
+ const GLuint kRenderbufferClient1Id = 33;
+ const GLuint kRenderbufferService1Id = 333;
+ const GLuint kRenderbufferClient2Id = 34;
+ const GLuint kRenderbufferService2Id = 334;
+ const GLuint kRenderbufferClient3Id = 35;
+ const GLuint kRenderbufferService3Id = 335;
+ const GLuint kRenderbufferClient4Id = 36;
+ const GLuint kRenderbufferService4Id = 336;
+ const GLsizei kWidth1 = 16;
+ const GLsizei kHeight1 = 32;
+ const GLenum kFormat1 = GL_RGBA4;
+ const GLenum kBadFormat1 = GL_DEPTH_COMPONENT16;
+ const GLsizei kSamples1 = 0;
+ const GLsizei kWidth2 = 16;
+ const GLsizei kHeight2 = 32;
+ const GLenum kFormat2 = GL_DEPTH_COMPONENT16;
+ const GLsizei kSamples2 = 0;
+ const GLsizei kWidth3 = 16;
+ const GLsizei kHeight3 = 32;
+ const GLenum kFormat3 = GL_STENCIL_INDEX8;
+ const GLsizei kSamples3 = 0;
+ const GLsizei kWidth4 = 16;
+ const GLsizei kHeight4 = 32;
+ const GLenum kFormat4 = GL_STENCIL_INDEX8;
+ const GLsizei kSamples4 = 0;
+
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_FALSE(
+ framebuffer_->HasUnclearedAttachment(GL_DEPTH_STENCIL_ATTACHMENT));
+
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient1Id, kRenderbufferService1Id);
+ Renderbuffer* renderbuffer1 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+
+ // check adding one attachment
+ framebuffer_->AttachRenderbuffer(GL_COLOR_ATTACHMENT0, renderbuffer1);
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA4),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_FALSE(framebuffer_->HasDepthAttachment());
+ EXPECT_FALSE(framebuffer_->HasStencilAttachment());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+
+ // Try a format that's not good for COLOR_ATTACHMENT0.
+ renderbuffer_manager_.SetInfo(
+ renderbuffer1, kSamples1, kBadFormat1, kWidth1, kHeight1);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+
+ // Try a good format.
+ renderbuffer_manager_.SetInfo(
+ renderbuffer1, kSamples1, kFormat1, kWidth1, kHeight1);
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_FALSE(framebuffer_->HasDepthAttachment());
+ EXPECT_FALSE(framebuffer_->HasStencilAttachment());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+
+ // check adding another
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient2Id, kRenderbufferService2Id);
+ Renderbuffer* renderbuffer2 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient2Id);
+ ASSERT_TRUE(renderbuffer2 != NULL);
+ framebuffer_->AttachRenderbuffer(GL_DEPTH_ATTACHMENT, renderbuffer2);
+ EXPECT_TRUE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_TRUE(framebuffer_->HasDepthAttachment());
+ EXPECT_FALSE(framebuffer_->HasStencilAttachment());
+ // The attachment has a size of 0,0 so depending on the order of the map
+ // of attachments it could either get INCOMPLETE_ATTACHMENT because it's 0,0
+ // or INCOMPLETE_DIMENSIONS because it's not the same size as the other
+ // attachment.
+ GLenum status = framebuffer_->IsPossiblyComplete();
+ EXPECT_TRUE(
+ status == GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT ||
+ status == GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT);
+ EXPECT_FALSE(framebuffer_->IsCleared());
+
+ renderbuffer_manager_.SetInfo(
+ renderbuffer2, kSamples2, kFormat2, kWidth2, kHeight2);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+ EXPECT_TRUE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+
+ // check marking them as cleared.
+ manager_.MarkAttachmentsAsCleared(
+ framebuffer_, &renderbuffer_manager_, texture_manager_.get());
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+
+ // Check adding one that is already cleared.
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient3Id, kRenderbufferService3Id);
+ Renderbuffer* renderbuffer3 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient3Id);
+ ASSERT_TRUE(renderbuffer3 != NULL);
+ renderbuffer_manager_.SetInfo(
+ renderbuffer3, kSamples3, kFormat3, kWidth3, kHeight3);
+ renderbuffer_manager_.SetCleared(renderbuffer3, true);
+
+ framebuffer_->AttachRenderbuffer(GL_STENCIL_ATTACHMENT, renderbuffer3);
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_TRUE(framebuffer_->HasDepthAttachment());
+ EXPECT_TRUE(framebuffer_->HasStencilAttachment());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+
+  // Check marking the renderbuffer as uncleared.
+ renderbuffer_manager_.SetInfo(
+ renderbuffer1, kSamples1, kFormat1, kWidth1, kHeight1);
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_TRUE(framebuffer_->HasDepthAttachment());
+ EXPECT_TRUE(framebuffer_->HasStencilAttachment());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+
+ const Framebuffer::Attachment* attachment =
+ framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth1, attachment->width());
+ EXPECT_EQ(kHeight1, attachment->height());
+ EXPECT_EQ(kSamples1, attachment->samples());
+ EXPECT_EQ(kFormat1, attachment->internal_format());
+ EXPECT_FALSE(attachment->cleared());
+
+ EXPECT_TRUE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+
+ // Clear it.
+ manager_.MarkAttachmentsAsCleared(
+ framebuffer_, &renderbuffer_manager_, texture_manager_.get());
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_TRUE(framebuffer_->IsCleared());
+
+ // Check replacing an attachment
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient4Id, kRenderbufferService4Id);
+ Renderbuffer* renderbuffer4 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient4Id);
+ ASSERT_TRUE(renderbuffer4 != NULL);
+ renderbuffer_manager_.SetInfo(
+ renderbuffer4, kSamples4, kFormat4, kWidth4, kHeight4);
+
+ framebuffer_->AttachRenderbuffer(GL_STENCIL_ATTACHMENT, renderbuffer4);
+ EXPECT_TRUE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_FALSE(framebuffer_->IsCleared());
+
+ attachment = framebuffer_->GetAttachment(GL_STENCIL_ATTACHMENT);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth4, attachment->width());
+ EXPECT_EQ(kHeight4, attachment->height());
+ EXPECT_EQ(kSamples4, attachment->samples());
+ EXPECT_EQ(kFormat4, attachment->internal_format());
+ EXPECT_FALSE(attachment->cleared());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+
+ // Check changing an attachment.
+ renderbuffer_manager_.SetInfo(
+ renderbuffer4, kSamples4, kFormat4, kWidth4 + 1, kHeight4);
+
+ attachment = framebuffer_->GetAttachment(GL_STENCIL_ATTACHMENT);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth4 + 1, attachment->width());
+ EXPECT_EQ(kHeight4, attachment->height());
+ EXPECT_EQ(kSamples4, attachment->samples());
+ EXPECT_EQ(kFormat4, attachment->internal_format());
+ EXPECT_FALSE(attachment->cleared());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT),
+ framebuffer_->IsPossiblyComplete());
+
+ // Check removing it.
+ framebuffer_->AttachRenderbuffer(GL_STENCIL_ATTACHMENT, NULL);
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_TRUE(framebuffer_->HasDepthAttachment());
+ EXPECT_FALSE(framebuffer_->HasStencilAttachment());
+
+ EXPECT_TRUE(framebuffer_->IsCleared());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+
+ // Remove depth, Set color to 0 size.
+ framebuffer_->AttachRenderbuffer(GL_DEPTH_ATTACHMENT, NULL);
+ renderbuffer_manager_.SetInfo(renderbuffer1, kSamples1, kFormat1, 0, 0);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+
+ // Remove color.
+ framebuffer_->AttachRenderbuffer(GL_COLOR_ATTACHMENT0, NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+}
+
+TEST_F(FramebufferInfoTest, AttachTexture) {
+ const GLuint kTextureClient1Id = 33;
+ const GLuint kTextureService1Id = 333;
+ const GLuint kTextureClient2Id = 34;
+ const GLuint kTextureService2Id = 334;
+ const GLint kDepth = 1;
+ const GLint kBorder = 0;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+ const GLsizei kWidth1 = 16;
+ const GLsizei kHeight1 = 32;
+ const GLint kLevel1 = 0;
+ const GLenum kFormat1 = GL_RGBA;
+ const GLenum kBadFormat1 = GL_DEPTH_COMPONENT16;
+ const GLenum kTarget1 = GL_TEXTURE_2D;
+ const GLsizei kSamples1 = 0;
+ const GLsizei kWidth2 = 16;
+ const GLsizei kHeight2 = 32;
+ const GLint kLevel2 = 0;
+ const GLenum kFormat2 = GL_RGB;
+ const GLenum kTarget2 = GL_TEXTURE_2D;
+ const GLsizei kSamples2 = 0;
+ const GLsizei kWidth3 = 75;
+ const GLsizei kHeight3 = 123;
+ const GLint kLevel3 = 0;
+ const GLenum kFormat3 = GL_RGB565;
+ const GLsizei kSamples3 = 0;
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+ EXPECT_FALSE(
+ framebuffer_->HasUnclearedAttachment(GL_DEPTH_STENCIL_ATTACHMENT));
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+
+ texture_manager_->CreateTexture(kTextureClient1Id, kTextureService1Id);
+ scoped_refptr<TextureRef> texture1(
+ texture_manager_->GetTexture(kTextureClient1Id));
+ ASSERT_TRUE(texture1.get() != NULL);
+
+ // check adding one attachment
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture1.get(), kTarget1, kLevel1, kSamples1);
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+ EXPECT_EQ(static_cast<GLenum>(0), framebuffer_->GetColorAttachmentFormat());
+
+  // Try a format that doesn't work with COLOR_ATTACHMENT0.
+ texture_manager_->SetTarget(texture1.get(), GL_TEXTURE_2D);
+ texture_manager_->SetLevelInfo(texture1.get(),
+ GL_TEXTURE_2D,
+ kLevel1,
+ kBadFormat1,
+ kWidth1,
+ kHeight1,
+ kDepth,
+ kBorder,
+ kBadFormat1,
+ kType,
+ true);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+
+ // Try a good format.
+ texture_manager_->SetLevelInfo(texture1.get(),
+ GL_TEXTURE_2D,
+ kLevel1,
+ kFormat1,
+ kWidth1,
+ kHeight1,
+ kDepth,
+ kBorder,
+ kFormat1,
+ kType,
+ false);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+ texture_manager_->SetLevelInfo(texture1.get(),
+ GL_TEXTURE_2D,
+ kLevel1,
+ kFormat1,
+ kWidth1,
+ kHeight1,
+ kDepth,
+ kBorder,
+ kFormat1,
+ kType,
+ true);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+ EXPECT_EQ(static_cast<GLenum>(kFormat1),
+ framebuffer_->GetColorAttachmentFormat());
+
+ const Framebuffer::Attachment* attachment =
+ framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth1, attachment->width());
+ EXPECT_EQ(kHeight1, attachment->height());
+ EXPECT_EQ(kSamples1, attachment->samples());
+ EXPECT_EQ(kFormat1, attachment->internal_format());
+ EXPECT_TRUE(attachment->cleared());
+
+ // Check replacing an attachment
+ texture_manager_->CreateTexture(kTextureClient2Id, kTextureService2Id);
+ scoped_refptr<TextureRef> texture2(
+ texture_manager_->GetTexture(kTextureClient2Id));
+ ASSERT_TRUE(texture2.get() != NULL);
+ texture_manager_->SetTarget(texture2.get(), GL_TEXTURE_2D);
+ texture_manager_->SetLevelInfo(texture2.get(),
+ GL_TEXTURE_2D,
+ kLevel2,
+ kFormat2,
+ kWidth2,
+ kHeight2,
+ kDepth,
+ kBorder,
+ kFormat2,
+ kType,
+ true);
+
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture2.get(), kTarget2, kLevel2, kSamples2);
+ EXPECT_EQ(static_cast<GLenum>(kFormat2),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+
+ attachment = framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth2, attachment->width());
+ EXPECT_EQ(kHeight2, attachment->height());
+ EXPECT_EQ(kSamples2, attachment->samples());
+ EXPECT_EQ(kFormat2, attachment->internal_format());
+ EXPECT_TRUE(attachment->cleared());
+
+ // Check changing attachment
+ texture_manager_->SetLevelInfo(texture2.get(),
+ GL_TEXTURE_2D,
+ kLevel3,
+ kFormat3,
+ kWidth3,
+ kHeight3,
+ kDepth,
+ kBorder,
+ kFormat3,
+ kType,
+ false);
+ attachment = framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_EQ(kWidth3, attachment->width());
+ EXPECT_EQ(kHeight3, attachment->height());
+ EXPECT_EQ(kSamples3, attachment->samples());
+ EXPECT_EQ(kFormat3, attachment->internal_format());
+ EXPECT_FALSE(attachment->cleared());
+ EXPECT_EQ(static_cast<GLenum>(kFormat3),
+ framebuffer_->GetColorAttachmentFormat());
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+
+ // Set to size 0
+ texture_manager_->SetLevelInfo(texture2.get(),
+ GL_TEXTURE_2D,
+ kLevel3,
+ kFormat3,
+ 0,
+ 0,
+ kDepth,
+ kBorder,
+ kFormat3,
+ kType,
+ false);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+
+ // Check removing it.
+ framebuffer_->AttachTexture(GL_COLOR_ATTACHMENT0, NULL, 0, 0, 0);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) == NULL);
+ EXPECT_EQ(static_cast<GLenum>(0), framebuffer_->GetColorAttachmentFormat());
+
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT),
+ framebuffer_->IsPossiblyComplete());
+ EXPECT_TRUE(framebuffer_->IsCleared());
+}
+
+TEST_F(FramebufferInfoTest, DrawBuffers) {
+ const GLuint kTextureClientId[] = { 33, 34 };
+ const GLuint kTextureServiceId[] = { 333, 334 };
+
+ for (GLenum i = GL_COLOR_ATTACHMENT0;
+ i < GL_COLOR_ATTACHMENT0 + kMaxColorAttachments; ++i) {
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(i));
+ }
+ EXPECT_FALSE(framebuffer_->HasUnclearedColorAttachments());
+
+ EXPECT_EQ(static_cast<GLenum>(GL_COLOR_ATTACHMENT0),
+ framebuffer_->GetDrawBuffer(GL_DRAW_BUFFER0_ARB));
+ for (GLenum i = GL_DRAW_BUFFER1_ARB;
+ i < GL_DRAW_BUFFER0_ARB + kMaxDrawBuffers; ++i) {
+ EXPECT_EQ(static_cast<GLenum>(GL_NONE),
+ framebuffer_->GetDrawBuffer(i));
+ }
+
+ for (size_t ii = 0; ii < arraysize(kTextureClientId); ++ii) {
+ texture_manager_->CreateTexture(
+ kTextureClientId[ii], kTextureServiceId[ii]);
+ scoped_refptr<TextureRef> texture(
+ texture_manager_->GetTexture(kTextureClientId[ii]));
+ ASSERT_TRUE(texture.get() != NULL);
+
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0 + ii, texture.get(), GL_TEXTURE_2D, 0, 0);
+ EXPECT_FALSE(
+ framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0 + ii));
+
+ const Framebuffer::Attachment* attachment =
+ framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0 + ii);
+ ASSERT_TRUE(attachment != NULL);
+ EXPECT_TRUE(attachment->cleared());
+ }
+ EXPECT_TRUE(framebuffer_->IsCleared());
+ EXPECT_FALSE(framebuffer_->HasUnclearedColorAttachments());
+
+ // Set a texture as uncleared.
+ scoped_refptr<TextureRef> texture1(
+ texture_manager_->GetTexture(kTextureClientId[1]));
+ texture_manager_->SetTarget(texture1.get(), GL_TEXTURE_2D);
+ texture_manager_->SetLevelInfo(
+ texture1.get(), GL_TEXTURE_2D, 0, GL_RGBA, 4, 4,
+ 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, false);
+
+ const Framebuffer::Attachment* attachment1 =
+ framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT1);
+ ASSERT_TRUE(attachment1 != NULL);
+ EXPECT_FALSE(attachment1->cleared());
+ EXPECT_FALSE(framebuffer_->IsCleared());
+ EXPECT_TRUE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT1));
+ EXPECT_TRUE(framebuffer_->HasUnclearedColorAttachments());
+
+ GLenum buffers[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
+ framebuffer_->SetDrawBuffers(2, buffers);
+ EXPECT_EQ(static_cast<GLenum>(GL_COLOR_ATTACHMENT0),
+ framebuffer_->GetDrawBuffer(GL_DRAW_BUFFER0_ARB));
+ EXPECT_EQ(static_cast<GLenum>(GL_COLOR_ATTACHMENT1),
+ framebuffer_->GetDrawBuffer(GL_DRAW_BUFFER1_ARB));
+ for (GLenum i = GL_DRAW_BUFFER2_ARB;
+ i < GL_DRAW_BUFFER0_ARB + kMaxDrawBuffers; ++i) {
+ EXPECT_EQ(static_cast<GLenum>(GL_NONE),
+ framebuffer_->GetDrawBuffer(i));
+ }
+
+ // Nothing happens.
+ framebuffer_->PrepareDrawBuffersForClear();
+ framebuffer_->RestoreDrawBuffersAfterClear();
+
+  // Now we disable draw buffer 1.
+ buffers[1] = GL_NONE;
+ framebuffer_->SetDrawBuffers(2, buffers);
+ // We will enable the disabled draw buffer for clear(), and disable it
+ // after the clear.
+ EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ framebuffer_->PrepareDrawBuffersForClear();
+ EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ framebuffer_->RestoreDrawBuffersAfterClear();
+
+ // Now remove draw buffer 1's attachment.
+ framebuffer_->AttachTexture(GL_COLOR_ATTACHMENT1, NULL, 0, 0, 0);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT1) == NULL);
+
+ // Nothing happens.
+ framebuffer_->PrepareDrawBuffersForClear();
+ framebuffer_->RestoreDrawBuffersAfterClear();
+}
+
+class FramebufferInfoFloatTest : public FramebufferInfoTest {
+ public:
+ FramebufferInfoFloatTest()
+ : FramebufferInfoTest() {
+ }
+ virtual ~FramebufferInfoFloatTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ InitializeContext("OpenGL ES 3.0",
+ "GL_OES_texture_float GL_EXT_color_buffer_float");
+ }
+};
+
+TEST_F(FramebufferInfoFloatTest, AttachFloatTexture) {
+ const GLuint kTextureClientId = 33;
+ const GLuint kTextureServiceId = 333;
+ const GLint kDepth = 1;
+ const GLint kBorder = 0;
+ const GLenum kType = GL_FLOAT;
+ const GLsizei kWidth = 16;
+ const GLsizei kHeight = 32;
+ const GLint kLevel = 0;
+ const GLenum kFormat = GL_RGBA;
+ const GLenum kInternalFormat = GL_RGBA32F;
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLsizei kSamples = 0;
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_COLOR_ATTACHMENT0));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT));
+ EXPECT_FALSE(framebuffer_->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
+
+ texture_manager_->CreateTexture(kTextureClientId, kTextureServiceId);
+ scoped_refptr<TextureRef> texture(
+ texture_manager_->GetTexture(kTextureClientId));
+ ASSERT_TRUE(texture.get() != NULL);
+
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture.get(), kTarget, kLevel, kSamples);
+ EXPECT_EQ(static_cast<GLenum>(0), framebuffer_->GetColorAttachmentFormat());
+
+ texture_manager_->SetTarget(texture.get(), GL_TEXTURE_2D);
+ texture_manager_->SetLevelInfo(texture.get(),
+ GL_TEXTURE_2D,
+ kLevel,
+ kInternalFormat,
+ kWidth,
+ kHeight,
+ kDepth,
+ kBorder,
+ kFormat,
+ kType,
+ false);
+ // Texture with a sized float internalformat is allowed as an attachment
+ // since float color attachment extension is present.
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ framebuffer_->IsPossiblyComplete());
+}
+
+TEST_F(FramebufferInfoTest, UnbindRenderbuffer) {
+ const GLuint kRenderbufferClient1Id = 33;
+ const GLuint kRenderbufferService1Id = 333;
+ const GLuint kRenderbufferClient2Id = 34;
+ const GLuint kRenderbufferService2Id = 334;
+
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient1Id, kRenderbufferService1Id);
+ Renderbuffer* renderbuffer1 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient2Id, kRenderbufferService2Id);
+ Renderbuffer* renderbuffer2 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient2Id);
+ ASSERT_TRUE(renderbuffer2 != NULL);
+
+ // Attach to 2 attachment points.
+ framebuffer_->AttachRenderbuffer(GL_COLOR_ATTACHMENT0, renderbuffer1);
+ framebuffer_->AttachRenderbuffer(GL_DEPTH_ATTACHMENT, renderbuffer1);
+ // Check they were attached.
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) != NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) != NULL);
+ // Unbind unattached renderbuffer.
+ framebuffer_->UnbindRenderbuffer(GL_RENDERBUFFER, renderbuffer2);
+ // Should be no-op.
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) != NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) != NULL);
+ // Unbind renderbuffer.
+ framebuffer_->UnbindRenderbuffer(GL_RENDERBUFFER, renderbuffer1);
+ // Check they were detached
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) == NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) == NULL);
+}
+
+TEST_F(FramebufferInfoTest, UnbindTexture) {
+ const GLuint kTextureClient1Id = 33;
+ const GLuint kTextureService1Id = 333;
+ const GLuint kTextureClient2Id = 34;
+ const GLuint kTextureService2Id = 334;
+ const GLenum kTarget1 = GL_TEXTURE_2D;
+ const GLint kLevel1 = 0;
+ const GLint kSamples1 = 0;
+
+ texture_manager_->CreateTexture(kTextureClient1Id, kTextureService1Id);
+ scoped_refptr<TextureRef> texture1(
+ texture_manager_->GetTexture(kTextureClient1Id));
+ ASSERT_TRUE(texture1.get() != NULL);
+ texture_manager_->CreateTexture(kTextureClient2Id, kTextureService2Id);
+ scoped_refptr<TextureRef> texture2(
+ texture_manager_->GetTexture(kTextureClient2Id));
+ ASSERT_TRUE(texture2.get() != NULL);
+
+ // Attach to 2 attachment points.
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture1.get(), kTarget1, kLevel1, kSamples1);
+ framebuffer_->AttachTexture(
+ GL_DEPTH_ATTACHMENT, texture1.get(), kTarget1, kLevel1, kSamples1);
+ // Check they were attached.
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) != NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) != NULL);
+ // Unbind unattached texture.
+ framebuffer_->UnbindTexture(kTarget1, texture2.get());
+ // Should be no-op.
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) != NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) != NULL);
+ // Unbind texture.
+ framebuffer_->UnbindTexture(kTarget1, texture1.get());
+ // Check they were detached
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_COLOR_ATTACHMENT0) == NULL);
+ EXPECT_TRUE(framebuffer_->GetAttachment(GL_DEPTH_ATTACHMENT) == NULL);
+}
+
+TEST_F(FramebufferInfoTest, IsCompleteMarkAsComplete) {
+ const GLuint kRenderbufferClient1Id = 33;
+ const GLuint kRenderbufferService1Id = 333;
+ const GLuint kTextureClient2Id = 34;
+ const GLuint kTextureService2Id = 334;
+ const GLenum kTarget1 = GL_TEXTURE_2D;
+ const GLint kLevel1 = 0;
+ const GLint kSamples1 = 0;
+
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient1Id, kRenderbufferService1Id);
+ Renderbuffer* renderbuffer1 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+ texture_manager_->CreateTexture(kTextureClient2Id, kTextureService2Id);
+ scoped_refptr<TextureRef> texture2(
+ texture_manager_->GetTexture(kTextureClient2Id));
+ ASSERT_TRUE(texture2.get() != NULL);
+
+  // Check MarkAsComplete marks as complete.
+ manager_.MarkAsComplete(framebuffer_);
+ EXPECT_TRUE(manager_.IsComplete(framebuffer_));
+
+  // Check that attaching marks as not complete.
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture2.get(), kTarget1, kLevel1, kSamples1);
+ EXPECT_FALSE(manager_.IsComplete(framebuffer_));
+ manager_.MarkAsComplete(framebuffer_);
+ EXPECT_TRUE(manager_.IsComplete(framebuffer_));
+ framebuffer_->AttachRenderbuffer(GL_DEPTH_ATTACHMENT, renderbuffer1);
+ EXPECT_FALSE(manager_.IsComplete(framebuffer_));
+
+ // Check MarkAttachmentsAsCleared marks as complete.
+ manager_.MarkAttachmentsAsCleared(
+ framebuffer_, &renderbuffer_manager_, texture_manager_.get());
+ EXPECT_TRUE(manager_.IsComplete(framebuffer_));
+
+ // Check Unbind marks as not complete.
+ framebuffer_->UnbindRenderbuffer(GL_RENDERBUFFER, renderbuffer1);
+ EXPECT_FALSE(manager_.IsComplete(framebuffer_));
+ manager_.MarkAsComplete(framebuffer_);
+ EXPECT_TRUE(manager_.IsComplete(framebuffer_));
+ framebuffer_->UnbindTexture(kTarget1, texture2.get());
+ EXPECT_FALSE(manager_.IsComplete(framebuffer_));
+}
+
+TEST_F(FramebufferInfoTest, GetStatus) {
+ const GLuint kRenderbufferClient1Id = 33;
+ const GLuint kRenderbufferService1Id = 333;
+ const GLuint kTextureClient2Id = 34;
+ const GLuint kTextureService2Id = 334;
+ const GLenum kTarget1 = GL_TEXTURE_2D;
+ const GLint kLevel1 = 0;
+ const GLint kSamples1 = 0;
+
+ renderbuffer_manager_.CreateRenderbuffer(
+ kRenderbufferClient1Id, kRenderbufferService1Id);
+ Renderbuffer* renderbuffer1 =
+ renderbuffer_manager_.GetRenderbuffer(kRenderbufferClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+ texture_manager_->CreateTexture(kTextureClient2Id, kTextureService2Id);
+ scoped_refptr<TextureRef> texture2(
+ texture_manager_->GetTexture(kTextureClient2Id));
+ ASSERT_TRUE(texture2.get() != NULL);
+ texture_manager_->SetTarget(texture2.get(), GL_TEXTURE_2D);
+
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ framebuffer_->GetStatus(texture_manager_.get(), GL_FRAMEBUFFER);
+
+  // Check a second call for the same type does not call anything.
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ framebuffer_->GetStatus(texture_manager_.get(), GL_FRAMEBUFFER);
+
+ // Check changing the attachments calls CheckFramebufferStatus.
+ framebuffer_->AttachTexture(
+ GL_COLOR_ATTACHMENT0, texture2.get(), kTarget1, kLevel1, kSamples1);
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE)).RetiresOnSaturation();
+ framebuffer_->GetStatus(texture_manager_.get(), GL_FRAMEBUFFER);
+
+ // Check a second call for the same type does not call anything.
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ framebuffer_->GetStatus(texture_manager_.get(), GL_FRAMEBUFFER);
+
+ // Check a second call with a different target calls CheckFramebufferStatus.
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check a second call for the same type does not call anything.
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check adding another attachment calls CheckFramebufferStatus.
+ framebuffer_->AttachRenderbuffer(GL_DEPTH_ATTACHMENT, renderbuffer1);
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check a second call for the same type does not call anything.
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+  // Check changing the format calls CheckFramebufferStatus.
+ TestHelper::SetTexParameteriWithExpectations(gl_.get(),
+ error_state_.get(),
+ texture_manager_.get(),
+ texture2.get(),
+ GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE,
+ GL_NO_ERROR);
+
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+  // Check that since it did not return FRAMEBUFFER_COMPLETE, it calls
+  // CheckFramebufferStatus again.
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+ // Check putting it back does not call CheckFramebufferStatus.
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ TestHelper::SetTexParameteriWithExpectations(gl_.get(),
+ error_state_.get(),
+ texture_manager_.get(),
+ texture2.get(),
+ GL_TEXTURE_WRAP_S,
+ GL_REPEAT,
+ GL_NO_ERROR);
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+
+  // Check unbinding does not call CheckFramebufferStatus.
+ framebuffer_->UnbindRenderbuffer(GL_RENDERBUFFER, renderbuffer1);
+ if (!framebuffer_->AllowFramebufferComboCompleteMapForTesting()) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ framebuffer_->GetStatus(texture_manager_.get(), GL_READ_FRAMEBUFFER);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/gl_context_virtual.cc b/gpu/command_buffer/service/gl_context_virtual.cc
new file mode 100644
index 0000000..a29e540
--- /dev/null
+++ b/gpu/command_buffer/service/gl_context_virtual.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gl_context_virtual.h"
+
+#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "ui/gl/gl_surface.h"
+
+namespace gpu {
+
+GLContextVirtual::GLContextVirtual(
+ gfx::GLShareGroup* share_group,
+ gfx::GLContext* shared_context,
+ base::WeakPtr<gles2::GLES2Decoder> decoder)
+ : GLContext(share_group),
+ shared_context_(shared_context),
+ display_(NULL),
+ decoder_(decoder) {
+}
+
+gfx::Display* GLContextVirtual::display() {
+ return display_;
+}
+
+bool GLContextVirtual::Initialize(
+ gfx::GLSurface* compatible_surface, gfx::GpuPreference gpu_preference) {
+ SetGLStateRestorer(new GLStateRestorerImpl(decoder_));
+
+ display_ = static_cast<gfx::Display*>(compatible_surface->GetDisplay());
+
+ // Virtual contexts obviously can't make a context that is compatible
+ // with the surface (the context already exists), but we do need to
+ // make a context current for SetupForVirtualization() below.
+ if (!IsCurrent(compatible_surface)) {
+ if (!shared_context_->MakeCurrent(compatible_surface)) {
+ // This is likely an error. The real context should be made as
+ // compatible with all required surfaces when it was created.
+ LOG(ERROR) << "Failed MakeCurrent(compatible_surface)";
+ return false;
+ }
+ }
+
+ shared_context_->SetupForVirtualization();
+ shared_context_->MakeVirtuallyCurrent(this, compatible_surface);
+ return true;
+}
+
+void GLContextVirtual::Destroy() {
+ shared_context_->OnReleaseVirtuallyCurrent(this);
+ shared_context_ = NULL;
+ display_ = NULL;
+}
+
+bool GLContextVirtual::MakeCurrent(gfx::GLSurface* surface) {
+ if (decoder_.get())
+ return shared_context_->MakeVirtuallyCurrent(this, surface);
+
+ LOG(ERROR) << "Trying to make virtual context current without decoder.";
+ return false;
+}
+
+void GLContextVirtual::ReleaseCurrent(gfx::GLSurface* surface) {
+ if (IsCurrent(surface)) {
+ shared_context_->OnReleaseVirtuallyCurrent(this);
+ shared_context_->ReleaseCurrent(surface);
+ }
+}
+
+bool GLContextVirtual::IsCurrent(gfx::GLSurface* surface) {
+ // If it's a real surface it needs to be current.
+ if (surface &&
+ !surface->IsOffscreen())
+ return shared_context_->IsCurrent(surface);
+
+  // Otherwise, only ensure the context itself is current.
+ return shared_context_->IsCurrent(NULL);
+}
+
+void* GLContextVirtual::GetHandle() {
+ return shared_context_->GetHandle();
+}
+
+void GLContextVirtual::SetSwapInterval(int interval) {
+ shared_context_->SetSwapInterval(interval);
+}
+
+std::string GLContextVirtual::GetExtensions() {
+ return shared_context_->GetExtensions();
+}
+
+bool GLContextVirtual::GetTotalGpuMemory(size_t* bytes) {
+ return shared_context_->GetTotalGpuMemory(bytes);
+}
+
+void GLContextVirtual::SetSafeToForceGpuSwitch() {
+ // TODO(ccameron): This will not work if two contexts that disagree
+ // about whether or not forced gpu switching may be done both share
+ // the same underlying shared_context_.
+ return shared_context_->SetSafeToForceGpuSwitch();
+}
+
+bool GLContextVirtual::WasAllocatedUsingRobustnessExtension() {
+ return shared_context_->WasAllocatedUsingRobustnessExtension();
+}
+
+void GLContextVirtual::SetUnbindFboOnMakeCurrent() {
+ shared_context_->SetUnbindFboOnMakeCurrent();
+}
+
+GLContextVirtual::~GLContextVirtual() {
+ Destroy();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gl_context_virtual.h b/gpu/command_buffer/service/gl_context_virtual.h
new file mode 100644
index 0000000..fdecbdd
--- /dev/null
+++ b/gpu/command_buffer/service/gl_context_virtual.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GL_CONTEXT_VIRTUAL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GL_CONTEXT_VIRTUAL_H_
+
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "gpu/gpu_export.h"
+#include "ui/gl/gl_context.h"
+
+namespace gfx {
+class Display;
+class GLSurface;
+class GLStateRestorer;
+}
+
+namespace gpu {
+namespace gles2 {
+class GLES2Decoder;
+}
+
+// Encapsulates a virtual OpenGL context.
+class GPU_EXPORT GLContextVirtual : public gfx::GLContext {
+ public:
+ GLContextVirtual(
+ gfx::GLShareGroup* share_group,
+ gfx::GLContext* shared_context,
+ base::WeakPtr<gles2::GLES2Decoder> decoder);
+
+ gfx::Display* display();
+
+ // Implement GLContext.
+ virtual bool Initialize(
+ gfx::GLSurface* compatible_surface,
+ gfx::GpuPreference gpu_preference) OVERRIDE;
+ virtual void Destroy() OVERRIDE;
+ virtual bool MakeCurrent(gfx::GLSurface* surface) OVERRIDE;
+ virtual void ReleaseCurrent(gfx::GLSurface* surface) OVERRIDE;
+ virtual bool IsCurrent(gfx::GLSurface* surface) OVERRIDE;
+ virtual void* GetHandle() OVERRIDE;
+ virtual void SetSwapInterval(int interval) OVERRIDE;
+ virtual std::string GetExtensions() OVERRIDE;
+ virtual bool GetTotalGpuMemory(size_t* bytes) OVERRIDE;
+ virtual void SetSafeToForceGpuSwitch() OVERRIDE;
+ virtual bool WasAllocatedUsingRobustnessExtension() OVERRIDE;
+ virtual void SetUnbindFboOnMakeCurrent() OVERRIDE;
+
+ protected:
+ virtual ~GLContextVirtual();
+
+ private:
+ scoped_refptr<gfx::GLContext> shared_context_;
+ gfx::Display* display_;
+ base::WeakPtr<gles2::GLES2Decoder> decoder_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLContextVirtual);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GL_CONTEXT_VIRTUAL_H_
diff --git a/gpu/command_buffer/service/gl_state_restorer_impl.cc b/gpu/command_buffer/service/gl_state_restorer_impl.cc
new file mode 100644
index 0000000..5fbd425
--- /dev/null
+++ b/gpu/command_buffer/service/gl_state_restorer_impl.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+namespace gpu {
+
+GLStateRestorerImpl::GLStateRestorerImpl(
+ base::WeakPtr<gles2::GLES2Decoder> decoder)
+ : decoder_(decoder) {
+}
+
+GLStateRestorerImpl::~GLStateRestorerImpl() {
+}
+
+bool GLStateRestorerImpl::IsInitialized() {
+ DCHECK(decoder_.get());
+ return decoder_->initialized();
+}
+
+void GLStateRestorerImpl::RestoreState(const gfx::GLStateRestorer* prev_state) {
+ DCHECK(decoder_.get());
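+  // |prev_state| is either NULL or another GLStateRestorerImpl; the
+  // static_cast below relies on that invariant.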
+ const GLStateRestorerImpl* restorer_impl =
+ static_cast<const GLStateRestorerImpl*>(prev_state);
+ decoder_->RestoreState(
+ restorer_impl ? restorer_impl->GetContextState() : NULL);
+}
+
+void GLStateRestorerImpl::RestoreAllTextureUnitBindings() {
+ DCHECK(decoder_.get());
+ decoder_->RestoreAllTextureUnitBindings(NULL);
+}
+
+void GLStateRestorerImpl::RestoreActiveTextureUnitBinding(unsigned int target) {
+ DCHECK(decoder_.get());
+ decoder_->RestoreActiveTextureUnitBinding(target);
+}
+
+void GLStateRestorerImpl::RestoreFramebufferBindings() {
+ DCHECK(decoder_.get());
+ decoder_->RestoreFramebufferBindings();
+}
+
+const gles2::ContextState* GLStateRestorerImpl::GetContextState() const {
+ DCHECK(decoder_.get());
+ return decoder_->GetContextState();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gl_state_restorer_impl.h b/gpu/command_buffer/service/gl_state_restorer_impl.h
new file mode 100644
index 0000000..73534b8
--- /dev/null
+++ b/gpu/command_buffer/service/gl_state_restorer_impl.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the GLStateRestorerImpl class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GL_STATE_RESTORER_IMPL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GL_STATE_RESTORER_IMPL_H_
+
+#include "base/compiler_specific.h"
+#include "base/memory/weak_ptr.h"
+#include "gpu/gpu_export.h"
+#include "ui/gl/gl_state_restorer.h"
+
+namespace gpu {
+namespace gles2 {
+class GLES2Decoder;
+struct ContextState;
+}
+
+// This class implements a GLStateRestorer that forwards to a GLES2Decoder.
+class GPU_EXPORT GLStateRestorerImpl : public gfx::GLStateRestorer {
+ public:
+ explicit GLStateRestorerImpl(base::WeakPtr<gles2::GLES2Decoder> decoder);
+ virtual ~GLStateRestorerImpl();
+
+ virtual bool IsInitialized() OVERRIDE;
+ virtual void RestoreState(const gfx::GLStateRestorer* prev_state) OVERRIDE;
+ virtual void RestoreAllTextureUnitBindings() OVERRIDE;
+ virtual void RestoreActiveTextureUnitBinding(unsigned int target) OVERRIDE;
+ virtual void RestoreFramebufferBindings() OVERRIDE;
+
+ private:
+ const gles2::ContextState* GetContextState() const;
+ base::WeakPtr<gles2::GLES2Decoder> decoder_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLStateRestorerImpl);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GL_STATE_RESTORER_IMPL_H_
diff --git a/gpu/command_buffer/service/gl_surface_mock.cc b/gpu/command_buffer/service/gl_surface_mock.cc
new file mode 100644
index 0000000..9706a18
--- /dev/null
+++ b/gpu/command_buffer/service/gl_surface_mock.cc
@@ -0,0 +1,14 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+
+namespace gpu {
+
+GLSurfaceMock::GLSurfaceMock() {
+}
+
+GLSurfaceMock::~GLSurfaceMock() {
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gl_surface_mock.h b/gpu/command_buffer/service/gl_surface_mock.h
new file mode 100644
index 0000000..0652be6
--- /dev/null
+++ b/gpu/command_buffer/service/gl_surface_mock.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GL_SURFACE_MOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GL_SURFACE_MOCK_H_
+
+#include "ui/gl/gl_surface.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+
+class GLSurfaceMock : public gfx::GLSurface {
+ public:
+ GLSurfaceMock();
+
+ MOCK_METHOD0(Initialize, bool());
+ MOCK_METHOD0(Destroy, void());
+ MOCK_METHOD1(Resize, bool(const gfx::Size& size));
+ MOCK_METHOD0(IsOffscreen, bool());
+ MOCK_METHOD0(SwapBuffers, bool());
+ MOCK_METHOD4(PostSubBuffer, bool(int x, int y, int width, int height));
+ MOCK_METHOD0(SupportsPostSubBuffer, bool());
+ MOCK_METHOD0(GetSize, gfx::Size());
+ MOCK_METHOD0(GetHandle, void*());
+ MOCK_METHOD0(GetBackingFrameBufferObject, unsigned int());
+ MOCK_METHOD1(OnMakeCurrent, bool(gfx::GLContext* context));
+ MOCK_METHOD1(SetBackbufferAllocation, bool(bool allocated));
+ MOCK_METHOD1(SetFrontbufferAllocation, void(bool allocated));
+ MOCK_METHOD0(GetShareHandle, void*());
+ MOCK_METHOD0(GetDisplay, void*());
+ MOCK_METHOD0(GetConfig, void*());
+ MOCK_METHOD0(GetFormat, unsigned());
+
+ protected:
+ virtual ~GLSurfaceMock();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(GLSurfaceMock);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GL_SURFACE_MOCK_H_
diff --git a/gpu/command_buffer/service/gl_utils.h b/gpu/command_buffer/service/gl_utils.h
new file mode 100644
index 0000000..ade4a37
--- /dev/null
+++ b/gpu/command_buffer/service/gl_utils.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file includes all the necessary GL headers and implements some useful
+// utilities.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GL_UTILS_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GL_UTILS_H_
+
+#include "build/build_config.h"
+#include "ui/gl/gl_bindings.h"
+
+// Define this for extra GL error debugging (slower).
+// #define GL_ERROR_DEBUGGING
+#ifdef GL_ERROR_DEBUGGING
+#define CHECK_GL_ERROR() do { \
+ GLenum gl_error = glGetError(); \
+ LOG_IF(ERROR, gl_error != GL_NO_ERROR) << "GL Error :" << gl_error; \
+ } while (0)
+#else // GL_ERROR_DEBUGGING
+#define CHECK_GL_ERROR() void(0)
+#endif // GL_ERROR_DEBUGGING
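+
+// Example (illustrative): place CHECK_GL_ERROR() after a suspect GL call,
+//   glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA,
+//                GL_UNSIGNED_BYTE, pixels);
+//   CHECK_GL_ERROR();
+// With GL_ERROR_DEBUGGING defined this logs any pending GL error; otherwise
+// it compiles to a no-op.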
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GL_UTILS_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
new file mode 100644
index 0000000..f98ca2e
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -0,0 +1,482 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
+
+#include <algorithm>
+
+#include "base/basictypes.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#define SHADER(src) \
+ "#ifdef GL_ES\n" \
+ "precision mediump float;\n" \
+ "#define TexCoordPrecision mediump\n" \
+ "#else\n" \
+ "#define TexCoordPrecision\n" \
+ "#endif\n" #src
+#define SHADER_2D(src) \
+ "#define SamplerType sampler2D\n" \
+ "#define TextureLookup texture2D\n" SHADER(src)
+#define SHADER_RECTANGLE_ARB(src) \
+ "#define SamplerType samplerRect\n" \
+ "#define TextureLookup textureRect\n" SHADER(src)
+#define SHADER_EXTERNAL_OES(src) \
+ "#extension GL_OES_EGL_image_external : require\n" \
+ "#define SamplerType samplerExternalOES\n" \
+ "#define TextureLookup texture2D\n" SHADER(src)
+#define FRAGMENT_SHADERS(src) \
+ SHADER_2D(src), SHADER_RECTANGLE_ARB(src), SHADER_EXTERNAL_OES(src)
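+
+// Illustrative expansion: SHADER_2D(void main() { ... }) concatenates into a
+// single GLSL source string of the form
+//   "#define SamplerType sampler2D\n"
+//   "#define TextureLookup texture2D\n"
+//   "#ifdef GL_ES\n ... #endif\n"
+//   "void main() { ... }"
+// i.e. the sampler #defines and the precision block are prepended and the
+// macro argument is stringized (#src) to become the shader body.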
+
+namespace {
+
+enum VertexShaderId {
+ VERTEX_SHADER_COPY_TEXTURE,
+ VERTEX_SHADER_COPY_TEXTURE_FLIP_Y,
+ NUM_VERTEX_SHADERS,
+};
+
+enum FragmentShaderId {
+ FRAGMENT_SHADER_COPY_TEXTURE_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_EXTERNAL_OES,
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_EXTERNAL_OES,
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_EXTERNAL_OES,
+ NUM_FRAGMENT_SHADERS,
+};
+
+const char* vertex_shader_source[NUM_VERTEX_SHADERS] = {
+ // VERTEX_SHADER_COPY_TEXTURE
+ SHADER(
+ uniform mat4 u_matrix;
+ uniform vec2 u_half_size;
+ attribute vec4 a_position;
+ varying TexCoordPrecision vec2 v_uv;
+ void main(void) {
+ gl_Position = u_matrix * a_position;
+ v_uv = a_position.xy * vec2(u_half_size.s, u_half_size.t) +
+ vec2(u_half_size.s, u_half_size.t);
+ }),
+ // VERTEX_SHADER_COPY_TEXTURE_FLIP_Y
+ SHADER(
+ uniform mat4 u_matrix;
+ uniform vec2 u_half_size;
+ attribute vec4 a_position;
+ varying TexCoordPrecision vec2 v_uv;
+ void main(void) {
+ gl_Position = u_matrix * a_position;
+ v_uv = a_position.xy * vec2(u_half_size.s, -u_half_size.t) +
+ vec2(u_half_size.s, u_half_size.t);
+ }),
+};
+
+const char* fragment_shader_source[NUM_FRAGMENT_SHADERS] = {
+ // FRAGMENT_SHADER_COPY_TEXTURE_*
+ FRAGMENT_SHADERS(
+ uniform SamplerType u_sampler;
+ varying TexCoordPrecision vec2 v_uv;
+ void main(void) {
+ gl_FragColor = TextureLookup(u_sampler, v_uv.st);
+ }),
+ // FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_*
+ FRAGMENT_SHADERS(
+ uniform SamplerType u_sampler;
+ varying TexCoordPrecision vec2 v_uv;
+ void main(void) {
+ gl_FragColor = TextureLookup(u_sampler, v_uv.st);
+ gl_FragColor.rgb *= gl_FragColor.a;
+ }),
+ // FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_*
+ FRAGMENT_SHADERS(
+ uniform SamplerType u_sampler;
+ varying TexCoordPrecision vec2 v_uv;
+ void main(void) {
+ gl_FragColor = TextureLookup(u_sampler, v_uv.st);
+ if (gl_FragColor.a > 0.0)
+ gl_FragColor.rgb /= gl_FragColor.a;
+ }),
+};
+
+// Returns the correct vertex shader id to evaluate the copy operation for
+// the CHROMIUM_flipy setting.
+VertexShaderId GetVertexShaderId(bool flip_y) {
+ // bit 0: flip y
+ static VertexShaderId shader_ids[] = {
+ VERTEX_SHADER_COPY_TEXTURE,
+ VERTEX_SHADER_COPY_TEXTURE_FLIP_Y,
+ };
+
+ unsigned index = flip_y ? 1 : 0;
+ return shader_ids[index];
+}
+
+// Returns the correct fragment shader id to evaluate the copy operation for
+// the premultiply alpha pixel store settings and target.
+FragmentShaderId GetFragmentShaderId(bool premultiply_alpha,
+ bool unpremultiply_alpha,
+ GLenum target) {
+ enum {
+ SAMPLER_2D,
+ SAMPLER_RECTANGLE_ARB,
+ SAMPLER_EXTERNAL_OES,
+ NUM_SAMPLERS
+ };
+
+ // bit 0: premultiply alpha
+ // bit 1: unpremultiply alpha
+ static FragmentShaderId shader_ids[][NUM_SAMPLERS] = {
+ {
+ FRAGMENT_SHADER_COPY_TEXTURE_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_EXTERNAL_OES,
+ },
+ {
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_EXTERNAL_OES,
+ },
+ {
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_EXTERNAL_OES,
+ },
+ {
+ FRAGMENT_SHADER_COPY_TEXTURE_2D,
+ FRAGMENT_SHADER_COPY_TEXTURE_RECTANGLE_ARB,
+ FRAGMENT_SHADER_COPY_TEXTURE_EXTERNAL_OES,
+ }};
+
+ unsigned index = (premultiply_alpha ? (1 << 0) : 0) |
+ (unpremultiply_alpha ? (1 << 1) : 0);
+
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return shader_ids[index][SAMPLER_2D];
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return shader_ids[index][SAMPLER_RECTANGLE_ARB];
+ case GL_TEXTURE_EXTERNAL_OES:
+ return shader_ids[index][SAMPLER_EXTERNAL_OES];
+ default:
+ break;
+ }
+
+ NOTREACHED();
+ return shader_ids[0][SAMPLER_2D];
+}
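+
+// For example, premultiply_alpha = false and unpremultiply_alpha = true with a
+// GL_TEXTURE_2D target yields index 2 and selects
+// FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_2D; setting both flags
+// (index 3) maps back to the plain copy shaders because the two conversions
+// cancel out.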
+
+void CompileShader(GLuint shader, const char* shader_source) {
+ glShaderSource(shader, 1, &shader_source, 0);
+ glCompileShader(shader);
+#ifndef NDEBUG
+ GLint compile_status;
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_status);
+ if (GL_TRUE != compile_status)
+ DLOG(ERROR) << "CopyTextureCHROMIUM: shader compilation failure.";
+#endif
+}
+
+void DeleteShader(GLuint shader) {
+ if (shader)
+ glDeleteShader(shader);
+}
+
+bool BindFramebufferTexture2D(GLenum target,
+ GLuint texture_id,
+ GLint level,
+ GLuint framebuffer) {
+ DCHECK(target == GL_TEXTURE_2D || target == GL_TEXTURE_RECTANGLE_ARB);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(target, texture_id);
+ // NVidia drivers require texture settings to be a certain way
+ // or they won't report FRAMEBUFFER_COMPLETE.
+ glTexParameterf(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, framebuffer);
+ glFramebufferTexture2DEXT(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, texture_id, level);
+
+#ifndef NDEBUG
+ GLenum fb_status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
+ if (GL_FRAMEBUFFER_COMPLETE != fb_status) {
+ DLOG(ERROR) << "CopyTextureCHROMIUM: Incomplete framebuffer.";
+ return false;
+ }
+#endif
+ return true;
+}
+
+void DoCopyTexImage2D(const gpu::gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLuint framebuffer) {
+ DCHECK(source_target == GL_TEXTURE_2D ||
+ source_target == GL_TEXTURE_RECTANGLE_ARB);
+ if (BindFramebufferTexture2D(
+ source_target, source_id, 0 /* level */, framebuffer)) {
+ glBindTexture(GL_TEXTURE_2D, dest_id);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glCopyTexImage2D(GL_TEXTURE_2D,
+ dest_level,
+ dest_internal_format,
+ 0 /* x */,
+ 0 /* y */,
+ width,
+ height,
+ 0 /* border */);
+ }
+
+ decoder->RestoreTextureState(source_id);
+ decoder->RestoreTextureState(dest_id);
+ decoder->RestoreTextureUnitBindings(0);
+ decoder->RestoreActiveTexture();
+ decoder->RestoreFramebufferBindings();
+}
+
+} // namespace
+
+namespace gpu {
+
+CopyTextureCHROMIUMResourceManager::CopyTextureCHROMIUMResourceManager()
+ : initialized_(false),
+ vertex_shaders_(NUM_VERTEX_SHADERS, 0u),
+ fragment_shaders_(NUM_FRAGMENT_SHADERS, 0u),
+ buffer_id_(0u),
+ framebuffer_(0u) {}
+
+CopyTextureCHROMIUMResourceManager::~CopyTextureCHROMIUMResourceManager() {
+ DCHECK(!buffer_id_);
+ DCHECK(!framebuffer_);
+}
+
+void CopyTextureCHROMIUMResourceManager::Initialize(
+ const gles2::GLES2Decoder* decoder) {
+ COMPILE_ASSERT(
+ kVertexPositionAttrib == 0u,
+ Position_attribs_must_be_0);
+ DCHECK(!buffer_id_);
+ DCHECK(!framebuffer_);
+ DCHECK(programs_.empty());
+
+ // Initialize all of the GPU resources required to perform the copy.
+ glGenBuffersARB(1, &buffer_id_);
+ glBindBuffer(GL_ARRAY_BUFFER, buffer_id_);
+ const GLfloat kQuadVertices[] = {-1.0f, -1.0f,
+ 1.0f, -1.0f,
+ 1.0f, 1.0f,
+ -1.0f, 1.0f};
+ glBufferData(
+ GL_ARRAY_BUFFER, sizeof(kQuadVertices), kQuadVertices, GL_STATIC_DRAW);
+
+ glGenFramebuffersEXT(1, &framebuffer_);
+
+ decoder->RestoreBufferBindings();
+
+ initialized_ = true;
+}
+
+void CopyTextureCHROMIUMResourceManager::Destroy() {
+ if (!initialized_)
+ return;
+
+ glDeleteFramebuffersEXT(1, &framebuffer_);
+ framebuffer_ = 0;
+
+ std::for_each(vertex_shaders_.begin(), vertex_shaders_.end(), DeleteShader);
+ std::for_each(
+ fragment_shaders_.begin(), fragment_shaders_.end(), DeleteShader);
+
+ for (ProgramMap::const_iterator it = programs_.begin(); it != programs_.end();
+ ++it) {
+ const ProgramInfo& info = it->second;
+ glDeleteProgram(info.program);
+ }
+
+ glDeleteBuffersARB(1, &buffer_id_);
+ buffer_id_ = 0;
+}
+
+void CopyTextureCHROMIUMResourceManager::DoCopyTexture(
+ const gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLenum source_internal_format,
+ GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_internal_format,
+ GLsizei width,
+ GLsizei height,
+ bool flip_y,
+ bool premultiply_alpha,
+ bool unpremultiply_alpha) {
+ bool premultiply_alpha_change = premultiply_alpha ^ unpremultiply_alpha;
+ // GL_INVALID_OPERATION is generated if the currently bound framebuffer's
+ // format does not contain a superset of the components required by the base
+ // format of internalformat.
+ // https://www.khronos.org/opengles/sdk/docs/man/xhtml/glCopyTexImage2D.xml
+ bool source_format_contain_superset_of_dest_format =
+ source_internal_format == dest_internal_format ||
+ (source_internal_format == GL_RGBA && dest_internal_format == GL_RGB);
+ // GL_TEXTURE_RECTANGLE_ARB on FBO is supported by OpenGL, not GLES2,
+ // so restrict this to GL_TEXTURE_2D.
+ if (source_target == GL_TEXTURE_2D && !flip_y && !premultiply_alpha_change &&
+ source_format_contain_superset_of_dest_format) {
+ DoCopyTexImage2D(decoder,
+ source_target,
+ source_id,
+ dest_id,
+ dest_level,
+ dest_internal_format,
+ width,
+ height,
+ framebuffer_);
+ return;
+ }
+
+ // Use default transform matrix if no transform passed in.
+ const static GLfloat default_matrix[16] = {1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f};
+ DoCopyTextureWithTransform(decoder,
+ source_target,
+ source_id,
+ dest_id,
+ dest_level,
+ width,
+ height,
+ flip_y,
+ premultiply_alpha,
+ unpremultiply_alpha,
+ default_matrix);
+}
+
+void CopyTextureCHROMIUMResourceManager::DoCopyTextureWithTransform(
+ const gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLuint dest_id,
+ GLint dest_level,
+ GLsizei width,
+ GLsizei height,
+ bool flip_y,
+ bool premultiply_alpha,
+ bool unpremultiply_alpha,
+ const GLfloat transform_matrix[16]) {
+ DCHECK(source_target == GL_TEXTURE_2D ||
+ source_target == GL_TEXTURE_RECTANGLE_ARB ||
+ source_target == GL_TEXTURE_EXTERNAL_OES);
+ if (!initialized_) {
+ DLOG(ERROR) << "CopyTextureCHROMIUM: Uninitialized manager.";
+ return;
+ }
+
+ VertexShaderId vertex_shader_id = GetVertexShaderId(flip_y);
+ DCHECK_LT(static_cast<size_t>(vertex_shader_id), vertex_shaders_.size());
+ FragmentShaderId fragment_shader_id = GetFragmentShaderId(
+ premultiply_alpha, unpremultiply_alpha, source_target);
+ DCHECK_LT(static_cast<size_t>(fragment_shader_id), fragment_shaders_.size());
+
+ ProgramMapKey key(vertex_shader_id, fragment_shader_id);
+ ProgramInfo* info = &programs_[key];
+ // Create program if necessary.
+ if (!info->program) {
+ info->program = glCreateProgram();
+ GLuint* vertex_shader = &vertex_shaders_[vertex_shader_id];
+ if (!*vertex_shader) {
+ *vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+ CompileShader(*vertex_shader, vertex_shader_source[vertex_shader_id]);
+ }
+ glAttachShader(info->program, *vertex_shader);
+ GLuint* fragment_shader = &fragment_shaders_[fragment_shader_id];
+ if (!*fragment_shader) {
+ *fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
+ CompileShader(*fragment_shader,
+ fragment_shader_source[fragment_shader_id]);
+ }
+ glAttachShader(info->program, *fragment_shader);
+ glBindAttribLocation(info->program, kVertexPositionAttrib, "a_position");
+ glLinkProgram(info->program);
+#ifndef NDEBUG
+ GLint linked;
+ glGetProgramiv(info->program, GL_LINK_STATUS, &linked);
+ if (!linked)
+ DLOG(ERROR) << "CopyTextureCHROMIUM: program link failure.";
+#endif
+ info->matrix_handle = glGetUniformLocation(info->program, "u_matrix");
+ info->half_size_handle = glGetUniformLocation(info->program, "u_half_size");
+ info->sampler_handle = glGetUniformLocation(info->program, "u_sampler");
+ }
+ glUseProgram(info->program);
+
+#ifndef NDEBUG
+ glValidateProgram(info->program);
+ GLint validation_status;
+ glGetProgramiv(info->program, GL_VALIDATE_STATUS, &validation_status);
+ if (GL_TRUE != validation_status) {
+ DLOG(ERROR) << "CopyTextureCHROMIUM: Invalid shader.";
+ return;
+ }
+#endif
+
+ glUniformMatrix4fv(info->matrix_handle, 1, GL_FALSE, transform_matrix);
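+  // GL_TEXTURE_RECTANGLE_ARB is sampled with non-normalized texel
+  // coordinates, so the quad's [-1, 1] positions are scaled by the actual
+  // half width/height; other targets use normalized coordinates and 0.5.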
+ if (source_target == GL_TEXTURE_RECTANGLE_ARB)
+ glUniform2f(info->half_size_handle, width / 2.0f, height / 2.0f);
+ else
+ glUniform2f(info->half_size_handle, 0.5f, 0.5f);
+
+ if (BindFramebufferTexture2D(
+ GL_TEXTURE_2D, dest_id, dest_level, framebuffer_)) {
+ decoder->ClearAllAttributes();
+ glEnableVertexAttribArray(kVertexPositionAttrib);
+
+ glBindBuffer(GL_ARRAY_BUFFER, buffer_id_);
+ glVertexAttribPointer(kVertexPositionAttrib, 2, GL_FLOAT, GL_FALSE, 0, 0);
+
+ glUniform1i(info->sampler_handle, 0);
+
+ glBindTexture(source_target, source_id);
+ glTexParameterf(source_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(source_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(source_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(source_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+ glDisable(GL_DEPTH_TEST);
+ glDisable(GL_SCISSOR_TEST);
+ glDisable(GL_STENCIL_TEST);
+ glDisable(GL_CULL_FACE);
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glDepthMask(GL_FALSE);
+ glDisable(GL_BLEND);
+
+ glViewport(0, 0, width, height);
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+ }
+
+ decoder->RestoreAllAttributes();
+ decoder->RestoreTextureState(source_id);
+ decoder->RestoreTextureState(dest_id);
+ decoder->RestoreTextureUnitBindings(0);
+ decoder->RestoreActiveTexture();
+ decoder->RestoreProgramBindings();
+ decoder->RestoreBufferBindings();
+ decoder->RestoreFramebufferBindings();
+ decoder->RestoreGlobalState();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h b/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
new file mode 100644
index 0000000..083fc4c
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_COPY_TEXTURE_CHROMIUM_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_COPY_TEXTURE_CHROMIUM_H_
+
+#include <vector>
+
+#include "base/containers/hash_tables.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2Decoder;
+
+} // namespace gles2.
+
+// This class encapsulates the resources required to implement the
+// GL_CHROMIUM_copy_texture extension. The copy operation is performed
+// via glCopyTexImage2D() or a blit to a framebuffer object.
+// The target of the |dest_id| texture must be GL_TEXTURE_2D.
+class GPU_EXPORT CopyTextureCHROMIUMResourceManager {
+ public:
+ CopyTextureCHROMIUMResourceManager();
+ ~CopyTextureCHROMIUMResourceManager();
+
+ void Initialize(const gles2::GLES2Decoder* decoder);
+ void Destroy();
+
+ void DoCopyTexture(const gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLenum source_internal_format,
+ GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_internal_format,
+ GLsizei width,
+ GLsizei height,
+ bool flip_y,
+ bool premultiply_alpha,
+ bool unpremultiply_alpha);
+
+ // This will apply a transform on the source texture before copying to
+ // destination texture.
+ void DoCopyTextureWithTransform(const gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLuint dest_id,
+ GLint dest_level,
+ GLsizei width,
+ GLsizei height,
+ bool flip_y,
+ bool premultiply_alpha,
+ bool unpremultiply_alpha,
+ const GLfloat transform_matrix[16]);
+
+ // The attributes used during invocation of the extension.
+ static const GLuint kVertexPositionAttrib = 0;
+
+ private:
+ struct ProgramInfo {
+ ProgramInfo()
+ : program(0u),
+ matrix_handle(0u),
+ half_size_handle(0u),
+ sampler_handle(0u) {}
+
+ GLuint program;
+ GLuint matrix_handle;
+ GLuint half_size_handle;
+ GLuint sampler_handle;
+ };
+
+ bool initialized_;
+ typedef std::vector<GLuint> ShaderVector;
+ ShaderVector vertex_shaders_;
+ ShaderVector fragment_shaders_;
+ typedef std::pair<int, int> ProgramMapKey;
+ typedef base::hash_map<ProgramMapKey, ProgramInfo> ProgramMap;
+ ProgramMap programs_;
+ GLuint buffer_id_;
+ GLuint framebuffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(CopyTextureCHROMIUMResourceManager);
+};
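+
+// Illustrative usage sketch (|decoder|, |src_id|, |dst_id|, |width| and
+// |height| are assumed to come from the caller):
+//   CopyTextureCHROMIUMResourceManager copy_texture;
+//   copy_texture.Initialize(decoder);
+//   copy_texture.DoCopyTexture(decoder, GL_TEXTURE_2D, src_id, GL_RGBA,
+//                              dst_id, 0 /* dest_level */, GL_RGBA,
+//                              width, height, false /* flip_y */,
+//                              false /* premultiply_alpha */,
+//                              false /* unpremultiply_alpha */);
+//   copy_texture.Destroy();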
+
+} // namespace gpu.
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_COPY_TEXTURE_CHROMIUM_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.cc b/gpu/command_buffer/service/gles2_cmd_decoder.cc
new file mode 100644
index 0000000..9bf037f
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -0,0 +1,11153 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <list>
+#include <map>
+#include <stack>
+#include <string>
+#include <vector>
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/command_line.h"
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_synthetic_delay.h"
+#include "base/float_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/numerics/safe_math.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "build/build_config.h"
+#define GLES2_GPU_SERVICE 1
+#include "gpu/command_buffer/common/debug_marker_manager.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
+#include "gpu/command_buffer/service/gles2_cmd_validation.h"
+#include "gpu/command_buffer/service/gpu_state_tracer.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "gpu/command_buffer/service/shader_translator_cache.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+#include "third_party/smhasher/src/City.h"
+#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_image.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_surface.h"
+
+#if defined(OS_MACOSX)
+#include <IOSurface/IOSurfaceAPI.h>
+// Note that this must be included after gl_bindings.h to avoid conflicts.
+#include <OpenGL/CGLIOSurface.h>
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/win_util.h"
+#endif
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+static const char kOESDerivativeExtension[] = "GL_OES_standard_derivatives";
+static const char kEXTFragDepthExtension[] = "GL_EXT_frag_depth";
+static const char kEXTDrawBuffersExtension[] = "GL_EXT_draw_buffers";
+static const char kEXTShaderTextureLodExtension[] = "GL_EXT_shader_texture_lod";
+
+static bool PrecisionMeetsSpecForHighpFloat(GLint rangeMin,
+ GLint rangeMax,
+ GLint precision) {
+ return (rangeMin >= 62) && (rangeMax >= 62) && (precision >= 16);
+}
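+
+// For example, a driver reporting range {127, 127} and precision 23 (an IEEE
+// single-precision float) satisfies these highp minimums of {62, 62, 16},
+// whereas a unit reporting the mediump-level values {14, 14, 10} does not.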
+
+static void GetShaderPrecisionFormatImpl(GLenum shader_type,
+ GLenum precision_type,
+ GLint *range, GLint *precision) {
+ switch (precision_type) {
+ case GL_LOW_INT:
+ case GL_MEDIUM_INT:
+ case GL_HIGH_INT:
+ // These values are for a 32-bit twos-complement integer format.
+ range[0] = 31;
+ range[1] = 30;
+ *precision = 0;
+ break;
+ case GL_LOW_FLOAT:
+ case GL_MEDIUM_FLOAT:
+ case GL_HIGH_FLOAT:
+ // These values are for an IEEE single-precision floating-point format.
+ range[0] = 127;
+ range[1] = 127;
+ *precision = 23;
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2 &&
+ gfx::g_driver_gl.fn.glGetShaderPrecisionFormatFn) {
+    // glGetShaderPrecisionFormat is sometimes exposed even though it is
+    // really just a stub, so range and precision must be set to the defaults
+    // above before calling it.
+    // On Mac OS with some GPUs, calling it generates a GL_INVALID_OPERATION
+    // error, so it is only called for GLES2 implementations.
+ glGetShaderPrecisionFormat(shader_type, precision_type,
+ range, precision);
+
+ // TODO(brianderson): Make the following official workarounds.
+
+    // Some drivers have bugs where they report the ranges as negative numbers.
+ // Taking the absolute value here shouldn't hurt because negative numbers
+ // aren't expected anyway.
+ range[0] = abs(range[0]);
+ range[1] = abs(range[1]);
+
+ // If the driver reports a precision for highp float that isn't actually
+ // highp, don't pretend like it's supported because shader compilation will
+ // fail anyway.
+ if (precision_type == GL_HIGH_FLOAT &&
+ !PrecisionMeetsSpecForHighpFloat(range[0], range[1], *precision)) {
+ range[0] = 0;
+ range[1] = 0;
+ *precision = 0;
+ }
+ }
+}
+
+static gfx::OverlayTransform GetGFXOverlayTransform(GLenum plane_transform) {
+ switch (plane_transform) {
+ case GL_OVERLAY_TRANSFORM_NONE_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_NONE;
+ case GL_OVERLAY_TRANSFORM_FLIP_HORIZONTAL_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_FLIP_HORIZONTAL;
+ case GL_OVERLAY_TRANSFORM_FLIP_VERTICAL_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_FLIP_VERTICAL;
+ case GL_OVERLAY_TRANSFORM_ROTATE_90_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_ROTATE_90;
+ case GL_OVERLAY_TRANSFORM_ROTATE_180_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_ROTATE_180;
+ case GL_OVERLAY_TRANSFORM_ROTATE_270_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_ROTATE_270;
+ default:
+ return gfx::OVERLAY_TRANSFORM_INVALID;
+ }
+}
+
+} // namespace
+
+class GLES2DecoderImpl;
+
+// Local versions of the SET_GL_ERROR macros
+#define LOCAL_SET_GL_ERROR(error, function_name, msg) \
+ ERRORSTATE_SET_GL_ERROR(state_.GetErrorState(), error, function_name, msg)
+#define LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, value, label) \
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(state_.GetErrorState(), \
+ function_name, value, label)
+#define LOCAL_SET_GL_ERROR_INVALID_PARAM(error, function_name, pname) \
+ ERRORSTATE_SET_GL_ERROR_INVALID_PARAM(state_.GetErrorState(), error, \
+ function_name, pname)
+#define LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(function_name) \
+ ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(state_.GetErrorState(), \
+ function_name)
+#define LOCAL_PEEK_GL_ERROR(function_name) \
+ ERRORSTATE_PEEK_GL_ERROR(state_.GetErrorState(), function_name)
+#define LOCAL_CLEAR_REAL_GL_ERRORS(function_name) \
+ ERRORSTATE_CLEAR_REAL_GL_ERRORS(state_.GetErrorState(), function_name)
+#define LOCAL_PERFORMANCE_WARNING(msg) \
+ PerformanceWarning(__FILE__, __LINE__, msg)
+#define LOCAL_RENDER_WARNING(msg) \
+ RenderWarning(__FILE__, __LINE__, msg)
+
+// Check that certain assumptions the code makes are true. There are places in
+// the code where shared memory is passed directly to GL, e.g. glUniformiv and
+// glShaderSource. The command buffer code assumes GLint and GLsizei (and maybe
+// a few others) are 32 bits. If they are not, the code will have to change to
+// call those GL functions with service-side memory and then copy the results
+// to shared memory, converting the sizes.
+COMPILE_ASSERT(sizeof(GLint) == sizeof(uint32), // NOLINT
+ GLint_not_same_size_as_uint32);
+COMPILE_ASSERT(sizeof(GLsizei) == sizeof(uint32), // NOLINT
+               GLsizei_not_same_size_as_uint32);
+COMPILE_ASSERT(sizeof(GLfloat) == sizeof(float), // NOLINT
+ GLfloat_not_same_size_as_float);
+
+// TODO(kbr): the use of this anonymous namespace core dumps the
+// linker on Mac OS X 10.6 when the symbol ordering file is used
+// namespace {
+
+// Returns the address of the first byte after a struct.
+template <typename T>
+const void* AddressAfterStruct(const T& pod) {
+ return reinterpret_cast<const uint8*>(&pod) + sizeof(pod);
+}
+
+// Returns the address of the first byte after the struct or NULL if size >
+// immediate_data_size.
+template <typename RETURN_TYPE, typename COMMAND_TYPE>
+RETURN_TYPE GetImmediateDataAs(const COMMAND_TYPE& pod,
+ uint32 size,
+ uint32 immediate_data_size) {
+ return (size <= immediate_data_size) ?
+ static_cast<RETURN_TYPE>(const_cast<void*>(AddressAfterStruct(pod))) :
+ NULL;
+}
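+
+// For example, an immediate glUniform4fv command carrying two vec4s could
+// fetch its payload with (illustrative only):
+//   GetImmediateDataAs<const GLfloat*>(c, 2 * 4 * sizeof(GLfloat),
+//                                      immediate_data_size);
+// which returns NULL when the declared size exceeds the data the client
+// actually supplied.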
+
+// Computes the data size for certain gl commands like glUniform.
+bool ComputeDataSize(
+ GLuint count,
+ size_t size,
+ unsigned int elements_per_unit,
+ uint32* dst) {
+ uint32 value;
+ if (!SafeMultiplyUint32(count, size, &value)) {
+ return false;
+ }
+ if (!SafeMultiplyUint32(value, elements_per_unit, &value)) {
+ return false;
+ }
+ *dst = value;
+ return true;
+}
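+
+// For example, glUniform4fv with count = 3 would use
+// ComputeDataSize(3, sizeof(GLfloat), 4, &size), yielding size = 48 bytes;
+// the helper returns false instead if the multiplication would overflow a
+// uint32.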
+
+// Return true if a character belongs to the ASCII subset as defined in
+// GLSL ES 1.0 spec section 3.1.
+static bool CharacterIsValidForGLES(unsigned char c) {
+ // Printing characters are valid except " $ ` @ \ ' DEL.
+ if (c >= 32 && c <= 126 &&
+ c != '"' &&
+ c != '$' &&
+ c != '`' &&
+ c != '@' &&
+ c != '\\' &&
+ c != '\'') {
+ return true;
+ }
+ // Horizontal tab, line feed, vertical tab, form feed, carriage return
+ // are also valid.
+ if (c >= 9 && c <= 13) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool StringIsValidForGLES(const char* str) {
+ for (; *str; ++str) {
+ if (!CharacterIsValidForGLES(*str)) {
+ return false;
+ }
+ }
+ return true;
+}
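+
+// For example, StringIsValidForGLES("varying vec2 v_uv;") returns true, while
+// a shader string containing '$', '@' or a backslash is rejected before it is
+// handed to the driver.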
+
+// This class prevents any GL errors that occur when it is in scope from
+// being reported to the client.
+class ScopedGLErrorSuppressor {
+ public:
+ explicit ScopedGLErrorSuppressor(
+ const char* function_name, ErrorState* error_state);
+ ~ScopedGLErrorSuppressor();
+ private:
+ const char* function_name_;
+ ErrorState* error_state_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedGLErrorSuppressor);
+};
+
+// Temporarily changes a decoder's bound texture and restores it when this
+// object goes out of scope. Also temporarily switches to using active texture
+// unit zero in case the client has changed that to something invalid.
+class ScopedTextureBinder {
+ public:
+ explicit ScopedTextureBinder(ContextState* state, GLuint id, GLenum target);
+ ~ScopedTextureBinder();
+
+ private:
+ ContextState* state_;
+ GLenum target_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedTextureBinder);
+};
+
+// Temporarily changes a decoder's bound render buffer and restores it when this
+// object goes out of scope.
+class ScopedRenderBufferBinder {
+ public:
+ explicit ScopedRenderBufferBinder(ContextState* state, GLuint id);
+ ~ScopedRenderBufferBinder();
+
+ private:
+ ContextState* state_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedRenderBufferBinder);
+};
+
+// Temporarily changes a decoder's bound frame buffer and restores it when this
+// object goes out of scope.
+class ScopedFrameBufferBinder {
+ public:
+ explicit ScopedFrameBufferBinder(GLES2DecoderImpl* decoder, GLuint id);
+ ~ScopedFrameBufferBinder();
+
+ private:
+ GLES2DecoderImpl* decoder_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedFrameBufferBinder);
+};
+
+// Temporarily changes a decoder's bound frame buffer to a resolved version of
+// the multisampled offscreen render buffer if that buffer is multisampled and
+// either it is bound or enforce_internal_framebuffer is true. If internal is
+// true, the resolved framebuffer is not visible to the parent.
+class ScopedResolvedFrameBufferBinder {
+ public:
+ explicit ScopedResolvedFrameBufferBinder(GLES2DecoderImpl* decoder,
+ bool enforce_internal_framebuffer,
+ bool internal);
+ ~ScopedResolvedFrameBufferBinder();
+
+ private:
+ GLES2DecoderImpl* decoder_;
+ bool resolve_and_bind_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedResolvedFrameBufferBinder);
+};
+
+class ScopedModifyPixels {
+ public:
+ explicit ScopedModifyPixels(TextureRef* ref);
+ ~ScopedModifyPixels();
+
+ private:
+ TextureRef* ref_;
+};
+
+ScopedModifyPixels::ScopedModifyPixels(TextureRef* ref) : ref_(ref) {
+ if (ref_)
+ ref_->texture()->OnWillModifyPixels();
+}
+
+ScopedModifyPixels::~ScopedModifyPixels() {
+ if (ref_)
+ ref_->texture()->OnDidModifyPixels();
+}
+
+class ScopedRenderTo {
+ public:
+ explicit ScopedRenderTo(Framebuffer* framebuffer);
+ ~ScopedRenderTo();
+
+ private:
+ const Framebuffer* framebuffer_;
+};
+
+ScopedRenderTo::ScopedRenderTo(Framebuffer* framebuffer)
+ : framebuffer_(framebuffer) {
+ if (framebuffer)
+ framebuffer_->OnWillRenderTo();
+}
+
+ScopedRenderTo::~ScopedRenderTo() {
+ if (framebuffer_)
+ framebuffer_->OnDidRenderTo();
+}
+
+// Encapsulates an OpenGL texture.
+class BackTexture {
+ public:
+ explicit BackTexture(MemoryTracker* memory_tracker, ContextState* state);
+ ~BackTexture();
+
+ // Create a new render texture.
+ void Create();
+
+ // Set the initial size and format of a render texture or resize it.
+ bool AllocateStorage(const gfx::Size& size, GLenum format, bool zero);
+
+ // Copy the contents of the currently bound frame buffer.
+ void Copy(const gfx::Size& size, GLenum format);
+
+ // Destroy the render texture. This must be explicitly called before
+ // destroying this object.
+ void Destroy();
+
+ // Invalidate the texture. This can be used when a context is lost and it is
+ // not possible to make it current in order to free the resource.
+ void Invalidate();
+
+ GLuint id() const {
+ return id_;
+ }
+
+ gfx::Size size() const {
+ return size_;
+ }
+
+ private:
+ MemoryTypeTracker memory_tracker_;
+ ContextState* state_;
+ size_t bytes_allocated_;
+ GLuint id_;
+ gfx::Size size_;
+ DISALLOW_COPY_AND_ASSIGN(BackTexture);
+};
+
+// Encapsulates an OpenGL render buffer of any format.
+class BackRenderbuffer {
+ public:
+ explicit BackRenderbuffer(
+ RenderbufferManager* renderbuffer_manager,
+ MemoryTracker* memory_tracker,
+ ContextState* state);
+ ~BackRenderbuffer();
+
+ // Create a new render buffer.
+ void Create();
+
+ // Set the initial size and format of a render buffer or resize it.
+ bool AllocateStorage(const FeatureInfo* feature_info,
+ const gfx::Size& size,
+ GLenum format,
+ GLsizei samples);
+
+ // Destroy the render buffer. This must be explicitly called before destroying
+ // this object.
+ void Destroy();
+
+ // Invalidate the render buffer. This can be used when a context is lost and
+ // it is not possible to make it current in order to free the resource.
+ void Invalidate();
+
+ GLuint id() const {
+ return id_;
+ }
+
+ private:
+ RenderbufferManager* renderbuffer_manager_;
+ MemoryTypeTracker memory_tracker_;
+ ContextState* state_;
+ size_t bytes_allocated_;
+ GLuint id_;
+ DISALLOW_COPY_AND_ASSIGN(BackRenderbuffer);
+};
+
+// Encapsulates an OpenGL frame buffer.
+class BackFramebuffer {
+ public:
+ explicit BackFramebuffer(GLES2DecoderImpl* decoder);
+ ~BackFramebuffer();
+
+ // Create a new frame buffer.
+ void Create();
+
+ // Attach a color render buffer to a frame buffer.
+ void AttachRenderTexture(BackTexture* texture);
+
+ // Attach a render buffer to a frame buffer. Note that this unbinds any
+ // currently bound frame buffer.
+ void AttachRenderBuffer(GLenum target, BackRenderbuffer* render_buffer);
+
+ // Destroy the frame buffer. This must be explicitly called before destroying
+ // this object.
+ void Destroy();
+
+ // Invalidate the frame buffer. This can be used when a context is lost and it
+ // is not possible to make it current in order to free the resource.
+ void Invalidate();
+
+ // See glCheckFramebufferStatusEXT.
+ GLenum CheckStatus();
+
+ GLuint id() const {
+ return id_;
+ }
+
+ private:
+ GLES2DecoderImpl* decoder_;
+ GLuint id_;
+ DISALLOW_COPY_AND_ASSIGN(BackFramebuffer);
+};
+
+struct FenceCallback {
+ explicit FenceCallback()
+ : fence(gfx::GLFence::Create()) {
+ DCHECK(fence);
+ }
+ std::vector<base::Closure> callbacks;
+ scoped_ptr<gfx::GLFence> fence;
+};
+
+class AsyncUploadTokenCompletionObserver
+ : public AsyncPixelTransferCompletionObserver {
+ public:
+ explicit AsyncUploadTokenCompletionObserver(uint32 async_upload_token)
+ : async_upload_token_(async_upload_token) {
+ }
+
+ virtual void DidComplete(const AsyncMemoryParams& mem_params) OVERRIDE {
+ DCHECK(mem_params.buffer().get());
+ void* data = mem_params.GetDataAddress();
+ AsyncUploadSync* sync = static_cast<AsyncUploadSync*>(data);
+ sync->SetAsyncUploadToken(async_upload_token_);
+ }
+
+ private:
+ virtual ~AsyncUploadTokenCompletionObserver() {
+ }
+
+ uint32 async_upload_token_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncUploadTokenCompletionObserver);
+};
+
+// } // anonymous namespace.
+
+// static
+const unsigned int GLES2Decoder::kDefaultStencilMask =
+ static_cast<unsigned int>(-1);
+
+bool GLES2Decoder::GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id) {
+ return false;
+}
+
+GLES2Decoder::GLES2Decoder()
+ : initialized_(false),
+ debug_(false),
+ log_commands_(false) {
+}
+
+GLES2Decoder::~GLES2Decoder() {
+}
+
+void GLES2Decoder::BeginDecoding() {}
+
+void GLES2Decoder::EndDecoding() {}
+
+// This class implements GLES2Decoder so we don't have to expose all the GLES2
+// cmd stuff to outside this class.
+class GLES2DecoderImpl : public GLES2Decoder,
+ public FramebufferManager::TextureDetachObserver,
+ public ErrorStateClient {
+ public:
+ explicit GLES2DecoderImpl(ContextGroup* group);
+ virtual ~GLES2DecoderImpl();
+
+ // Overridden from AsyncAPIInterface.
+ virtual Error DoCommand(unsigned int command,
+ unsigned int arg_count,
+ const void* args) OVERRIDE;
+
+ virtual error::Error DoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) OVERRIDE;
+
+ template <bool DebugImpl>
+ error::Error DoCommandsImpl(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed);
+
+ // Overridden from AsyncAPIInterface.
+ virtual const char* GetCommandName(unsigned int command_id) const OVERRIDE;
+
+ // Overridden from GLES2Decoder.
+ virtual bool Initialize(const scoped_refptr<gfx::GLSurface>& surface,
+ const scoped_refptr<gfx::GLContext>& context,
+ bool offscreen,
+ const gfx::Size& size,
+ const DisallowedFeatures& disallowed_features,
+ const std::vector<int32>& attribs) OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+ virtual void SetSurface(
+ const scoped_refptr<gfx::GLSurface>& surface) OVERRIDE;
+ virtual void ProduceFrontBuffer(const Mailbox& mailbox) OVERRIDE;
+ virtual bool ResizeOffscreenFrameBuffer(const gfx::Size& size) OVERRIDE;
+ void UpdateParentTextureInfo();
+ virtual bool MakeCurrent() OVERRIDE;
+ virtual GLES2Util* GetGLES2Util() OVERRIDE { return &util_; }
+ virtual gfx::GLContext* GetGLContext() OVERRIDE { return context_.get(); }
+ virtual ContextGroup* GetContextGroup() OVERRIDE { return group_.get(); }
+ virtual Capabilities GetCapabilities() OVERRIDE;
+ virtual void RestoreState(const ContextState* prev_state) OVERRIDE;
+
+ virtual void RestoreActiveTexture() const OVERRIDE {
+ state_.RestoreActiveTexture();
+ }
+ virtual void RestoreAllTextureUnitBindings(
+ const ContextState* prev_state) const OVERRIDE {
+ state_.RestoreAllTextureUnitBindings(prev_state);
+ }
+ virtual void RestoreActiveTextureUnitBinding(
+ unsigned int target) const OVERRIDE {
+ state_.RestoreActiveTextureUnitBinding(target);
+ }
+ virtual void RestoreBufferBindings() const OVERRIDE {
+ state_.RestoreBufferBindings();
+ }
+ virtual void RestoreGlobalState() const OVERRIDE {
+ state_.RestoreGlobalState(NULL);
+ }
+ virtual void RestoreProgramBindings() const OVERRIDE {
+ state_.RestoreProgramBindings();
+ }
+ virtual void RestoreTextureUnitBindings(unsigned unit) const OVERRIDE {
+ state_.RestoreTextureUnitBindings(unit, NULL);
+ }
+ virtual void RestoreFramebufferBindings() const OVERRIDE;
+ virtual void RestoreRenderbufferBindings() OVERRIDE;
+ virtual void RestoreTextureState(unsigned service_id) const OVERRIDE;
+
+ virtual void ClearAllAttributes() const OVERRIDE;
+ virtual void RestoreAllAttributes() const OVERRIDE;
+
+ virtual QueryManager* GetQueryManager() OVERRIDE {
+ return query_manager_.get();
+ }
+ virtual VertexArrayManager* GetVertexArrayManager() OVERRIDE {
+ return vertex_array_manager_.get();
+ }
+ virtual ImageManager* GetImageManager() OVERRIDE {
+ return image_manager_.get();
+ }
+ virtual bool ProcessPendingQueries() OVERRIDE;
+ virtual bool HasMoreIdleWork() OVERRIDE;
+ virtual void PerformIdleWork() OVERRIDE;
+
+ virtual void WaitForReadPixels(base::Closure callback) OVERRIDE;
+
+ virtual void SetResizeCallback(
+ const base::Callback<void(gfx::Size, float)>& callback) OVERRIDE;
+
+ virtual Logger* GetLogger() OVERRIDE;
+
+ virtual void BeginDecoding() OVERRIDE;
+ virtual void EndDecoding() OVERRIDE;
+
+ virtual ErrorState* GetErrorState() OVERRIDE;
+ virtual const ContextState* GetContextState() OVERRIDE { return &state_; }
+
+ virtual void SetShaderCacheCallback(
+ const ShaderCacheCallback& callback) OVERRIDE;
+ virtual void SetWaitSyncPointCallback(
+ const WaitSyncPointCallback& callback) OVERRIDE;
+
+ virtual AsyncPixelTransferManager*
+ GetAsyncPixelTransferManager() OVERRIDE;
+ virtual void ResetAsyncPixelTransferManagerForTest() OVERRIDE;
+ virtual void SetAsyncPixelTransferManagerForTest(
+ AsyncPixelTransferManager* manager) OVERRIDE;
+ virtual void SetIgnoreCachedStateForTest(bool ignore) OVERRIDE;
+ void ProcessFinishedAsyncTransfers();
+
+ virtual bool GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id) OVERRIDE;
+
+ virtual uint32 GetTextureUploadCount() OVERRIDE;
+ virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
+ virtual base::TimeDelta GetTotalProcessingCommandsTime() OVERRIDE;
+ virtual void AddProcessingCommandsTime(base::TimeDelta) OVERRIDE;
+
+ // Restores the current state to the user's settings.
+ void RestoreCurrentFramebufferBindings();
+
+ // Sets DEPTH_TEST, STENCIL_TEST and color mask for the current framebuffer.
+ void ApplyDirtyState();
+
+ // These check the state of the currently bound framebuffer or the
+ // backbuffer if no framebuffer is bound.
+ // If all_draw_buffers is false, only check with COLOR_ATTACHMENT0, otherwise
+ // check with all attached and enabled color attachments.
+ bool BoundFramebufferHasColorAttachmentWithAlpha(bool all_draw_buffers);
+ bool BoundFramebufferHasDepthAttachment();
+ bool BoundFramebufferHasStencilAttachment();
+
+ virtual error::ContextLostReason GetContextLostReason() OVERRIDE;
+
+ // Overridden from FramebufferManager::TextureDetachObserver:
+ virtual void OnTextureRefDetachedFromFramebuffer(
+ TextureRef* texture) OVERRIDE;
+
+  // Overridden from ErrorStateClient.
+ virtual void OnOutOfMemoryError() OVERRIDE;
+
+ // Ensure Renderbuffer corresponding to last DoBindRenderbuffer() is bound.
+ void EnsureRenderbufferBound();
+
+ // Helpers to facilitate calling into compatible extensions.
+ static void RenderbufferStorageMultisampleHelper(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLsizei samples,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height);
+
+ void BlitFramebufferHelper(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter);
+
+ private:
+ friend class ScopedFrameBufferBinder;
+ friend class ScopedResolvedFrameBufferBinder;
+ friend class BackFramebuffer;
+
+ // Initialize or re-initialize the shader translator.
+ bool InitializeShaderTranslator();
+
+ void UpdateCapabilities();
+
+ // Helpers for the glGen and glDelete functions.
+ bool GenTexturesHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteTexturesHelper(GLsizei n, const GLuint* client_ids);
+ bool GenBuffersHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteBuffersHelper(GLsizei n, const GLuint* client_ids);
+ bool GenFramebuffersHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteFramebuffersHelper(GLsizei n, const GLuint* client_ids);
+ bool GenRenderbuffersHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteRenderbuffersHelper(GLsizei n, const GLuint* client_ids);
+ bool GenQueriesEXTHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteQueriesEXTHelper(GLsizei n, const GLuint* client_ids);
+ bool GenVertexArraysOESHelper(GLsizei n, const GLuint* client_ids);
+ void DeleteVertexArraysOESHelper(GLsizei n, const GLuint* client_ids);
+
+ // Helper for async upload token completion notification callback.
+ base::Closure AsyncUploadTokenCompletionClosure(uint32 async_upload_token,
+ uint32 sync_data_shm_id,
+ uint32 sync_data_shm_offset);
+
+
+
+ // Workarounds
+ void OnFboChanged() const;
+ void OnUseFramebuffer() const;
+
+ // TODO(gman): Cache these pointers?
+ BufferManager* buffer_manager() {
+ return group_->buffer_manager();
+ }
+
+ RenderbufferManager* renderbuffer_manager() {
+ return group_->renderbuffer_manager();
+ }
+
+ FramebufferManager* framebuffer_manager() {
+ return group_->framebuffer_manager();
+ }
+
+ ProgramManager* program_manager() {
+ return group_->program_manager();
+ }
+
+ ShaderManager* shader_manager() {
+ return group_->shader_manager();
+ }
+
+ ShaderTranslatorCache* shader_translator_cache() {
+ return group_->shader_translator_cache();
+ }
+
+ const TextureManager* texture_manager() const {
+ return group_->texture_manager();
+ }
+
+ TextureManager* texture_manager() {
+ return group_->texture_manager();
+ }
+
+ MailboxManager* mailbox_manager() {
+ return group_->mailbox_manager();
+ }
+
+ ImageManager* image_manager() { return image_manager_.get(); }
+
+ VertexArrayManager* vertex_array_manager() {
+ return vertex_array_manager_.get();
+ }
+
+ MemoryTracker* memory_tracker() {
+ return group_->memory_tracker();
+ }
+
+ bool EnsureGPUMemoryAvailable(size_t estimated_size) {
+ MemoryTracker* tracker = memory_tracker();
+ if (tracker) {
+ return tracker->EnsureGPUMemoryAvailable(estimated_size);
+ }
+ return true;
+ }
+
+ bool IsOffscreenBufferMultisampled() const {
+ return offscreen_target_samples_ > 1;
+ }
+
+ // Creates a Texture for the given texture.
+ TextureRef* CreateTexture(
+ GLuint client_id, GLuint service_id) {
+ return texture_manager()->CreateTexture(client_id, service_id);
+ }
+
+ // Gets the texture info for the given texture. Returns NULL if none exists.
+ TextureRef* GetTexture(GLuint client_id) const {
+ return texture_manager()->GetTexture(client_id);
+ }
+
+ // Deletes the texture info for the given texture.
+ void RemoveTexture(GLuint client_id) {
+ texture_manager()->RemoveTexture(client_id);
+ }
+
+ // Get the size (in pixels) of the currently bound frame buffer (either FBO
+ // or regular back buffer).
+ gfx::Size GetBoundReadFrameBufferSize();
+
+ // Get the format of the currently bound frame buffer (either FBO or regular
+ // back buffer)
+ GLenum GetBoundReadFrameBufferTextureType();
+ GLenum GetBoundReadFrameBufferInternalFormat();
+ GLenum GetBoundDrawFrameBufferInternalFormat();
+
+ // Wrapper for CompressedTexImage2D commands.
+ error::Error DoCompressedTexImage2D(
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei image_size,
+ const void* data);
+
+ // Wrapper for CompressedTexSubImage2D.
+ void DoCompressedTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void * data);
+
+ // Wrapper for CopyTexImage2D.
+ void DoCopyTexImage2D(
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border);
+
+ // Wrapper for SwapBuffers.
+ void DoSwapBuffers();
+
+ // Wrapper for CopyTexSubImage2D.
+ void DoCopyTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height);
+
+ // Validation for TexSubImage2D.
+ bool ValidateTexSubImage2D(
+ error::Error* error,
+ const char* function_name,
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void * data);
+
+ // Wrapper for TexSubImage2D.
+ error::Error DoTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void * data);
+
+ // Extra validation for async tex(Sub)Image2D.
+ bool ValidateAsyncTransfer(
+ const char* function_name,
+ TextureRef* texture_ref,
+ GLenum target,
+ GLint level,
+ const void * data);
+
+ // Wrapper for TexImageIOSurface2DCHROMIUM.
+ void DoTexImageIOSurface2DCHROMIUM(
+ GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint io_surface_id,
+ GLuint plane);
+
+ void DoCopyTextureCHROMIUM(
+ GLenum target,
+ GLuint source_id,
+ GLuint target_id,
+ GLint level,
+ GLenum internal_format,
+ GLenum dest_type);
+
+ // Wrapper for TexStorage2DEXT.
+ void DoTexStorage2DEXT(
+ GLenum target,
+ GLint levels,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height);
+
+ void DoProduceTextureCHROMIUM(GLenum target, const GLbyte* key);
+ void DoProduceTextureDirectCHROMIUM(GLuint texture, GLenum target,
+ const GLbyte* key);
+ void ProduceTextureRef(std::string func_name, TextureRef* texture_ref,
+ GLenum target, const GLbyte* data);
+
+ void DoConsumeTextureCHROMIUM(GLenum target, const GLbyte* key);
+ void DoCreateAndConsumeTextureCHROMIUM(GLenum target, const GLbyte* key,
+ GLuint client_id);
+
+ void DoBindTexImage2DCHROMIUM(
+ GLenum target,
+ GLint image_id);
+ void DoReleaseTexImage2DCHROMIUM(
+ GLenum target,
+ GLint image_id);
+
+ void DoTraceEndCHROMIUM(void);
+
+ void DoDrawBuffersEXT(GLsizei count, const GLenum* bufs);
+
+ void DoLoseContextCHROMIUM(GLenum current, GLenum other);
+
+ void DoMatrixLoadfCHROMIUM(GLenum matrix_mode, const GLfloat* matrix);
+ void DoMatrixLoadIdentityCHROMIUM(GLenum matrix_mode);
+
+ // Creates a Program for the given program.
+ Program* CreateProgram(
+ GLuint client_id, GLuint service_id) {
+ return program_manager()->CreateProgram(client_id, service_id);
+ }
+
+ // Gets the program info for the given program. Returns NULL if none exists.
+ Program* GetProgram(GLuint client_id) {
+ return program_manager()->GetProgram(client_id);
+ }
+
+#if defined(NDEBUG)
+ void LogClientServiceMapping(
+ const char* /* function_name */,
+ GLuint /* client_id */,
+ GLuint /* service_id */) {
+ }
+ template<typename T>
+ void LogClientServiceForInfo(
+ T* /* info */, GLuint /* client_id */, const char* /* function_name */) {
+ }
+#else
+ void LogClientServiceMapping(
+ const char* function_name, GLuint client_id, GLuint service_id) {
+ if (service_logging_) {
+ VLOG(1) << "[" << logger_.GetLogPrefix() << "] " << function_name
+ << ": client_id = " << client_id
+ << ", service_id = " << service_id;
+ }
+ }
+ template<typename T>
+ void LogClientServiceForInfo(
+ T* info, GLuint client_id, const char* function_name) {
+ if (info) {
+ LogClientServiceMapping(function_name, client_id, info->service_id());
+ }
+ }
+#endif
+
+  // Gets the program info for the given program. If the id does not refer to
+  // a program, generates a GL error. Returns NULL if it is not a program.
+ Program* GetProgramInfoNotShader(
+ GLuint client_id, const char* function_name) {
+ Program* program = GetProgram(client_id);
+ if (!program) {
+ if (GetShader(client_id)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "shader passed for program");
+ } else {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "unknown program");
+ }
+ }
+ LogClientServiceForInfo(program, client_id, function_name);
+ return program;
+ }
+
+
+ // Creates a Shader for the given shader.
+ Shader* CreateShader(
+ GLuint client_id,
+ GLuint service_id,
+ GLenum shader_type) {
+ return shader_manager()->CreateShader(
+ client_id, service_id, shader_type);
+ }
+
+ // Gets the shader info for the given shader. Returns NULL if none exists.
+ Shader* GetShader(GLuint client_id) {
+ return shader_manager()->GetShader(client_id);
+ }
+
+  // Gets the shader info for the given shader. If the id does not refer to a
+  // shader, generates a GL error. Returns NULL if it is not a shader.
+ Shader* GetShaderInfoNotProgram(
+ GLuint client_id, const char* function_name) {
+ Shader* shader = GetShader(client_id);
+ if (!shader) {
+ if (GetProgram(client_id)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "program passed for shader");
+ } else {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, function_name, "unknown shader");
+ }
+ }
+ LogClientServiceForInfo(shader, client_id, function_name);
+ return shader;
+ }
+
+ // Creates a buffer info for the given buffer.
+ void CreateBuffer(GLuint client_id, GLuint service_id) {
+ return buffer_manager()->CreateBuffer(client_id, service_id);
+ }
+
+ // Gets the buffer info for the given buffer.
+ Buffer* GetBuffer(GLuint client_id) {
+ Buffer* buffer = buffer_manager()->GetBuffer(client_id);
+ return buffer;
+ }
+
+  // Removes any buffers in the VertexAttribInfos and BufferInfos. This is used
+ // on glDeleteBuffers so we can make sure the user does not try to render
+ // with deleted buffers.
+ void RemoveBuffer(GLuint client_id);
+
+ // Creates a framebuffer info for the given framebuffer.
+ void CreateFramebuffer(GLuint client_id, GLuint service_id) {
+ return framebuffer_manager()->CreateFramebuffer(client_id, service_id);
+ }
+
+ // Gets the framebuffer info for the given framebuffer.
+ Framebuffer* GetFramebuffer(GLuint client_id) {
+ return framebuffer_manager()->GetFramebuffer(client_id);
+ }
+
+ // Removes the framebuffer info for the given framebuffer.
+ void RemoveFramebuffer(GLuint client_id) {
+ framebuffer_manager()->RemoveFramebuffer(client_id);
+ }
+
+ // Creates a renderbuffer info for the given renderbuffer.
+ void CreateRenderbuffer(GLuint client_id, GLuint service_id) {
+ return renderbuffer_manager()->CreateRenderbuffer(
+ client_id, service_id);
+ }
+
+ // Gets the renderbuffer info for the given renderbuffer.
+ Renderbuffer* GetRenderbuffer(GLuint client_id) {
+ return renderbuffer_manager()->GetRenderbuffer(client_id);
+ }
+
+ // Removes the renderbuffer info for the given renderbuffer.
+ void RemoveRenderbuffer(GLuint client_id) {
+ renderbuffer_manager()->RemoveRenderbuffer(client_id);
+ }
+
+ // Gets the vertex attrib manager for the given vertex array.
+ VertexAttribManager* GetVertexAttribManager(GLuint client_id) {
+ VertexAttribManager* info =
+ vertex_array_manager()->GetVertexAttribManager(client_id);
+ return info;
+ }
+
+ // Removes the vertex attrib manager for the given vertex array.
+ void RemoveVertexAttribManager(GLuint client_id) {
+ vertex_array_manager()->RemoveVertexAttribManager(client_id);
+ }
+
+ // Creates a vertex attrib manager for the given vertex array.
+ scoped_refptr<VertexAttribManager> CreateVertexAttribManager(
+ GLuint client_id,
+ GLuint service_id,
+ bool client_visible) {
+ return vertex_array_manager()->CreateVertexAttribManager(
+ client_id, service_id, group_->max_vertex_attribs(), client_visible);
+ }
+
+ void DoBindAttribLocation(GLuint client_id, GLuint index, const char* name);
+ void DoBindUniformLocationCHROMIUM(
+ GLuint client_id, GLint location, const char* name);
+
+ error::Error GetAttribLocationHelper(
+ GLuint client_id, uint32 location_shm_id, uint32 location_shm_offset,
+ const std::string& name_str);
+
+ error::Error GetUniformLocationHelper(
+ GLuint client_id, uint32 location_shm_id, uint32 location_shm_offset,
+ const std::string& name_str);
+
+ // Helper for glShaderSource.
+ error::Error ShaderSourceHelper(
+ GLuint client_id, const char* data, uint32 data_size);
+
+ // Clear any textures used by the current program.
+ bool ClearUnclearedTextures();
+
+ // Clears any uncleared attachments attached to the given frame buffer.
+ void ClearUnclearedAttachments(GLenum target, Framebuffer* framebuffer);
+
+ // overridden from GLES2Decoder
+ virtual bool ClearLevel(unsigned service_id,
+ unsigned bind_target,
+ unsigned target,
+ int level,
+ unsigned internal_format,
+ unsigned format,
+ unsigned type,
+ int width,
+ int height,
+ bool is_texture_immutable) OVERRIDE;
+
+ // Restore all GL state that affects clearing.
+ void RestoreClearState();
+
+ // Remembers the state of some capabilities.
+ // Returns: true if glEnable/glDisable should actually be called.
+ bool SetCapabilityState(GLenum cap, bool enabled);
+
+ // Check that the currently bound framebuffers are valid.
+ // Generates GL error if not.
+ bool CheckBoundFramebuffersValid(const char* func_name);
+
+ // Check that the currently bound read framebuffer has a color image
+ // attached. Generates GL error if not.
+ bool CheckBoundReadFramebufferColorAttachment(const char* func_name);
+
+ // Check if a framebuffer meets our requirements.
+ bool CheckFramebufferValid(
+ Framebuffer* framebuffer,
+ GLenum target,
+ const char* func_name);
+
+ // Checks if the current program exists and is valid. If not generates the
+ // appropriate GL error. Returns true if the current program is in a usable
+ // state.
+ bool CheckCurrentProgram(const char* function_name);
+
+ // Checks if the current program exists and is valid and that location is not
+ // -1. If the current program is not valid generates the appropriate GL
+ // error. Returns true if the current program is in a usable state and
+ // location is not -1.
+ bool CheckCurrentProgramForUniform(GLint location, const char* function_name);
+
+ // Gets the type of a uniform for a location in the current program. Sets GL
+ // errors if the current program is not valid. Returns true if the current
+ // program is valid and the location exists. Adjusts count so it
+ // does not overflow the uniform.
+ bool PrepForSetUniformByLocation(GLint fake_location,
+ const char* function_name,
+ Program::UniformApiType api_type,
+ GLint* real_location,
+ GLenum* type,
+ GLsizei* count);
+
+ // Gets the service id for any simulated backbuffer fbo.
+ GLuint GetBackbufferServiceId() const;
+
+ // Helper for glGetBooleanv, glGetFloatv and glGetIntegerv
+ bool GetHelper(GLenum pname, GLint* params, GLsizei* num_written);
+
+ // Helper for glGetVertexAttrib
+ void GetVertexAttribHelper(
+ const VertexAttrib* attrib, GLenum pname, GLint* param);
+
+ // Wrapper for glCreateProgram
+ bool CreateProgramHelper(GLuint client_id);
+
+ // Wrapper for glCreateShader
+ bool CreateShaderHelper(GLenum type, GLuint client_id);
+
+ // Wrapper for glActiveTexture
+ void DoActiveTexture(GLenum texture_unit);
+
+ // Wrapper for glAttachShader
+ void DoAttachShader(GLuint client_program_id, GLint client_shader_id);
+
+ // Wrapper for glBindBuffer since we need to track the current targets.
+ void DoBindBuffer(GLenum target, GLuint buffer);
+
+ // Wrapper for glBindFramebuffer since we need to track the current targets.
+ void DoBindFramebuffer(GLenum target, GLuint framebuffer);
+
+ // Wrapper for glBindRenderbuffer since we need to track the current targets.
+ void DoBindRenderbuffer(GLenum target, GLuint renderbuffer);
+
+ // Wrapper for glBindTexture since we need to track the current targets.
+ void DoBindTexture(GLenum target, GLuint texture);
+
+ // Wrapper for glBindVertexArrayOES
+ void DoBindVertexArrayOES(GLuint array);
+ void EmulateVertexArrayState();
+
+ // Wrapper for glBlitFramebufferCHROMIUM.
+ void DoBlitFramebufferCHROMIUM(
+ GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter);
+
+ // Wrapper for glBufferSubData.
+ void DoBufferSubData(
+ GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid * data);
+
+ // Wrapper for glCheckFramebufferStatus
+ GLenum DoCheckFramebufferStatus(GLenum target);
+
+ // Wrapper for glClear
+ error::Error DoClear(GLbitfield mask);
+
+ // Wrappers for various state.
+ void DoDepthRangef(GLclampf znear, GLclampf zfar);
+ void DoSampleCoverage(GLclampf value, GLboolean invert);
+
+ // Wrapper for glCompileShader.
+ void DoCompileShader(GLuint shader);
+
+ // Wrapper for glDetachShader
+ void DoDetachShader(GLuint client_program_id, GLint client_shader_id);
+
+ // Wrapper for glDisable
+ void DoDisable(GLenum cap);
+
+ // Wrapper for glDisableVertexAttribArray.
+ void DoDisableVertexAttribArray(GLuint index);
+
+ // Wrapper for glDiscardFramebufferEXT, since we need to track undefined
+ // attachments.
+ void DoDiscardFramebufferEXT(GLenum target,
+ GLsizei numAttachments,
+ const GLenum* attachments);
+
+ // Wrapper for glEnable
+ void DoEnable(GLenum cap);
+
+ // Wrapper for glEnableVertexAttribArray.
+ void DoEnableVertexAttribArray(GLuint index);
+
+ // Wrapper for glFinish.
+ void DoFinish();
+
+ // Wrapper for glFlush.
+ void DoFlush();
+
+  // Wrapper for glFramebufferRenderbuffer.
+ void DoFramebufferRenderbuffer(
+ GLenum target, GLenum attachment, GLenum renderbuffertarget,
+ GLuint renderbuffer);
+
+ // Wrapper for glFramebufferTexture2D.
+ void DoFramebufferTexture2D(
+ GLenum target, GLenum attachment, GLenum textarget, GLuint texture,
+ GLint level);
+
+ // Wrapper for glFramebufferTexture2DMultisampleEXT.
+ void DoFramebufferTexture2DMultisample(
+ GLenum target, GLenum attachment, GLenum textarget,
+ GLuint texture, GLint level, GLsizei samples);
+
+ // Common implementation for both DoFramebufferTexture2D wrappers.
+ void DoFramebufferTexture2DCommon(const char* name,
+ GLenum target, GLenum attachment, GLenum textarget,
+ GLuint texture, GLint level, GLsizei samples);
+
+ // Wrapper for glGenerateMipmap
+ void DoGenerateMipmap(GLenum target);
+
+  // Helper for DoGetBooleanv, Floatv, and Integerv to adjust pname
+ // to account for different pname values defined in different extension
+ // variants.
+ GLenum AdjustGetPname(GLenum pname);
+
+ // Wrapper for DoGetBooleanv.
+ void DoGetBooleanv(GLenum pname, GLboolean* params);
+
+ // Wrapper for DoGetFloatv.
+ void DoGetFloatv(GLenum pname, GLfloat* params);
+
+ // Wrapper for glGetFramebufferAttachmentParameteriv.
+ void DoGetFramebufferAttachmentParameteriv(
+ GLenum target, GLenum attachment, GLenum pname, GLint* params);
+
+ // Wrapper for glGetIntegerv.
+ void DoGetIntegerv(GLenum pname, GLint* params);
+
+ // Gets the max value in a range in a buffer.
+ GLuint DoGetMaxValueInBufferCHROMIUM(
+ GLuint buffer_id, GLsizei count, GLenum type, GLuint offset);
+
+ // Wrapper for glGetBufferParameteriv.
+ void DoGetBufferParameteriv(
+ GLenum target, GLenum pname, GLint* params);
+
+ // Wrapper for glGetProgramiv.
+ void DoGetProgramiv(
+ GLuint program_id, GLenum pname, GLint* params);
+
+ // Wrapper for glRenderbufferParameteriv.
+ void DoGetRenderbufferParameteriv(
+ GLenum target, GLenum pname, GLint* params);
+
+ // Wrapper for glGetShaderiv
+ void DoGetShaderiv(GLuint shader, GLenum pname, GLint* params);
+
+ // Wrappers for glGetTexParameter.
+ void DoGetTexParameterfv(GLenum target, GLenum pname, GLfloat* params);
+ void DoGetTexParameteriv(GLenum target, GLenum pname, GLint* params);
+ void InitTextureMaxAnisotropyIfNeeded(GLenum target, GLenum pname);
+
+ // Wrappers for glGetVertexAttrib.
+ void DoGetVertexAttribfv(GLuint index, GLenum pname, GLfloat *params);
+ void DoGetVertexAttribiv(GLuint index, GLenum pname, GLint *params);
+
+ // Wrappers for glIsXXX functions.
+ bool DoIsEnabled(GLenum cap);
+ bool DoIsBuffer(GLuint client_id);
+ bool DoIsFramebuffer(GLuint client_id);
+ bool DoIsProgram(GLuint client_id);
+ bool DoIsRenderbuffer(GLuint client_id);
+ bool DoIsShader(GLuint client_id);
+ bool DoIsTexture(GLuint client_id);
+ bool DoIsVertexArrayOES(GLuint client_id);
+
+ // Wrapper for glLinkProgram
+ void DoLinkProgram(GLuint program);
+
+ // Wrapper for glRenderbufferStorage.
+ void DoRenderbufferStorage(
+ GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+
+ // Handler for glRenderbufferStorageMultisampleCHROMIUM.
+ void DoRenderbufferStorageMultisampleCHROMIUM(
+ GLenum target, GLsizei samples, GLenum internalformat,
+ GLsizei width, GLsizei height);
+
+ // Handler for glRenderbufferStorageMultisampleEXT
+ // (multisampled_render_to_texture).
+ void DoRenderbufferStorageMultisampleEXT(
+ GLenum target, GLsizei samples, GLenum internalformat,
+ GLsizei width, GLsizei height);
+
+ // Common validation for multisample extensions.
+ bool ValidateRenderbufferStorageMultisample(GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height);
+
+  // Verifies that the currently bound multisample renderbuffer is valid.
+  // Very slow! Only done on platforms with driver bugs that return invalid
+  // buffers under memory pressure.
+ bool VerifyMultisampleRenderbufferIntegrity(
+ GLuint renderbuffer, GLenum format);
+
+ // Wrapper for glReleaseShaderCompiler.
+ void DoReleaseShaderCompiler() { }
+
+ // Wrappers for glTexParameter functions.
+ void DoTexParameterf(GLenum target, GLenum pname, GLfloat param);
+ void DoTexParameteri(GLenum target, GLenum pname, GLint param);
+ void DoTexParameterfv(GLenum target, GLenum pname, const GLfloat* params);
+ void DoTexParameteriv(GLenum target, GLenum pname, const GLint* params);
+
+  // Wrappers for glUniform1i and glUniform1iv because, according to the GLES2
+  // spec, only these two functions can be used to set sampler uniforms.
+ void DoUniform1i(GLint fake_location, GLint v0);
+ void DoUniform1iv(GLint fake_location, GLsizei count, const GLint* value);
+ void DoUniform2iv(GLint fake_location, GLsizei count, const GLint* value);
+ void DoUniform3iv(GLint fake_location, GLsizei count, const GLint* value);
+ void DoUniform4iv(GLint fake_location, GLsizei count, const GLint* value);
+
+ // Wrappers for glUniformfv because some drivers don't correctly accept
+ // bool uniforms.
+ void DoUniform1fv(GLint fake_location, GLsizei count, const GLfloat* value);
+ void DoUniform2fv(GLint fake_location, GLsizei count, const GLfloat* value);
+ void DoUniform3fv(GLint fake_location, GLsizei count, const GLfloat* value);
+ void DoUniform4fv(GLint fake_location, GLsizei count, const GLfloat* value);
+
+ void DoUniformMatrix2fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value);
+ void DoUniformMatrix3fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value);
+ void DoUniformMatrix4fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value);
+
+ bool SetVertexAttribValue(
+ const char* function_name, GLuint index, const GLfloat* value);
+
+ // Wrappers for glVertexAttrib??
+ void DoVertexAttrib1f(GLuint index, GLfloat v0);
+ void DoVertexAttrib2f(GLuint index, GLfloat v0, GLfloat v1);
+ void DoVertexAttrib3f(GLuint index, GLfloat v0, GLfloat v1, GLfloat v2);
+ void DoVertexAttrib4f(
+ GLuint index, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+ void DoVertexAttrib1fv(GLuint index, const GLfloat *v);
+ void DoVertexAttrib2fv(GLuint index, const GLfloat *v);
+ void DoVertexAttrib3fv(GLuint index, const GLfloat *v);
+ void DoVertexAttrib4fv(GLuint index, const GLfloat *v);
+
+ // Wrapper for glViewport
+ void DoViewport(GLint x, GLint y, GLsizei width, GLsizei height);
+
+ // Wrapper for glUseProgram
+ void DoUseProgram(GLuint program);
+
+ // Wrapper for glValidateProgram.
+ void DoValidateProgram(GLuint program_client_id);
+
+ void DoInsertEventMarkerEXT(GLsizei length, const GLchar* marker);
+ void DoPushGroupMarkerEXT(GLsizei length, const GLchar* group);
+ void DoPopGroupMarkerEXT(void);
+
+ // Gets the number of values that will be returned by glGetXXX. Returns
+ // false if pname is unknown.
+ bool GetNumValuesReturnedForGLGet(GLenum pname, GLsizei* num_values);
+
+ // Checks if the current program and vertex attributes are valid for drawing.
+ bool IsDrawValid(
+ const char* function_name, GLuint max_vertex_accessed, bool instanced,
+ GLsizei primcount);
+
+  // Returns true if successful; *simulated is set to true if attrib0 was
+  // simulated.
+ bool SimulateAttrib0(
+ const char* function_name, GLuint max_vertex_accessed, bool* simulated);
+ void RestoreStateForAttrib(GLuint attrib, bool restore_array_binding);
+
+ // If an image is bound to texture, this will call Will/DidUseTexImage
+ // if needed.
+ void DoWillUseTexImageIfNeeded(Texture* texture, GLenum textarget);
+ void DoDidUseTexImageIfNeeded(Texture* texture, GLenum textarget);
+
+ // Returns false if textures were replaced.
+ bool PrepareTexturesForRender();
+ void RestoreStateForTextures();
+
+ // Returns true if GL_FIXED attribs were simulated.
+ bool SimulateFixedAttribs(
+ const char* function_name,
+ GLuint max_vertex_accessed, bool* simulated, GLsizei primcount);
+ void RestoreStateForSimulatedFixedAttribs();
+
+ // Handle DrawArrays and DrawElements for both instanced and non-instanced
+ // cases (primcount is always 1 for non-instanced).
+ error::Error DoDrawArrays(
+ const char* function_name,
+ bool instanced, GLenum mode, GLint first, GLsizei count,
+ GLsizei primcount);
+ error::Error DoDrawElements(
+ const char* function_name,
+ bool instanced, GLenum mode, GLsizei count, GLenum type,
+ int32 offset, GLsizei primcount);
+
+ GLenum GetBindTargetForSamplerType(GLenum type) {
+ DCHECK(type == GL_SAMPLER_2D || type == GL_SAMPLER_CUBE ||
+ type == GL_SAMPLER_EXTERNAL_OES || type == GL_SAMPLER_2D_RECT_ARB);
+ switch (type) {
+ case GL_SAMPLER_2D:
+ return GL_TEXTURE_2D;
+ case GL_SAMPLER_CUBE:
+ return GL_TEXTURE_CUBE_MAP;
+ case GL_SAMPLER_EXTERNAL_OES:
+ return GL_TEXTURE_EXTERNAL_OES;
+ case GL_SAMPLER_2D_RECT_ARB:
+ return GL_TEXTURE_RECTANGLE_ARB;
+ }
+
+ NOTREACHED();
+ return 0;
+ }
+
+ // Gets the framebuffer info for a particular target.
+ Framebuffer* GetFramebufferInfoForTarget(GLenum target) {
+ Framebuffer* framebuffer = NULL;
+ switch (target) {
+ case GL_FRAMEBUFFER:
+ case GL_DRAW_FRAMEBUFFER_EXT:
+ framebuffer = framebuffer_state_.bound_draw_framebuffer.get();
+ break;
+ case GL_READ_FRAMEBUFFER_EXT:
+ framebuffer = framebuffer_state_.bound_read_framebuffer.get();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ return framebuffer;
+ }
+
+ Renderbuffer* GetRenderbufferInfoForTarget(
+ GLenum target) {
+ Renderbuffer* renderbuffer = NULL;
+ switch (target) {
+ case GL_RENDERBUFFER:
+ renderbuffer = state_.bound_renderbuffer.get();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ return renderbuffer;
+ }
+
+ // Validates the program and location for a glGetUniform call and returns
+ // a SizeResult setup to receive the result. Returns true if glGetUniform
+ // should be called.
+ bool GetUniformSetup(
+ GLuint program, GLint fake_location,
+ uint32 shm_id, uint32 shm_offset,
+ error::Error* error, GLint* real_location, GLuint* service_id,
+ void** result, GLenum* result_type);
+
+ virtual bool WasContextLost() OVERRIDE;
+ virtual bool WasContextLostByRobustnessExtension() OVERRIDE;
+ virtual void LoseContext(uint32 reset_status) OVERRIDE;
+
+#if defined(OS_MACOSX)
+ void ReleaseIOSurfaceForTexture(GLuint texture_id);
+#endif
+
+ bool ValidateCompressedTexDimensions(
+ const char* function_name,
+ GLint level, GLsizei width, GLsizei height, GLenum format);
+ bool ValidateCompressedTexFuncData(
+ const char* function_name,
+ GLsizei width, GLsizei height, GLenum format, size_t size);
+ bool ValidateCompressedTexSubDimensions(
+ const char* function_name,
+ GLenum target, GLint level, GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height, GLenum format,
+ Texture* texture);
+
+ void RenderWarning(const char* filename, int line, const std::string& msg);
+ void PerformanceWarning(
+ const char* filename, int line, const std::string& msg);
+
+ const FeatureInfo::FeatureFlags& features() const {
+ return feature_info_->feature_flags();
+ }
+
+ const FeatureInfo::Workarounds& workarounds() const {
+ return feature_info_->workarounds();
+ }
+
+ bool ShouldDeferDraws() {
+ return !offscreen_target_frame_buffer_.get() &&
+ framebuffer_state_.bound_draw_framebuffer.get() == NULL &&
+ surface_->DeferDraws();
+ }
+
+ bool ShouldDeferReads() {
+ return !offscreen_target_frame_buffer_.get() &&
+ framebuffer_state_.bound_read_framebuffer.get() == NULL &&
+ surface_->DeferDraws();
+ }
+
+ error::Error WillAccessBoundFramebufferForDraw() {
+ if (ShouldDeferDraws())
+ return error::kDeferCommandUntilLater;
+ if (!offscreen_target_frame_buffer_.get() &&
+ !framebuffer_state_.bound_draw_framebuffer.get() &&
+ !surface_->SetBackbufferAllocation(true))
+ return error::kLostContext;
+ return error::kNoError;
+ }
+
+ error::Error WillAccessBoundFramebufferForRead() {
+ if (ShouldDeferReads())
+ return error::kDeferCommandUntilLater;
+ if (!offscreen_target_frame_buffer_.get() &&
+ !framebuffer_state_.bound_read_framebuffer.get() &&
+ !surface_->SetBackbufferAllocation(true))
+ return error::kLostContext;
+ return error::kNoError;
+ }
+
+ // Set remaining commands to process to 0 to force DoCommands to return
+ // and allow context preemption and GPU watchdog checks in GpuScheduler().
+ void ExitCommandProcessingEarly() { commands_to_process_ = 0; }
+
+ void ProcessPendingReadPixels();
+ void FinishReadPixels(const cmds::ReadPixels& c, GLuint buffer);
+
+ // Generate a member function prototype for each command in an automated and
+ // typesafe way.
+#define GLES2_CMD_OP(name) \
+ Error Handle##name(uint32 immediate_data_size, const void* data);
+
+ GLES2_COMMAND_LIST(GLES2_CMD_OP)
+
+#undef GLES2_CMD_OP
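+
+  // Illustrative sketch only (not generated code): for a hypothetical command
+  // named Foo, the macro above would declare a handler of the form
+  //   Error HandleFoo(uint32 immediate_data_size, const void* data);
+  // and one such prototype is emitted for every command in
+  // GLES2_COMMAND_LIST.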
+
+ // The GL context this decoder renders to on behalf of the client.
+ scoped_refptr<gfx::GLSurface> surface_;
+ scoped_refptr<gfx::GLContext> context_;
+
+ // The ContextGroup for this decoder uses to track resources.
+ scoped_refptr<ContextGroup> group_;
+
+ DebugMarkerManager debug_marker_manager_;
+ Logger logger_;
+
+ // All the state for this context.
+ ContextState state_;
+
+ // Current width and height of the offscreen frame buffer.
+ gfx::Size offscreen_size_;
+
+ // Util to help with GL.
+ GLES2Util util_;
+
+ // unpack flip y as last set by glPixelStorei
+ bool unpack_flip_y_;
+
+ // unpack (un)premultiply alpha as last set by glPixelStorei
+ bool unpack_premultiply_alpha_;
+ bool unpack_unpremultiply_alpha_;
+
+ // The buffer we bind to attrib 0 since OpenGL requires it (ES does not).
+ GLuint attrib_0_buffer_id_;
+
+ // The value currently in attrib_0.
+ Vec4 attrib_0_value_;
+
+ // Whether or not the attrib_0 buffer holds the attrib_0_value.
+ bool attrib_0_buffer_matches_value_;
+
+ // The size of attrib 0.
+ GLsizei attrib_0_size_;
+
+ // The buffer used to simulate GL_FIXED attribs.
+ GLuint fixed_attrib_buffer_id_;
+
+  // The size of the fixed attrib buffer.
+ GLsizei fixed_attrib_buffer_size_;
+
+ // The offscreen frame buffer that the client renders to. With EGL, the
+ // depth and stencil buffers are separate. With regular GL there is a single
+ // packed depth stencil buffer in offscreen_target_depth_render_buffer_.
+ // offscreen_target_stencil_render_buffer_ is unused.
+ scoped_ptr<BackFramebuffer> offscreen_target_frame_buffer_;
+ scoped_ptr<BackTexture> offscreen_target_color_texture_;
+ scoped_ptr<BackRenderbuffer> offscreen_target_color_render_buffer_;
+ scoped_ptr<BackRenderbuffer> offscreen_target_depth_render_buffer_;
+ scoped_ptr<BackRenderbuffer> offscreen_target_stencil_render_buffer_;
+ GLenum offscreen_target_color_format_;
+ GLenum offscreen_target_depth_format_;
+ GLenum offscreen_target_stencil_format_;
+ GLsizei offscreen_target_samples_;
+ GLboolean offscreen_target_buffer_preserved_;
+
+ // The copy that is saved when SwapBuffers is called.
+ scoped_ptr<BackFramebuffer> offscreen_saved_frame_buffer_;
+ scoped_ptr<BackTexture> offscreen_saved_color_texture_;
+ scoped_refptr<TextureRef>
+ offscreen_saved_color_texture_info_;
+
+ // The copy that is used as the destination for multi-sample resolves.
+ scoped_ptr<BackFramebuffer> offscreen_resolved_frame_buffer_;
+ scoped_ptr<BackTexture> offscreen_resolved_color_texture_;
+ GLenum offscreen_saved_color_format_;
+
+ scoped_ptr<QueryManager> query_manager_;
+
+ scoped_ptr<VertexArrayManager> vertex_array_manager_;
+
+ scoped_ptr<ImageManager> image_manager_;
+
+ base::Callback<void(gfx::Size, float)> resize_callback_;
+
+ WaitSyncPointCallback wait_sync_point_callback_;
+
+ ShaderCacheCallback shader_cache_callback_;
+
+ scoped_ptr<AsyncPixelTransferManager> async_pixel_transfer_manager_;
+
+  // The format of the back buffer.
+ GLenum back_buffer_color_format_;
+ bool back_buffer_has_depth_;
+ bool back_buffer_has_stencil_;
+
+ bool surfaceless_;
+
+ // Backbuffer attachments that are currently undefined.
+ uint32 backbuffer_needs_clear_bits_;
+
+  // The current decoder error communicates the error state through command
+  // processing functions that do not return an error value. It should only be
+  // set when not returning an error.
+ error::Error current_decoder_error_;
+
+ bool use_shader_translator_;
+ scoped_refptr<ShaderTranslator> vertex_translator_;
+ scoped_refptr<ShaderTranslator> fragment_translator_;
+
+ DisallowedFeatures disallowed_features_;
+
+ // Cached from ContextGroup
+ const Validators* validators_;
+ scoped_refptr<FeatureInfo> feature_info_;
+
+ int frame_number_;
+
+ // Number of commands remaining to be processed in DoCommands().
+ int commands_to_process_;
+
+ bool has_robustness_extension_;
+ GLenum reset_status_;
+ bool reset_by_robustness_extension_;
+ bool supports_post_sub_buffer_;
+
+ // These flags are used to override the state of the shared feature_info_
+ // member. Because the same FeatureInfo instance may be shared among many
+  // contexts, the assumptions on the availability of extensions in WebGL
+ // contexts may be broken. These flags override the shared state to preserve
+ // WebGL semantics.
+ bool force_webgl_glsl_validation_;
+ bool derivatives_explicitly_enabled_;
+ bool frag_depth_explicitly_enabled_;
+ bool draw_buffers_explicitly_enabled_;
+ bool shader_texture_lod_explicitly_enabled_;
+
+ bool compile_shader_always_succeeds_;
+
+ // An optional behaviour to lose the context and group when OOM.
+ bool lose_context_when_out_of_memory_;
+
+ // Log extra info.
+ bool service_logging_;
+
+#if defined(OS_MACOSX)
+ typedef std::map<GLuint, IOSurfaceRef> TextureToIOSurfaceMap;
+ TextureToIOSurfaceMap texture_to_io_surface_map_;
+#endif
+
+ scoped_ptr<CopyTextureCHROMIUMResourceManager> copy_texture_CHROMIUM_;
+
+ // Cached values of the currently assigned viewport dimensions.
+ GLsizei viewport_max_width_;
+ GLsizei viewport_max_height_;
+
+ // Command buffer stats.
+ base::TimeDelta total_processing_commands_time_;
+
+ // States related to each manager.
+ DecoderTextureState texture_state_;
+ DecoderFramebufferState framebuffer_state_;
+
+ scoped_ptr<GPUTracer> gpu_tracer_;
+ scoped_ptr<GPUStateTracer> gpu_state_tracer_;
+ const unsigned char* cb_command_trace_category_;
+ int gpu_trace_level_;
+ bool gpu_trace_commands_;
+ bool gpu_debug_commands_;
+
+ std::queue<linked_ptr<FenceCallback> > pending_readpixel_fences_;
+
+ // Used to validate multisample renderbuffers if needed
+ GLuint validation_texture_;
+ GLuint validation_fbo_multisample_;
+ GLuint validation_fbo_;
+
+ typedef gpu::gles2::GLES2Decoder::Error (GLES2DecoderImpl::*CmdHandler)(
+ uint32 immediate_data_size,
+ const void* data);
+
+ // A struct to hold info about each command.
+ struct CommandInfo {
+ CmdHandler cmd_handler;
+ uint8 arg_flags; // How to handle the arguments for this command
+ uint8 cmd_flags; // How to handle this command
+ uint16 arg_count; // How many arguments are expected for this command.
+ };
+
+ // A table of CommandInfo for all the commands.
+ static const CommandInfo command_info[kNumCommands - kStartPoint];
+
+ DISALLOW_COPY_AND_ASSIGN(GLES2DecoderImpl);
+};
+
+const GLES2DecoderImpl::CommandInfo GLES2DecoderImpl::command_info[] = {
+#define GLES2_CMD_OP(name) \
+ { \
+ &GLES2DecoderImpl::Handle##name, cmds::name::kArgFlags, \
+ cmds::name::cmd_flags, \
+ sizeof(cmds::name) / sizeof(CommandBufferEntry) - 1, \
+ } \
+ , /* NOLINT */
+ GLES2_COMMAND_LIST(GLES2_CMD_OP)
+#undef GLES2_CMD_OP
+};
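+
+// Illustrative sketch only: for a hypothetical command Foo, each entry of the
+// table above expands to
+//   { &GLES2DecoderImpl::HandleFoo, cmds::Foo::kArgFlags, cmds::Foo::cmd_flags,
+//     sizeof(cmds::Foo) / sizeof(CommandBufferEntry) - 1, },
+// pairing every command in GLES2_COMMAND_LIST with its handler and argument
+// metadata.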
+
+ScopedGLErrorSuppressor::ScopedGLErrorSuppressor(
+ const char* function_name, ErrorState* error_state)
+ : function_name_(function_name),
+ error_state_(error_state) {
+ ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state_, function_name_);
+}
+
+ScopedGLErrorSuppressor::~ScopedGLErrorSuppressor() {
+ ERRORSTATE_CLEAR_REAL_GL_ERRORS(error_state_, function_name_);
+}
+
+static void RestoreCurrentTextureBindings(ContextState* state, GLenum target) {
+ TextureUnit& info = state->texture_units[0];
+ GLuint last_id;
+ scoped_refptr<TextureRef> texture_ref;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ texture_ref = info.bound_texture_2d;
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ texture_ref = info.bound_texture_cube_map;
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ texture_ref = info.bound_texture_external_oes;
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ texture_ref = info.bound_texture_rectangle_arb;
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ if (texture_ref.get()) {
+ last_id = texture_ref->service_id();
+ } else {
+ last_id = 0;
+ }
+
+ glBindTexture(target, last_id);
+ glActiveTexture(GL_TEXTURE0 + state->active_texture_unit);
+}
+
+ScopedTextureBinder::ScopedTextureBinder(ContextState* state,
+ GLuint id,
+ GLenum target)
+ : state_(state),
+ target_(target) {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedTextureBinder::ctor", state_->GetErrorState());
+
+ // TODO(apatrick): Check if there are any other states that need to be reset
+ // before binding a new texture.
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(target, id);
+}
+
+ScopedTextureBinder::~ScopedTextureBinder() {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedTextureBinder::dtor", state_->GetErrorState());
+ RestoreCurrentTextureBindings(state_, target_);
+}
+
+ScopedRenderBufferBinder::ScopedRenderBufferBinder(ContextState* state,
+ GLuint id)
+ : state_(state) {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedRenderBufferBinder::ctor", state_->GetErrorState());
+ glBindRenderbufferEXT(GL_RENDERBUFFER, id);
+}
+
+ScopedRenderBufferBinder::~ScopedRenderBufferBinder() {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedRenderBufferBinder::dtor", state_->GetErrorState());
+ state_->RestoreRenderbufferBindings();
+}
+
+ScopedFrameBufferBinder::ScopedFrameBufferBinder(GLES2DecoderImpl* decoder,
+ GLuint id)
+ : decoder_(decoder) {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedFrameBufferBinder::ctor", decoder_->GetErrorState());
+ glBindFramebufferEXT(GL_FRAMEBUFFER, id);
+ decoder->OnFboChanged();
+}
+
+ScopedFrameBufferBinder::~ScopedFrameBufferBinder() {
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedFrameBufferBinder::dtor", decoder_->GetErrorState());
+ decoder_->RestoreCurrentFramebufferBindings();
+}
+
+ScopedResolvedFrameBufferBinder::ScopedResolvedFrameBufferBinder(
+ GLES2DecoderImpl* decoder, bool enforce_internal_framebuffer, bool internal)
+ : decoder_(decoder) {
+ resolve_and_bind_ = (
+ decoder_->offscreen_target_frame_buffer_.get() &&
+ decoder_->IsOffscreenBufferMultisampled() &&
+ (!decoder_->framebuffer_state_.bound_read_framebuffer.get() ||
+ enforce_internal_framebuffer));
+ if (!resolve_and_bind_)
+ return;
+
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedResolvedFrameBufferBinder::ctor", decoder_->GetErrorState());
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER_EXT,
+ decoder_->offscreen_target_frame_buffer_->id());
+ GLuint targetid;
+ if (internal) {
+ if (!decoder_->offscreen_resolved_frame_buffer_.get()) {
+ decoder_->offscreen_resolved_frame_buffer_.reset(
+ new BackFramebuffer(decoder_));
+ decoder_->offscreen_resolved_frame_buffer_->Create();
+ decoder_->offscreen_resolved_color_texture_.reset(
+ new BackTexture(decoder->memory_tracker(), &decoder->state_));
+ decoder_->offscreen_resolved_color_texture_->Create();
+
+ DCHECK(decoder_->offscreen_saved_color_format_);
+ decoder_->offscreen_resolved_color_texture_->AllocateStorage(
+ decoder_->offscreen_size_, decoder_->offscreen_saved_color_format_,
+ false);
+ decoder_->offscreen_resolved_frame_buffer_->AttachRenderTexture(
+ decoder_->offscreen_resolved_color_texture_.get());
+ if (decoder_->offscreen_resolved_frame_buffer_->CheckStatus() !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOG(ERROR) << "ScopedResolvedFrameBufferBinder failed "
+ << "because offscreen resolved FBO was incomplete.";
+ return;
+ }
+ }
+ targetid = decoder_->offscreen_resolved_frame_buffer_->id();
+ } else {
+ targetid = decoder_->offscreen_saved_frame_buffer_->id();
+ }
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, targetid);
+ const int width = decoder_->offscreen_size_.width();
+ const int height = decoder_->offscreen_size_.height();
+ decoder->state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ decoder->BlitFramebufferHelper(0,
+ 0,
+ width,
+ height,
+ 0,
+ 0,
+ width,
+ height,
+ GL_COLOR_BUFFER_BIT,
+ GL_NEAREST);
+ glBindFramebufferEXT(GL_FRAMEBUFFER, targetid);
+}
+
+ScopedResolvedFrameBufferBinder::~ScopedResolvedFrameBufferBinder() {
+ if (!resolve_and_bind_)
+ return;
+
+ ScopedGLErrorSuppressor suppressor(
+ "ScopedResolvedFrameBufferBinder::dtor", decoder_->GetErrorState());
+ decoder_->RestoreCurrentFramebufferBindings();
+ if (decoder_->state_.enable_flags.scissor_test) {
+ decoder_->state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, true);
+ }
+}
+
+BackTexture::BackTexture(
+ MemoryTracker* memory_tracker,
+ ContextState* state)
+ : memory_tracker_(memory_tracker, MemoryTracker::kUnmanaged),
+ state_(state),
+ bytes_allocated_(0),
+ id_(0) {
+}
+
+BackTexture::~BackTexture() {
+ // This does not destroy the render texture because that would require that
+ // the associated GL context was current. Just check that it was explicitly
+ // destroyed.
+ DCHECK_EQ(id_, 0u);
+}
+
+void BackTexture::Create() {
+ ScopedGLErrorSuppressor suppressor("BackTexture::Create",
+ state_->GetErrorState());
+ Destroy();
+ glGenTextures(1, &id_);
+ ScopedTextureBinder binder(state_, id_, GL_TEXTURE_2D);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ // TODO(apatrick): Attempt to diagnose crbug.com/97775. If SwapBuffers is
+ // never called on an offscreen context, no data will ever be uploaded to the
+  // saved offscreen color texture (it is deferred until SwapBuffers
+ // is called). My idea is that some nvidia drivers might have a bug where
+ // deleting a texture that has never been populated might cause a
+ // crash.
+ glTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 16, 16, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
+
+ bytes_allocated_ = 16u * 16u * 4u;
+ memory_tracker_.TrackMemAlloc(bytes_allocated_);
+}
+
+bool BackTexture::AllocateStorage(
+ const gfx::Size& size, GLenum format, bool zero) {
+ DCHECK_NE(id_, 0u);
+ ScopedGLErrorSuppressor suppressor("BackTexture::AllocateStorage",
+ state_->GetErrorState());
+ ScopedTextureBinder binder(state_, id_, GL_TEXTURE_2D);
+ uint32 image_size = 0;
+ GLES2Util::ComputeImageDataSizes(
+ size.width(), size.height(), format, GL_UNSIGNED_BYTE, 8, &image_size,
+ NULL, NULL);
+
+ if (!memory_tracker_.EnsureGPUMemoryAvailable(image_size)) {
+ return false;
+ }
+
+ scoped_ptr<char[]> zero_data;
+ if (zero) {
+ zero_data.reset(new char[image_size]);
+ memset(zero_data.get(), 0, image_size);
+ }
+
+ glTexImage2D(GL_TEXTURE_2D,
+ 0, // mip level
+ format,
+ size.width(),
+ size.height(),
+ 0, // border
+ format,
+ GL_UNSIGNED_BYTE,
+ zero_data.get());
+
+ size_ = size;
+
+ bool success = glGetError() == GL_NO_ERROR;
+ if (success) {
+ memory_tracker_.TrackMemFree(bytes_allocated_);
+ bytes_allocated_ = image_size;
+ memory_tracker_.TrackMemAlloc(bytes_allocated_);
+ }
+ return success;
+}
+
+void BackTexture::Copy(const gfx::Size& size, GLenum format) {
+ DCHECK_NE(id_, 0u);
+ ScopedGLErrorSuppressor suppressor("BackTexture::Copy",
+ state_->GetErrorState());
+ ScopedTextureBinder binder(state_, id_, GL_TEXTURE_2D);
+ glCopyTexImage2D(GL_TEXTURE_2D,
+ 0, // level
+ format,
+ 0, 0,
+ size.width(),
+ size.height(),
+ 0); // border
+}
+
+void BackTexture::Destroy() {
+ if (id_ != 0) {
+ ScopedGLErrorSuppressor suppressor("BackTexture::Destroy",
+ state_->GetErrorState());
+ glDeleteTextures(1, &id_);
+ id_ = 0;
+ }
+ memory_tracker_.TrackMemFree(bytes_allocated_);
+ bytes_allocated_ = 0;
+}
+
+void BackTexture::Invalidate() {
+ id_ = 0;
+}
+
+BackRenderbuffer::BackRenderbuffer(
+ RenderbufferManager* renderbuffer_manager,
+ MemoryTracker* memory_tracker,
+ ContextState* state)
+ : renderbuffer_manager_(renderbuffer_manager),
+ memory_tracker_(memory_tracker, MemoryTracker::kUnmanaged),
+ state_(state),
+ bytes_allocated_(0),
+ id_(0) {
+}
+
+BackRenderbuffer::~BackRenderbuffer() {
+ // This does not destroy the render buffer because that would require that
+ // the associated GL context was current. Just check that it was explicitly
+ // destroyed.
+ DCHECK_EQ(id_, 0u);
+}
+
+void BackRenderbuffer::Create() {
+ ScopedGLErrorSuppressor suppressor("BackRenderbuffer::Create",
+ state_->GetErrorState());
+ Destroy();
+ glGenRenderbuffersEXT(1, &id_);
+}
+
+bool BackRenderbuffer::AllocateStorage(const FeatureInfo* feature_info,
+ const gfx::Size& size,
+ GLenum format,
+ GLsizei samples) {
+ ScopedGLErrorSuppressor suppressor(
+ "BackRenderbuffer::AllocateStorage", state_->GetErrorState());
+ ScopedRenderBufferBinder binder(state_, id_);
+
+ uint32 estimated_size = 0;
+ if (!renderbuffer_manager_->ComputeEstimatedRenderbufferSize(
+ size.width(), size.height(), samples, format, &estimated_size)) {
+ return false;
+ }
+
+ if (!memory_tracker_.EnsureGPUMemoryAvailable(estimated_size)) {
+ return false;
+ }
+
+ if (samples <= 1) {
+ glRenderbufferStorageEXT(GL_RENDERBUFFER,
+ format,
+ size.width(),
+ size.height());
+ } else {
+ GLES2DecoderImpl::RenderbufferStorageMultisampleHelper(feature_info,
+ GL_RENDERBUFFER,
+ samples,
+ format,
+ size.width(),
+ size.height());
+ }
+ bool success = glGetError() == GL_NO_ERROR;
+ if (success) {
+ // Mark the previously allocated bytes as free.
+ memory_tracker_.TrackMemFree(bytes_allocated_);
+ bytes_allocated_ = estimated_size;
+ // Track the newly allocated bytes.
+ memory_tracker_.TrackMemAlloc(bytes_allocated_);
+ }
+ return success;
+}
+
+void BackRenderbuffer::Destroy() {
+ if (id_ != 0) {
+ ScopedGLErrorSuppressor suppressor("BackRenderbuffer::Destroy",
+ state_->GetErrorState());
+ glDeleteRenderbuffersEXT(1, &id_);
+ id_ = 0;
+ }
+ memory_tracker_.TrackMemFree(bytes_allocated_);
+ bytes_allocated_ = 0;
+}
+
+void BackRenderbuffer::Invalidate() {
+ id_ = 0;
+}
+
+BackFramebuffer::BackFramebuffer(GLES2DecoderImpl* decoder)
+ : decoder_(decoder),
+ id_(0) {
+}
+
+BackFramebuffer::~BackFramebuffer() {
+ // This does not destroy the frame buffer because that would require that
+ // the associated GL context was current. Just check that it was explicitly
+ // destroyed.
+ DCHECK_EQ(id_, 0u);
+}
+
+void BackFramebuffer::Create() {
+ ScopedGLErrorSuppressor suppressor("BackFramebuffer::Create",
+ decoder_->GetErrorState());
+ Destroy();
+ glGenFramebuffersEXT(1, &id_);
+}
+
+void BackFramebuffer::AttachRenderTexture(BackTexture* texture) {
+ DCHECK_NE(id_, 0u);
+ ScopedGLErrorSuppressor suppressor(
+ "BackFramebuffer::AttachRenderTexture", decoder_->GetErrorState());
+ ScopedFrameBufferBinder binder(decoder_, id_);
+ GLuint attach_id = texture ? texture->id() : 0;
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ attach_id,
+ 0);
+}
+
+void BackFramebuffer::AttachRenderBuffer(GLenum target,
+ BackRenderbuffer* render_buffer) {
+ DCHECK_NE(id_, 0u);
+ ScopedGLErrorSuppressor suppressor(
+ "BackFramebuffer::AttachRenderBuffer", decoder_->GetErrorState());
+ ScopedFrameBufferBinder binder(decoder_, id_);
+ GLuint attach_id = render_buffer ? render_buffer->id() : 0;
+ glFramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ target,
+ GL_RENDERBUFFER,
+ attach_id);
+}
+
+void BackFramebuffer::Destroy() {
+ if (id_ != 0) {
+ ScopedGLErrorSuppressor suppressor("BackFramebuffer::Destroy",
+ decoder_->GetErrorState());
+ glDeleteFramebuffersEXT(1, &id_);
+ id_ = 0;
+ }
+}
+
+void BackFramebuffer::Invalidate() {
+ id_ = 0;
+}
+
+GLenum BackFramebuffer::CheckStatus() {
+ DCHECK_NE(id_, 0u);
+ ScopedGLErrorSuppressor suppressor("BackFramebuffer::CheckStatus",
+ decoder_->GetErrorState());
+ ScopedFrameBufferBinder binder(decoder_, id_);
+ return glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
+}
+
+GLES2Decoder* GLES2Decoder::Create(ContextGroup* group) {
+ return new GLES2DecoderImpl(group);
+}
+
+GLES2DecoderImpl::GLES2DecoderImpl(ContextGroup* group)
+ : GLES2Decoder(),
+ group_(group),
+ logger_(&debug_marker_manager_),
+ state_(group_->feature_info(), this, &logger_),
+ unpack_flip_y_(false),
+ unpack_premultiply_alpha_(false),
+ unpack_unpremultiply_alpha_(false),
+ attrib_0_buffer_id_(0),
+ attrib_0_buffer_matches_value_(true),
+ attrib_0_size_(0),
+ fixed_attrib_buffer_id_(0),
+ fixed_attrib_buffer_size_(0),
+ offscreen_target_color_format_(0),
+ offscreen_target_depth_format_(0),
+ offscreen_target_stencil_format_(0),
+ offscreen_target_samples_(0),
+ offscreen_target_buffer_preserved_(true),
+ offscreen_saved_color_format_(0),
+ back_buffer_color_format_(0),
+ back_buffer_has_depth_(false),
+ back_buffer_has_stencil_(false),
+ surfaceless_(false),
+ backbuffer_needs_clear_bits_(0),
+ current_decoder_error_(error::kNoError),
+ use_shader_translator_(true),
+ validators_(group_->feature_info()->validators()),
+ feature_info_(group_->feature_info()),
+ frame_number_(0),
+ has_robustness_extension_(false),
+ reset_status_(GL_NO_ERROR),
+ reset_by_robustness_extension_(false),
+ supports_post_sub_buffer_(false),
+ force_webgl_glsl_validation_(false),
+ derivatives_explicitly_enabled_(false),
+ frag_depth_explicitly_enabled_(false),
+ draw_buffers_explicitly_enabled_(false),
+ shader_texture_lod_explicitly_enabled_(false),
+ compile_shader_always_succeeds_(false),
+ lose_context_when_out_of_memory_(false),
+ service_logging_(CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUServiceLoggingGPU)),
+ viewport_max_width_(0),
+ viewport_max_height_(0),
+ texture_state_(group_->feature_info()
+ ->workarounds()
+ .texsubimage2d_faster_than_teximage2d),
+ cb_command_trace_category_(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("cb_command"))),
+ gpu_trace_level_(2),
+ gpu_trace_commands_(false),
+ gpu_debug_commands_(false),
+ validation_texture_(0),
+ validation_fbo_multisample_(0),
+ validation_fbo_(0) {
+ DCHECK(group);
+
+ attrib_0_value_.v[0] = 0.0f;
+ attrib_0_value_.v[1] = 0.0f;
+ attrib_0_value_.v[2] = 0.0f;
+ attrib_0_value_.v[3] = 1.0f;
+
+ // The shader translator is used for WebGL even when running on EGL
+ // because additional restrictions are needed (like only enabling
+ // GL_OES_standard_derivatives on demand). It is used for the unit
+ // tests because GLES2DecoderWithShaderTest.GetShaderInfoLogValidArgs passes
+ // the empty string to CompileShader and this is not a valid shader.
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationMockGL ||
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableGLSLTranslator)) {
+ use_shader_translator_ = false;
+ }
+}
+
+GLES2DecoderImpl::~GLES2DecoderImpl() {
+}
+
+bool GLES2DecoderImpl::Initialize(
+ const scoped_refptr<gfx::GLSurface>& surface,
+ const scoped_refptr<gfx::GLContext>& context,
+ bool offscreen,
+ const gfx::Size& size,
+ const DisallowedFeatures& disallowed_features,
+ const std::vector<int32>& attribs) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::Initialize");
+ DCHECK(context->IsCurrent(surface.get()));
+ DCHECK(!context_.get());
+
+ surfaceless_ = surface->IsSurfaceless();
+
+ set_initialized();
+ gpu_tracer_.reset(new GPUTracer(this));
+ gpu_state_tracer_ = GPUStateTracer::Create(&state_);
+
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUDebugging)) {
+ set_debug(true);
+ }
+
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUCommandLogging)) {
+ set_log_commands(true);
+ }
+
+ compile_shader_always_succeeds_ = CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kCompileShaderAlwaysSucceeds);
+
+
+ // Take ownership of the context and surface. The surface can be replaced with
+ // SetSurface.
+ context_ = context;
+ surface_ = surface;
+
+ ContextCreationAttribHelper attrib_parser;
+ if (!attrib_parser.Parse(attribs))
+ return false;
+
+ // Save the loseContextWhenOutOfMemory context creation attribute.
+ lose_context_when_out_of_memory_ =
+ attrib_parser.lose_context_when_out_of_memory;
+
+ // If the failIfMajorPerformanceCaveat context creation attribute was true
+ // and we are using a software renderer, fail.
+ if (attrib_parser.fail_if_major_perf_caveat &&
+ feature_info_->feature_flags().is_swiftshader) {
+ group_ = NULL; // Must not destroy ContextGroup if it is not initialized.
+ Destroy(true);
+ return false;
+ }
+
+ if (!group_->Initialize(this, disallowed_features)) {
+ LOG(ERROR) << "GpuScheduler::InitializeCommon failed because group "
+ << "failed to initialize.";
+ group_ = NULL; // Must not destroy ContextGroup if it is not initialized.
+ Destroy(true);
+ return false;
+ }
+ CHECK_GL_ERROR();
+
+ disallowed_features_ = disallowed_features;
+
+ state_.attrib_values.resize(group_->max_vertex_attribs());
+ vertex_array_manager_.reset(new VertexArrayManager());
+
+ GLuint default_vertex_attrib_service_id = 0;
+ if (features().native_vertex_array_object) {
+ glGenVertexArraysOES(1, &default_vertex_attrib_service_id);
+ glBindVertexArrayOES(default_vertex_attrib_service_id);
+ }
+
+ state_.default_vertex_attrib_manager =
+ CreateVertexAttribManager(0, default_vertex_attrib_service_id, false);
+
+ state_.default_vertex_attrib_manager->Initialize(
+ group_->max_vertex_attribs(),
+ feature_info_->workarounds().init_vertex_attributes);
+
+ // vertex_attrib_manager is set to default_vertex_attrib_manager by this call
+ DoBindVertexArrayOES(0);
+
+ query_manager_.reset(new QueryManager(this, feature_info_.get()));
+
+ image_manager_.reset(new ImageManager);
+
+ util_.set_num_compressed_texture_formats(
+ validators_->compressed_texture_format.GetValues().size());
+
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
+ // We have to enable vertex array 0 on OpenGL or it won't render. Note that
+ // OpenGL ES 2.0 does not have this issue.
+ glEnableVertexAttribArray(0);
+ }
+ glGenBuffersARB(1, &attrib_0_buffer_id_);
+ glBindBuffer(GL_ARRAY_BUFFER, attrib_0_buffer_id_);
+ glVertexAttribPointer(0, 1, GL_FLOAT, GL_FALSE, 0, NULL);
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ glGenBuffersARB(1, &fixed_attrib_buffer_id_);
+
+ state_.texture_units.resize(group_->max_texture_units());
+ for (uint32 tt = 0; tt < state_.texture_units.size(); ++tt) {
+ glActiveTexture(GL_TEXTURE0 + tt);
+ // We want the last bind to be 2D.
+ TextureRef* ref;
+ if (features().oes_egl_image_external) {
+ ref = texture_manager()->GetDefaultTextureInfo(
+ GL_TEXTURE_EXTERNAL_OES);
+ state_.texture_units[tt].bound_texture_external_oes = ref;
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, ref ? ref->service_id() : 0);
+ }
+ if (features().arb_texture_rectangle) {
+ ref = texture_manager()->GetDefaultTextureInfo(
+ GL_TEXTURE_RECTANGLE_ARB);
+ state_.texture_units[tt].bound_texture_rectangle_arb = ref;
+ glBindTexture(GL_TEXTURE_RECTANGLE_ARB, ref ? ref->service_id() : 0);
+ }
+ ref = texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_CUBE_MAP);
+ state_.texture_units[tt].bound_texture_cube_map = ref;
+ glBindTexture(GL_TEXTURE_CUBE_MAP, ref ? ref->service_id() : 0);
+ ref = texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_2D);
+ state_.texture_units[tt].bound_texture_2d = ref;
+ glBindTexture(GL_TEXTURE_2D, ref ? ref->service_id() : 0);
+ }
+ glActiveTexture(GL_TEXTURE0);
+ CHECK_GL_ERROR();
+
+ if (offscreen) {
+ if (attrib_parser.samples > 0 && attrib_parser.sample_buffers > 0 &&
+ features().chromium_framebuffer_multisample) {
+ // Per ext_framebuffer_multisample spec, need max bound on sample count.
+ // max_sample_count must be initialized to a sane value. If
+ // glGetIntegerv() throws a GL error, it leaves its argument unchanged.
+ GLint max_sample_count = 1;
+ glGetIntegerv(GL_MAX_SAMPLES_EXT, &max_sample_count);
+ offscreen_target_samples_ = std::min(attrib_parser.samples,
+ max_sample_count);
+ } else {
+ offscreen_target_samples_ = 1;
+ }
+ offscreen_target_buffer_preserved_ = attrib_parser.buffer_preserved;
+
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ const bool rgb8_supported =
+ context_->HasExtension("GL_OES_rgb8_rgba8");
+ // The only available default render buffer formats in GLES2 have very
+ // little precision. Don't enable multisampling unless 8-bit render
+ // buffer formats are available--instead fall back to 8-bit textures.
+ if (rgb8_supported && offscreen_target_samples_ > 1) {
+ offscreen_target_color_format_ = attrib_parser.alpha_size > 0 ?
+ GL_RGBA8 : GL_RGB8;
+ } else {
+ offscreen_target_samples_ = 1;
+ offscreen_target_color_format_ = attrib_parser.alpha_size > 0 ?
+ GL_RGBA : GL_RGB;
+ }
+
+ // ANGLE only supports packed depth/stencil formats, so use it if it is
+ // available.
+ const bool depth24_stencil8_supported =
+ feature_info_->feature_flags().packed_depth24_stencil8;
+ VLOG(1) << "GL_OES_packed_depth_stencil "
+ << (depth24_stencil8_supported ? "" : "not ") << "supported.";
+ if ((attrib_parser.depth_size > 0 || attrib_parser.stencil_size > 0) &&
+ depth24_stencil8_supported) {
+ offscreen_target_depth_format_ = GL_DEPTH24_STENCIL8;
+ offscreen_target_stencil_format_ = 0;
+ } else {
+ // It may be the case that this depth/stencil combination is not
+ // supported, but this will be checked later by CheckFramebufferStatus.
+ offscreen_target_depth_format_ = attrib_parser.depth_size > 0 ?
+ GL_DEPTH_COMPONENT16 : 0;
+ offscreen_target_stencil_format_ = attrib_parser.stencil_size > 0 ?
+ GL_STENCIL_INDEX8 : 0;
+ }
+ } else {
+ offscreen_target_color_format_ = attrib_parser.alpha_size > 0 ?
+ GL_RGBA : GL_RGB;
+
+ // If depth is requested at all, use the packed depth stencil format if
+ // it's available, as some desktop GL drivers don't support any non-packed
+ // formats for depth attachments.
+ const bool depth24_stencil8_supported =
+ feature_info_->feature_flags().packed_depth24_stencil8;
+ VLOG(1) << "GL_EXT_packed_depth_stencil "
+ << (depth24_stencil8_supported ? "" : "not ") << "supported.";
+
+ if ((attrib_parser.depth_size > 0 || attrib_parser.stencil_size > 0) &&
+ depth24_stencil8_supported) {
+ offscreen_target_depth_format_ = GL_DEPTH24_STENCIL8;
+ offscreen_target_stencil_format_ = 0;
+ } else {
+ offscreen_target_depth_format_ = attrib_parser.depth_size > 0 ?
+ GL_DEPTH_COMPONENT : 0;
+ offscreen_target_stencil_format_ = attrib_parser.stencil_size > 0 ?
+ GL_STENCIL_INDEX : 0;
+ }
+ }
+
+ offscreen_saved_color_format_ = attrib_parser.alpha_size > 0 ?
+ GL_RGBA : GL_RGB;
+
+ // Create the target frame buffer. This is the one that the client renders
+ // directly to.
+ offscreen_target_frame_buffer_.reset(new BackFramebuffer(this));
+ offscreen_target_frame_buffer_->Create();
+ // Due to GLES2 format limitations, either the color texture (for
+ // non-multisampling) or the color render buffer (for multisampling) will be
+ // attached to the offscreen frame buffer. The render buffer has more
+ // limited formats available to it, but the texture can't do multisampling.
+ if (IsOffscreenBufferMultisampled()) {
+ offscreen_target_color_render_buffer_.reset(new BackRenderbuffer(
+ renderbuffer_manager(), memory_tracker(), &state_));
+ offscreen_target_color_render_buffer_->Create();
+ } else {
+ offscreen_target_color_texture_.reset(new BackTexture(
+ memory_tracker(), &state_));
+ offscreen_target_color_texture_->Create();
+ }
+ offscreen_target_depth_render_buffer_.reset(new BackRenderbuffer(
+ renderbuffer_manager(), memory_tracker(), &state_));
+ offscreen_target_depth_render_buffer_->Create();
+ offscreen_target_stencil_render_buffer_.reset(new BackRenderbuffer(
+ renderbuffer_manager(), memory_tracker(), &state_));
+ offscreen_target_stencil_render_buffer_->Create();
+
+ // Create the saved offscreen texture. The target frame buffer is copied
+ // here when SwapBuffers is called.
+ offscreen_saved_frame_buffer_.reset(new BackFramebuffer(this));
+ offscreen_saved_frame_buffer_->Create();
+ offscreen_saved_color_texture_.reset(new BackTexture(
+ memory_tracker(), &state_));
+ offscreen_saved_color_texture_->Create();
+
+  // Allocate the render buffers at their initial size and check that the
+  // status of the frame buffers is okay.
+ if (!ResizeOffscreenFrameBuffer(size)) {
+ LOG(ERROR) << "Could not allocate offscreen buffer storage.";
+ Destroy(true);
+ return false;
+ }
+
+ // Allocate the offscreen saved color texture.
+ DCHECK(offscreen_saved_color_format_);
+ offscreen_saved_color_texture_->AllocateStorage(
+ gfx::Size(1, 1), offscreen_saved_color_format_, true);
+
+ offscreen_saved_frame_buffer_->AttachRenderTexture(
+ offscreen_saved_color_texture_.get());
+ if (offscreen_saved_frame_buffer_->CheckStatus() !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOG(ERROR) << "Offscreen saved FBO was incomplete.";
+ Destroy(true);
+ return false;
+ }
+
+ // Bind to the new default frame buffer (the offscreen target frame buffer).
+ // This should now be associated with ID zero.
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0);
+ } else {
+ glBindFramebufferEXT(GL_FRAMEBUFFER, GetBackbufferServiceId());
+    // These flags do NOT indicate whether the back buffer has these
+    // properties. They indicate whether we want the command buffer to enforce
+    // them regardless of what the real back buffer is, assuming the real back
+    // buffer gives us more than we asked for. In other words, if we ask for
+    // RGB and get RGBA, we'll make it appear RGB. If on the other hand we ask
+    // for RGBA and get RGB, we can't do anything about that.
+
+ if (!surfaceless_) {
+ GLint v = 0;
+ glGetIntegerv(GL_ALPHA_BITS, &v);
+      // If the user requested RGBA and we have RGBA, report RGBA. If the user
+      // requested RGB, report RGB. If the user did not specify a preference,
+      // use whatever we were given. Same for DEPTH and STENCIL.
+ back_buffer_color_format_ =
+ (attrib_parser.alpha_size != 0 && v > 0) ? GL_RGBA : GL_RGB;
+ glGetIntegerv(GL_DEPTH_BITS, &v);
+ back_buffer_has_depth_ = attrib_parser.depth_size != 0 && v > 0;
+ glGetIntegerv(GL_STENCIL_BITS, &v);
+ back_buffer_has_stencil_ = attrib_parser.stencil_size != 0 && v > 0;
+ }
+ }
+
+ // OpenGL ES 2.0 implicitly enables the desktop GL capability
+ // VERTEX_PROGRAM_POINT_SIZE and doesn't expose this enum. This fact
+ // isn't well documented; it was discovered in the Khronos OpenGL ES
+ // mailing list archives. It also implicitly enables the desktop GL
+ // capability GL_POINT_SPRITE to provide access to the gl_PointCoord
+ // variable in fragment shaders.
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
+ glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
+ glEnable(GL_POINT_SPRITE);
+ }
+
+ has_robustness_extension_ =
+ context->HasExtension("GL_ARB_robustness") ||
+ context->HasExtension("GL_EXT_robustness");
+
+ if (!InitializeShaderTranslator()) {
+ return false;
+ }
+
+ state_.viewport_width = size.width();
+ state_.viewport_height = size.height();
+
+ GLint viewport_params[4] = { 0 };
+ glGetIntegerv(GL_MAX_VIEWPORT_DIMS, viewport_params);
+ viewport_max_width_ = viewport_params[0];
+ viewport_max_height_ = viewport_params[1];
+
+ state_.scissor_width = state_.viewport_width;
+ state_.scissor_height = state_.viewport_height;
+
+ // Set all the default state because some GL drivers get it wrong.
+ state_.InitCapabilities(NULL);
+ state_.InitState(NULL);
+ glActiveTexture(GL_TEXTURE0 + state_.active_texture_unit);
+
+ DoBindBuffer(GL_ARRAY_BUFFER, 0);
+ DoBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0);
+ DoBindRenderbuffer(GL_RENDERBUFFER, 0);
+
+ bool call_gl_clear = !surfaceless_;
+#if defined(OS_ANDROID)
+  // Temporary workaround for Android WebView because this clear ignores the
+  // clip and corrupts the external UI of the app. Not calling glClear is ok
+  // because the system already clears the buffer before each draw. A proper
+  // fix might be setting the scissor clip properly before initialization. See
+  // crbug.com/259023 for details.
+ call_gl_clear = surface_->GetHandle();
+#endif
+ if (call_gl_clear) {
+ // Clear the backbuffer.
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+ }
+
+ supports_post_sub_buffer_ = surface->SupportsPostSubBuffer();
+ if (feature_info_->workarounds()
+ .disable_post_sub_buffers_for_onscreen_surfaces &&
+ !surface->IsOffscreen())
+ supports_post_sub_buffer_ = false;
+
+ if (feature_info_->workarounds().reverse_point_sprite_coord_origin) {
+ glPointParameteri(GL_POINT_SPRITE_COORD_ORIGIN, GL_LOWER_LEFT);
+ }
+
+ if (feature_info_->workarounds().unbind_fbo_on_context_switch) {
+ context_->SetUnbindFboOnMakeCurrent();
+ }
+
+ // Only compositor contexts are known to use only the subset of GL
+ // that can be safely migrated between the iGPU and the dGPU. Mark
+ // those contexts as safe to forcibly transition between the GPUs.
+ // http://crbug.com/180876, http://crbug.com/227228
+ if (!offscreen)
+ context_->SetSafeToForceGpuSwitch();
+
+ async_pixel_transfer_manager_.reset(
+ AsyncPixelTransferManager::Create(context.get()));
+ async_pixel_transfer_manager_->Initialize(texture_manager());
+
+ framebuffer_manager()->AddObserver(this);
+
+ return true;
+}
+
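+// Reports the optional features this decoder exposes to the client, derived
+// from the FeatureInfo flags and the active driver workarounds.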
+Capabilities GLES2DecoderImpl::GetCapabilities() {
+ DCHECK(initialized());
+
+ Capabilities caps;
+
+ caps.egl_image_external =
+ feature_info_->feature_flags().oes_egl_image_external;
+ caps.texture_format_bgra8888 =
+ feature_info_->feature_flags().ext_texture_format_bgra8888;
+ caps.texture_format_etc1 =
+ feature_info_->feature_flags().oes_compressed_etc1_rgb8_texture;
+ caps.texture_format_etc1_npot =
+ caps.texture_format_etc1 && !workarounds().etc1_power_of_two_only;
+ caps.texture_rectangle = feature_info_->feature_flags().arb_texture_rectangle;
+ caps.texture_usage = feature_info_->feature_flags().angle_texture_usage;
+ caps.texture_storage = feature_info_->feature_flags().ext_texture_storage;
+ caps.discard_framebuffer =
+ feature_info_->feature_flags().ext_discard_framebuffer;
+ caps.sync_query = feature_info_->feature_flags().chromium_sync_query;
+
+#if defined(OS_MACOSX)
+  // This is unconditionally true on Mac; no need to test for it at runtime.
+ caps.iosurface = true;
+#endif
+
+ caps.post_sub_buffer = supports_post_sub_buffer_;
+ caps.image = true;
+
+ return caps;
+}
+
+void GLES2DecoderImpl::UpdateCapabilities() {
+ util_.set_num_compressed_texture_formats(
+ validators_->compressed_texture_format.GetValues().size());
+ util_.set_num_shader_binary_formats(
+ validators_->shader_binary_format.GetValues().size());
+}
+
+bool GLES2DecoderImpl::InitializeShaderTranslator() {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::InitializeShaderTranslator");
+
+ if (!use_shader_translator_) {
+ return true;
+ }
+ ShBuiltInResources resources;
+ ShInitBuiltInResources(&resources);
+ resources.MaxVertexAttribs = group_->max_vertex_attribs();
+ resources.MaxVertexUniformVectors =
+ group_->max_vertex_uniform_vectors();
+ resources.MaxVaryingVectors = group_->max_varying_vectors();
+ resources.MaxVertexTextureImageUnits =
+ group_->max_vertex_texture_image_units();
+ resources.MaxCombinedTextureImageUnits = group_->max_texture_units();
+ resources.MaxTextureImageUnits = group_->max_texture_image_units();
+ resources.MaxFragmentUniformVectors =
+ group_->max_fragment_uniform_vectors();
+ resources.MaxDrawBuffers = group_->max_draw_buffers();
+ resources.MaxExpressionComplexity = 256;
+ resources.MaxCallStackDepth = 256;
+
+ GLint range[2] = { 0, 0 };
+ GLint precision = 0;
+ GetShaderPrecisionFormatImpl(GL_FRAGMENT_SHADER, GL_HIGH_FLOAT,
+ range, &precision);
+ resources.FragmentPrecisionHigh =
+ PrecisionMeetsSpecForHighpFloat(range[0], range[1], precision);
+
+ if (force_webgl_glsl_validation_) {
+ resources.OES_standard_derivatives = derivatives_explicitly_enabled_;
+ resources.EXT_frag_depth = frag_depth_explicitly_enabled_;
+ resources.EXT_draw_buffers = draw_buffers_explicitly_enabled_;
+ if (!draw_buffers_explicitly_enabled_)
+ resources.MaxDrawBuffers = 1;
+ resources.EXT_shader_texture_lod = shader_texture_lod_explicitly_enabled_;
+ } else {
+ resources.OES_standard_derivatives =
+ features().oes_standard_derivatives ? 1 : 0;
+ resources.ARB_texture_rectangle =
+ features().arb_texture_rectangle ? 1 : 0;
+ resources.OES_EGL_image_external =
+ features().oes_egl_image_external ? 1 : 0;
+ resources.EXT_draw_buffers =
+ features().ext_draw_buffers ? 1 : 0;
+ resources.EXT_frag_depth =
+ features().ext_frag_depth ? 1 : 0;
+ resources.EXT_shader_texture_lod =
+ features().ext_shader_texture_lod ? 1 : 0;
+ }
+
+ ShShaderSpec shader_spec = force_webgl_glsl_validation_ ? SH_WEBGL_SPEC
+ : SH_GLES2_SPEC;
+ if (shader_spec == SH_WEBGL_SPEC && features().enable_shader_name_hashing)
+ resources.HashFunction = &CityHash64;
+ else
+ resources.HashFunction = NULL;
+ ShaderTranslatorInterface::GlslImplementationType implementation_type =
+ gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2 ?
+ ShaderTranslatorInterface::kGlslES : ShaderTranslatorInterface::kGlsl;
+ int driver_bug_workarounds = 0;
+ if (workarounds().needs_glsl_built_in_function_emulation)
+ driver_bug_workarounds |= SH_EMULATE_BUILT_IN_FUNCTIONS;
+ if (workarounds().init_gl_position_in_vertex_shader)
+ driver_bug_workarounds |= SH_INIT_GL_POSITION;
+ if (workarounds().unfold_short_circuit_as_ternary_operation)
+ driver_bug_workarounds |= SH_UNFOLD_SHORT_CIRCUIT;
+ if (workarounds().init_varyings_without_static_use)
+ driver_bug_workarounds |= SH_INIT_VARYINGS_WITHOUT_STATIC_USE;
+ if (workarounds().unroll_for_loop_with_sampler_array_index)
+ driver_bug_workarounds |= SH_UNROLL_FOR_LOOP_WITH_SAMPLER_ARRAY_INDEX;
+ if (workarounds().scalarize_vec_and_mat_constructor_args)
+ driver_bug_workarounds |= SH_SCALARIZE_VEC_AND_MAT_CONSTRUCTOR_ARGS;
+ if (workarounds().regenerate_struct_names)
+ driver_bug_workarounds |= SH_REGENERATE_STRUCT_NAMES;
+
+ vertex_translator_ = shader_translator_cache()->GetTranslator(
+ GL_VERTEX_SHADER,
+ shader_spec,
+ &resources,
+ implementation_type,
+ static_cast<ShCompileOptions>(driver_bug_workarounds));
+ if (!vertex_translator_.get()) {
+ LOG(ERROR) << "Could not initialize vertex shader translator.";
+ Destroy(true);
+ return false;
+ }
+
+ fragment_translator_ = shader_translator_cache()->GetTranslator(
+ GL_FRAGMENT_SHADER,
+ shader_spec,
+ &resources,
+ implementation_type,
+ static_cast<ShCompileOptions>(driver_bug_workarounds));
+ if (!fragment_translator_.get()) {
+ LOG(ERROR) << "Could not initialize fragment shader translator.";
+ Destroy(true);
+ return false;
+ }
+ return true;
+}
+
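+// The Gen*Helper functions below fail if any requested client id is already in
+// use; otherwise they create the service-side objects and register the
+// client/service id mappings.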
+bool GLES2DecoderImpl::GenBuffersHelper(GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetBuffer(client_ids[ii])) {
+ return false;
+ }
+ }
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+ glGenBuffersARB(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateBuffer(client_ids[ii], service_ids[ii]);
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::GenFramebuffersHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetFramebuffer(client_ids[ii])) {
+ return false;
+ }
+ }
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+ glGenFramebuffersEXT(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateFramebuffer(client_ids[ii], service_ids[ii]);
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::GenRenderbuffersHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetRenderbuffer(client_ids[ii])) {
+ return false;
+ }
+ }
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+ glGenRenderbuffersEXT(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateRenderbuffer(client_ids[ii], service_ids[ii]);
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::GenTexturesHelper(GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetTexture(client_ids[ii])) {
+ return false;
+ }
+ }
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+ glGenTextures(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateTexture(client_ids[ii], service_ids[ii]);
+ }
+ return true;
+}
+
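+// The Delete*Helper functions below detach each object from any state that
+// still references it (vertex attribs, texture units, framebuffer bindings)
+// before removing its client/service id mapping.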
+void GLES2DecoderImpl::DeleteBuffersHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ Buffer* buffer = GetBuffer(client_ids[ii]);
+ if (buffer && !buffer->IsDeleted()) {
+ state_.vertex_attrib_manager->Unbind(buffer);
+ if (state_.bound_array_buffer.get() == buffer) {
+ state_.bound_array_buffer = NULL;
+ }
+ RemoveBuffer(client_ids[ii]);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DeleteFramebuffersHelper(
+ GLsizei n, const GLuint* client_ids) {
+ bool supports_separate_framebuffer_binds =
+ features().chromium_framebuffer_multisample;
+
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ Framebuffer* framebuffer =
+ GetFramebuffer(client_ids[ii]);
+ if (framebuffer && !framebuffer->IsDeleted()) {
+ if (framebuffer == framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.bound_draw_framebuffer = NULL;
+ framebuffer_state_.clear_state_dirty = true;
+ GLenum target = supports_separate_framebuffer_binds ?
+ GL_DRAW_FRAMEBUFFER_EXT : GL_FRAMEBUFFER;
+ glBindFramebufferEXT(target, GetBackbufferServiceId());
+ }
+ if (framebuffer == framebuffer_state_.bound_read_framebuffer.get()) {
+ framebuffer_state_.bound_read_framebuffer = NULL;
+ GLenum target = supports_separate_framebuffer_binds ?
+ GL_READ_FRAMEBUFFER_EXT : GL_FRAMEBUFFER;
+ glBindFramebufferEXT(target, GetBackbufferServiceId());
+ }
+ OnFboChanged();
+ RemoveFramebuffer(client_ids[ii]);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DeleteRenderbuffersHelper(
+ GLsizei n, const GLuint* client_ids) {
+ bool supports_separate_framebuffer_binds =
+ features().chromium_framebuffer_multisample;
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ Renderbuffer* renderbuffer =
+ GetRenderbuffer(client_ids[ii]);
+ if (renderbuffer && !renderbuffer->IsDeleted()) {
+ if (state_.bound_renderbuffer.get() == renderbuffer) {
+ state_.bound_renderbuffer = NULL;
+ }
+ // Unbind from current framebuffers.
+ if (supports_separate_framebuffer_binds) {
+ if (framebuffer_state_.bound_read_framebuffer.get()) {
+ framebuffer_state_.bound_read_framebuffer
+ ->UnbindRenderbuffer(GL_READ_FRAMEBUFFER_EXT, renderbuffer);
+ }
+ if (framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.bound_draw_framebuffer
+ ->UnbindRenderbuffer(GL_DRAW_FRAMEBUFFER_EXT, renderbuffer);
+ }
+ } else {
+ if (framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.bound_draw_framebuffer
+ ->UnbindRenderbuffer(GL_FRAMEBUFFER, renderbuffer);
+ }
+ }
+ framebuffer_state_.clear_state_dirty = true;
+ RemoveRenderbuffer(client_ids[ii]);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DeleteTexturesHelper(
+ GLsizei n, const GLuint* client_ids) {
+ bool supports_separate_framebuffer_binds =
+ features().chromium_framebuffer_multisample;
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ TextureRef* texture_ref = GetTexture(client_ids[ii]);
+ if (texture_ref) {
+ Texture* texture = texture_ref->texture();
+ if (texture->IsAttachedToFramebuffer()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+      // Unbind texture_ref from the texture units.
+ for (size_t jj = 0; jj < state_.texture_units.size(); ++jj) {
+ state_.texture_units[jj].Unbind(texture_ref);
+ }
+ // Unbind from current framebuffers.
+ if (supports_separate_framebuffer_binds) {
+ if (framebuffer_state_.bound_read_framebuffer.get()) {
+ framebuffer_state_.bound_read_framebuffer
+ ->UnbindTexture(GL_READ_FRAMEBUFFER_EXT, texture_ref);
+ }
+ if (framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.bound_draw_framebuffer
+ ->UnbindTexture(GL_DRAW_FRAMEBUFFER_EXT, texture_ref);
+ }
+ } else {
+ if (framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.bound_draw_framebuffer
+ ->UnbindTexture(GL_FRAMEBUFFER, texture_ref);
+ }
+ }
+#if defined(OS_MACOSX)
+ GLuint service_id = texture->service_id();
+ if (texture->target() == GL_TEXTURE_RECTANGLE_ARB) {
+ ReleaseIOSurfaceForTexture(service_id);
+ }
+#endif
+ RemoveTexture(client_ids[ii]);
+ }
+ }
+}
+
+// } // anonymous namespace
+
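+// Makes the decoder's context current on its surface, treating failure or a
+// detected GPU reset as context loss.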
+bool GLES2DecoderImpl::MakeCurrent() {
+ if (!context_.get())
+ return false;
+
+ if (!context_->MakeCurrent(surface_.get()) || WasContextLost()) {
+ LOG(ERROR) << " GLES2DecoderImpl: Context lost during MakeCurrent.";
+
+ // Some D3D drivers cannot recover from device lost in the GPU process
+ // sandbox. Allow a new GPU process to launch.
+ if (workarounds().exit_on_context_lost) {
+ LOG(ERROR) << "Exiting GPU process because some drivers cannot reset"
+ << " a D3D device in the Chrome GPU process sandbox.";
+#if defined(OS_WIN)
+ base::win::SetShouldCrashOnProcessDetach(false);
+#endif
+ exit(0);
+ }
+
+ return false;
+ }
+
+ ProcessFinishedAsyncTransfers();
+
+ // Rebind the FBO if it was unbound by the context.
+ if (workarounds().unbind_fbo_on_context_switch)
+ RestoreFramebufferBindings();
+
+ framebuffer_state_.clear_state_dirty = true;
+
+ return true;
+}
+
+void GLES2DecoderImpl::ProcessFinishedAsyncTransfers() {
+ ProcessPendingReadPixels();
+ if (engine() && query_manager_.get())
+ query_manager_->ProcessPendingTransferQueries();
+
+ // TODO(epenner): Is there a better place to do this?
+ // This needs to occur before we execute any batch of commands
+  // from the client, as the client may have received an async
+ // completion while issuing those commands.
+ // "DidFlushStart" would be ideal if we had such a callback.
+ async_pixel_transfer_manager_->BindCompletedAsyncTransfers();
+}
+
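+// Rebinds |framebuffer| to |target|, falling back to the (possibly emulated)
+// back buffer when no framebuffer object is bound.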
+static void RebindCurrentFramebuffer(
+ GLenum target,
+ Framebuffer* framebuffer,
+ GLuint back_buffer_service_id) {
+ GLuint framebuffer_id = framebuffer ? framebuffer->service_id() : 0;
+
+ if (framebuffer_id == 0) {
+ framebuffer_id = back_buffer_service_id;
+ }
+
+ glBindFramebufferEXT(target, framebuffer_id);
+}
+
+void GLES2DecoderImpl::RestoreCurrentFramebufferBindings() {
+ framebuffer_state_.clear_state_dirty = true;
+
+ if (!features().chromium_framebuffer_multisample) {
+ RebindCurrentFramebuffer(
+ GL_FRAMEBUFFER,
+ framebuffer_state_.bound_draw_framebuffer.get(),
+ GetBackbufferServiceId());
+ } else {
+ RebindCurrentFramebuffer(
+ GL_READ_FRAMEBUFFER_EXT,
+ framebuffer_state_.bound_read_framebuffer.get(),
+ GetBackbufferServiceId());
+ RebindCurrentFramebuffer(
+ GL_DRAW_FRAMEBUFFER_EXT,
+ framebuffer_state_.bound_draw_framebuffer.get(),
+ GetBackbufferServiceId());
+ }
+ OnFboChanged();
+}
+
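+// Returns true if the given framebuffer (or the back buffer when |framebuffer|
+// is NULL) is usable for rendering, lazily clearing any attachments that still
+// need clearing.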
+bool GLES2DecoderImpl::CheckFramebufferValid(
+ Framebuffer* framebuffer,
+ GLenum target, const char* func_name) {
+ if (!framebuffer) {
+ if (surfaceless_)
+ return false;
+ if (backbuffer_needs_clear_bits_) {
+ glClearColor(0, 0, 0, (GLES2Util::GetChannelsForFormat(
+ offscreen_target_color_format_) & 0x0008) != 0 ? 0 : 1);
+ state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glClearStencil(0);
+ state_.SetDeviceStencilMaskSeparate(GL_FRONT, kDefaultStencilMask);
+ state_.SetDeviceStencilMaskSeparate(GL_BACK, kDefaultStencilMask);
+ glClearDepth(1.0f);
+ state_.SetDeviceDepthMask(GL_TRUE);
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ bool reset_draw_buffer = false;
+      if ((backbuffer_needs_clear_bits_ & GL_COLOR_BUFFER_BIT) != 0 &&
+ group_->draw_buffer() == GL_NONE) {
+ reset_draw_buffer = true;
+ GLenum buf = GL_BACK;
+ if (GetBackbufferServiceId() != 0) // emulated backbuffer
+ buf = GL_COLOR_ATTACHMENT0;
+ glDrawBuffersARB(1, &buf);
+ }
+ glClear(backbuffer_needs_clear_bits_);
+ if (reset_draw_buffer) {
+ GLenum buf = GL_NONE;
+ glDrawBuffersARB(1, &buf);
+ }
+ backbuffer_needs_clear_bits_ = 0;
+ RestoreClearState();
+ }
+ return true;
+ }
+
+ if (framebuffer_manager()->IsComplete(framebuffer)) {
+ return true;
+ }
+
+ GLenum completeness = framebuffer->IsPossiblyComplete();
+ if (completeness != GL_FRAMEBUFFER_COMPLETE) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_FRAMEBUFFER_OPERATION, func_name, "framebuffer incomplete");
+ return false;
+ }
+
+ // Are all the attachments cleared?
+ if (renderbuffer_manager()->HaveUnclearedRenderbuffers() ||
+ texture_manager()->HaveUnclearedMips()) {
+ if (!framebuffer->IsCleared()) {
+ // Can we clear them?
+ if (framebuffer->GetStatus(texture_manager(), target) !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_FRAMEBUFFER_OPERATION, func_name,
+ "framebuffer incomplete (clear)");
+ return false;
+ }
+ ClearUnclearedAttachments(target, framebuffer);
+ }
+ }
+
+ if (!framebuffer_manager()->IsComplete(framebuffer)) {
+ if (framebuffer->GetStatus(texture_manager(), target) !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_FRAMEBUFFER_OPERATION, func_name,
+ "framebuffer incomplete (check)");
+ return false;
+ }
+ framebuffer_manager()->MarkAsComplete(framebuffer);
+ }
+
+ // NOTE: At this point we don't know if the framebuffer is complete but
+ // we DO know that everything that needs to be cleared has been cleared.
+ return true;
+}
+
+bool GLES2DecoderImpl::CheckBoundFramebuffersValid(const char* func_name) {
+ if (!features().chromium_framebuffer_multisample) {
+ bool valid = CheckFramebufferValid(
+ framebuffer_state_.bound_draw_framebuffer.get(), GL_FRAMEBUFFER_EXT,
+ func_name);
+
+ if (valid)
+ OnUseFramebuffer();
+
+ return valid;
+ }
+ return CheckFramebufferValid(framebuffer_state_.bound_draw_framebuffer.get(),
+ GL_DRAW_FRAMEBUFFER_EXT,
+ func_name) &&
+ CheckFramebufferValid(framebuffer_state_.bound_read_framebuffer.get(),
+ GL_READ_FRAMEBUFFER_EXT,
+ func_name);
+}
+
+bool GLES2DecoderImpl::CheckBoundReadFramebufferColorAttachment(
+ const char* func_name) {
+ Framebuffer* framebuffer = features().chromium_framebuffer_multisample ?
+ framebuffer_state_.bound_read_framebuffer.get() :
+ framebuffer_state_.bound_draw_framebuffer.get();
+ if (!framebuffer)
+ return true;
+ if (framebuffer->GetAttachment(GL_COLOR_ATTACHMENT0) == NULL) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, func_name, "no color image attached");
+ return false;
+ }
+ return true;
+}
+
+gfx::Size GLES2DecoderImpl::GetBoundReadFrameBufferSize() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_READ_FRAMEBUFFER_EXT);
+ if (framebuffer != NULL) {
+ const Framebuffer::Attachment* attachment =
+ framebuffer->GetAttachment(GL_COLOR_ATTACHMENT0);
+ if (attachment) {
+ return gfx::Size(attachment->width(), attachment->height());
+ }
+ return gfx::Size(0, 0);
+ } else if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_size_;
+ } else {
+ return surface_->GetSize();
+ }
+}
+
+GLenum GLES2DecoderImpl::GetBoundReadFrameBufferTextureType() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_READ_FRAMEBUFFER_EXT);
+ if (framebuffer != NULL) {
+ return framebuffer->GetColorAttachmentTextureType();
+ } else {
+ return GL_UNSIGNED_BYTE;
+ }
+}
+
+GLenum GLES2DecoderImpl::GetBoundReadFrameBufferInternalFormat() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_READ_FRAMEBUFFER_EXT);
+ if (framebuffer != NULL) {
+ return framebuffer->GetColorAttachmentFormat();
+ } else if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_target_color_format_;
+ } else {
+ return back_buffer_color_format_;
+ }
+}
+
+GLenum GLES2DecoderImpl::GetBoundDrawFrameBufferInternalFormat() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ if (framebuffer != NULL) {
+ return framebuffer->GetColorAttachmentFormat();
+ } else if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_target_color_format_;
+ } else {
+ return back_buffer_color_format_;
+ }
+}
+
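+// Keeps the texture info that mirrors the saved offscreen color texture in
+// sync with the current offscreen size and applies linear filtering and
+// clamp-to-edge wrap parameters to it.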
+void GLES2DecoderImpl::UpdateParentTextureInfo() {
+ if (!offscreen_saved_color_texture_info_.get())
+ return;
+ GLenum target = offscreen_saved_color_texture_info_->texture()->target();
+ glBindTexture(target, offscreen_saved_color_texture_info_->service_id());
+ texture_manager()->SetLevelInfo(
+ offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_2D,
+ 0, // level
+ GL_RGBA,
+ offscreen_size_.width(),
+ offscreen_size_.height(),
+ 1, // depth
+ 0, // border
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ texture_manager()->SetParameteri(
+ "UpdateParentTextureInfo",
+ GetErrorState(),
+ offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_MAG_FILTER,
+ GL_LINEAR);
+ texture_manager()->SetParameteri(
+ "UpdateParentTextureInfo",
+ GetErrorState(),
+ offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR);
+ texture_manager()->SetParameteri(
+ "UpdateParentTextureInfo",
+ GetErrorState(),
+ offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE);
+ texture_manager()->SetParameteri(
+ "UpdateParentTextureInfo",
+ GetErrorState(),
+ offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_WRAP_T,
+ GL_CLAMP_TO_EDGE);
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ glBindTexture(target, texture_ref ? texture_ref->service_id() : 0);
+}
+
+void GLES2DecoderImpl::SetResizeCallback(
+ const base::Callback<void(gfx::Size, float)>& callback) {
+ resize_callback_ = callback;
+}
+
+Logger* GLES2DecoderImpl::GetLogger() {
+ return &logger_;
+}
+
+void GLES2DecoderImpl::BeginDecoding() {
+ gpu_tracer_->BeginDecoding();
+ gpu_trace_commands_ = gpu_tracer_->IsTracing();
+ gpu_debug_commands_ = log_commands() || debug() || gpu_trace_commands_ ||
+ (*cb_command_trace_category_ != 0);
+}
+
+void GLES2DecoderImpl::EndDecoding() {
+ gpu_tracer_->EndDecoding();
+}
+
+ErrorState* GLES2DecoderImpl::GetErrorState() {
+ return state_.GetErrorState();
+}
+
+void GLES2DecoderImpl::SetShaderCacheCallback(
+ const ShaderCacheCallback& callback) {
+ shader_cache_callback_ = callback;
+}
+
+void GLES2DecoderImpl::SetWaitSyncPointCallback(
+ const WaitSyncPointCallback& callback) {
+ wait_sync_point_callback_ = callback;
+}
+
+AsyncPixelTransferManager*
+ GLES2DecoderImpl::GetAsyncPixelTransferManager() {
+ return async_pixel_transfer_manager_.get();
+}
+
+void GLES2DecoderImpl::ResetAsyncPixelTransferManagerForTest() {
+ async_pixel_transfer_manager_.reset();
+}
+
+void GLES2DecoderImpl::SetAsyncPixelTransferManagerForTest(
+ AsyncPixelTransferManager* manager) {
+ async_pixel_transfer_manager_ = make_scoped_ptr(manager);
+}
+
+bool GLES2DecoderImpl::GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id) {
+ TextureRef* texture_ref = texture_manager()->GetTexture(client_texture_id);
+ if (texture_ref) {
+ *service_texture_id = texture_ref->service_id();
+ return true;
+ }
+ return false;
+}
+
+uint32 GLES2DecoderImpl::GetTextureUploadCount() {
+ return texture_state_.texture_upload_count +
+ async_pixel_transfer_manager_->GetTextureUploadCount();
+}
+
+base::TimeDelta GLES2DecoderImpl::GetTotalTextureUploadTime() {
+ return texture_state_.total_texture_upload_time +
+ async_pixel_transfer_manager_->GetTotalTextureUploadTime();
+}
+
+base::TimeDelta GLES2DecoderImpl::GetTotalProcessingCommandsTime() {
+ return total_processing_commands_time_;
+}
+
+void GLES2DecoderImpl::AddProcessingCommandsTime(base::TimeDelta time) {
+ total_processing_commands_time_ += time;
+}
+
+void GLES2DecoderImpl::Destroy(bool have_context) {
+ if (!initialized())
+ return;
+
+ DCHECK(!have_context || context_->IsCurrent(NULL));
+
+ // Unbind everything.
+ state_.vertex_attrib_manager = NULL;
+ state_.default_vertex_attrib_manager = NULL;
+ state_.texture_units.clear();
+ state_.bound_array_buffer = NULL;
+ state_.current_queries.clear();
+ framebuffer_state_.bound_read_framebuffer = NULL;
+ framebuffer_state_.bound_draw_framebuffer = NULL;
+ state_.bound_renderbuffer = NULL;
+
+ if (offscreen_saved_color_texture_info_.get()) {
+ DCHECK(offscreen_target_color_texture_);
+ DCHECK_EQ(offscreen_saved_color_texture_info_->service_id(),
+ offscreen_saved_color_texture_->id());
+ offscreen_saved_color_texture_->Invalidate();
+ offscreen_saved_color_texture_info_ = NULL;
+ }
+ if (have_context) {
+ if (copy_texture_CHROMIUM_.get()) {
+ copy_texture_CHROMIUM_->Destroy();
+ copy_texture_CHROMIUM_.reset();
+ }
+
+ if (state_.current_program.get()) {
+ program_manager()->UnuseProgram(shader_manager(),
+ state_.current_program.get());
+ }
+
+ if (attrib_0_buffer_id_) {
+ glDeleteBuffersARB(1, &attrib_0_buffer_id_);
+ }
+ if (fixed_attrib_buffer_id_) {
+ glDeleteBuffersARB(1, &fixed_attrib_buffer_id_);
+ }
+
+ if (validation_texture_) {
+ glDeleteTextures(1, &validation_texture_);
+ glDeleteFramebuffersEXT(1, &validation_fbo_multisample_);
+ glDeleteFramebuffersEXT(1, &validation_fbo_);
+ }
+
+ if (offscreen_target_frame_buffer_.get())
+ offscreen_target_frame_buffer_->Destroy();
+ if (offscreen_target_color_texture_.get())
+ offscreen_target_color_texture_->Destroy();
+ if (offscreen_target_color_render_buffer_.get())
+ offscreen_target_color_render_buffer_->Destroy();
+ if (offscreen_target_depth_render_buffer_.get())
+ offscreen_target_depth_render_buffer_->Destroy();
+ if (offscreen_target_stencil_render_buffer_.get())
+ offscreen_target_stencil_render_buffer_->Destroy();
+ if (offscreen_saved_frame_buffer_.get())
+ offscreen_saved_frame_buffer_->Destroy();
+ if (offscreen_saved_color_texture_.get())
+ offscreen_saved_color_texture_->Destroy();
+ if (offscreen_resolved_frame_buffer_.get())
+ offscreen_resolved_frame_buffer_->Destroy();
+ if (offscreen_resolved_color_texture_.get())
+ offscreen_resolved_color_texture_->Destroy();
+ } else {
+ if (offscreen_target_frame_buffer_.get())
+ offscreen_target_frame_buffer_->Invalidate();
+ if (offscreen_target_color_texture_.get())
+ offscreen_target_color_texture_->Invalidate();
+ if (offscreen_target_color_render_buffer_.get())
+ offscreen_target_color_render_buffer_->Invalidate();
+ if (offscreen_target_depth_render_buffer_.get())
+ offscreen_target_depth_render_buffer_->Invalidate();
+ if (offscreen_target_stencil_render_buffer_.get())
+ offscreen_target_stencil_render_buffer_->Invalidate();
+ if (offscreen_saved_frame_buffer_.get())
+ offscreen_saved_frame_buffer_->Invalidate();
+ if (offscreen_saved_color_texture_.get())
+ offscreen_saved_color_texture_->Invalidate();
+ if (offscreen_resolved_frame_buffer_.get())
+ offscreen_resolved_frame_buffer_->Invalidate();
+ if (offscreen_resolved_color_texture_.get())
+ offscreen_resolved_color_texture_->Invalidate();
+ }
+
+  // Current program must be cleared after calling ProgramManager::UnuseProgram.
+  // Otherwise, we can leak objects. http://crbug.com/258772.
+  // state_.current_program must be reset before group_ is reset because
+  // the latter deletes the ProgramManager object that is referred to by the
+  // state_.current_program object.
+ state_.current_program = NULL;
+
+ copy_texture_CHROMIUM_.reset();
+
+ if (query_manager_.get()) {
+ query_manager_->Destroy(have_context);
+ query_manager_.reset();
+ }
+
+  if (vertex_array_manager_.get()) {
+ vertex_array_manager_->Destroy(have_context);
+ vertex_array_manager_.reset();
+ }
+
+ if (image_manager_.get()) {
+ image_manager_->Destroy(have_context);
+ image_manager_.reset();
+ }
+
+ offscreen_target_frame_buffer_.reset();
+ offscreen_target_color_texture_.reset();
+ offscreen_target_color_render_buffer_.reset();
+ offscreen_target_depth_render_buffer_.reset();
+ offscreen_target_stencil_render_buffer_.reset();
+ offscreen_saved_frame_buffer_.reset();
+ offscreen_saved_color_texture_.reset();
+ offscreen_resolved_frame_buffer_.reset();
+ offscreen_resolved_color_texture_.reset();
+
+ // Need to release these before releasing |group_| which may own the
+ // ShaderTranslatorCache.
+ fragment_translator_ = NULL;
+ vertex_translator_ = NULL;
+
+ // Should destroy the transfer manager before the texture manager held
+ // by the context group.
+ async_pixel_transfer_manager_.reset();
+
+ if (group_.get()) {
+ framebuffer_manager()->RemoveObserver(this);
+ group_->Destroy(this, have_context);
+ group_ = NULL;
+ }
+
+ if (context_.get()) {
+ context_->ReleaseCurrent(NULL);
+ context_ = NULL;
+ }
+
+#if defined(OS_MACOSX)
+ for (TextureToIOSurfaceMap::iterator it = texture_to_io_surface_map_.begin();
+ it != texture_to_io_surface_map_.end(); ++it) {
+ CFRelease(it->second);
+ }
+ texture_to_io_surface_map_.clear();
+#endif
+}
+
+void GLES2DecoderImpl::SetSurface(
+ const scoped_refptr<gfx::GLSurface>& surface) {
+ DCHECK(context_->IsCurrent(NULL));
+ DCHECK(surface_.get());
+ surface_ = surface;
+ RestoreCurrentFramebufferBindings();
+}
+
+void GLES2DecoderImpl::ProduceFrontBuffer(const Mailbox& mailbox) {
+ if (!offscreen_saved_color_texture_.get()) {
+ LOG(ERROR) << "Called ProduceFrontBuffer on a non-offscreen context";
+ return;
+ }
+ if (!offscreen_saved_color_texture_info_.get()) {
+ GLuint service_id = offscreen_saved_color_texture_->id();
+ offscreen_saved_color_texture_info_ = TextureRef::Create(
+ texture_manager(), 0, service_id);
+ texture_manager()->SetTarget(offscreen_saved_color_texture_info_.get(),
+ GL_TEXTURE_2D);
+ UpdateParentTextureInfo();
+ }
+ mailbox_manager()->ProduceTexture(
+ GL_TEXTURE_2D, mailbox, offscreen_saved_color_texture_info_->texture());
+}
+
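+// Reallocates the offscreen target color/depth/stencil storage at |size|,
+// re-attaches it to the target frame buffer, and clears the result.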
+bool GLES2DecoderImpl::ResizeOffscreenFrameBuffer(const gfx::Size& size) {
+ bool is_offscreen = !!offscreen_target_frame_buffer_.get();
+ if (!is_offscreen) {
+    LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer called "
+               << "with an onscreen framebuffer.";
+ return false;
+ }
+
+ if (offscreen_size_ == size)
+ return true;
+
+ offscreen_size_ = size;
+ int w = offscreen_size_.width();
+ int h = offscreen_size_.height();
+ if (w < 0 || h < 0 || h >= (INT_MAX / 4) / (w ? w : 1)) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "to allocate storage due to excessive dimensions.";
+ return false;
+ }
+
+ // Reallocate the offscreen target buffers.
+ DCHECK(offscreen_target_color_format_);
+ if (IsOffscreenBufferMultisampled()) {
+ if (!offscreen_target_color_render_buffer_->AllocateStorage(
+ feature_info_.get(),
+ offscreen_size_,
+ offscreen_target_color_format_,
+ offscreen_target_samples_)) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "to allocate storage for offscreen target color buffer.";
+ return false;
+ }
+ } else {
+ if (!offscreen_target_color_texture_->AllocateStorage(
+ offscreen_size_, offscreen_target_color_format_, false)) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "to allocate storage for offscreen target color texture.";
+ return false;
+ }
+ }
+ if (offscreen_target_depth_format_ &&
+ !offscreen_target_depth_render_buffer_->AllocateStorage(
+ feature_info_.get(),
+ offscreen_size_,
+ offscreen_target_depth_format_,
+ offscreen_target_samples_)) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "to allocate storage for offscreen target depth buffer.";
+ return false;
+ }
+ if (offscreen_target_stencil_format_ &&
+ !offscreen_target_stencil_render_buffer_->AllocateStorage(
+ feature_info_.get(),
+ offscreen_size_,
+ offscreen_target_stencil_format_,
+ offscreen_target_samples_)) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "to allocate storage for offscreen target stencil buffer.";
+ return false;
+ }
+
+ // Attach the offscreen target buffers to the target frame buffer.
+ if (IsOffscreenBufferMultisampled()) {
+ offscreen_target_frame_buffer_->AttachRenderBuffer(
+ GL_COLOR_ATTACHMENT0,
+ offscreen_target_color_render_buffer_.get());
+ } else {
+ offscreen_target_frame_buffer_->AttachRenderTexture(
+ offscreen_target_color_texture_.get());
+ }
+ if (offscreen_target_depth_format_) {
+ offscreen_target_frame_buffer_->AttachRenderBuffer(
+ GL_DEPTH_ATTACHMENT,
+ offscreen_target_depth_render_buffer_.get());
+ }
+ const bool packed_depth_stencil =
+ offscreen_target_depth_format_ == GL_DEPTH24_STENCIL8;
+ if (packed_depth_stencil) {
+ offscreen_target_frame_buffer_->AttachRenderBuffer(
+ GL_STENCIL_ATTACHMENT,
+ offscreen_target_depth_render_buffer_.get());
+ } else if (offscreen_target_stencil_format_) {
+ offscreen_target_frame_buffer_->AttachRenderBuffer(
+ GL_STENCIL_ATTACHMENT,
+ offscreen_target_stencil_render_buffer_.get());
+ }
+
+ if (offscreen_target_frame_buffer_->CheckStatus() !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "because offscreen FBO was incomplete.";
+ return false;
+ }
+
+ // Clear the target frame buffer.
+ {
+ ScopedFrameBufferBinder binder(this, offscreen_target_frame_buffer_->id());
+ glClearColor(0, 0, 0, (GLES2Util::GetChannelsForFormat(
+ offscreen_target_color_format_) & 0x0008) != 0 ? 0 : 1);
+ state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glClearStencil(0);
+ state_.SetDeviceStencilMaskSeparate(GL_FRONT, kDefaultStencilMask);
+ state_.SetDeviceStencilMaskSeparate(GL_BACK, kDefaultStencilMask);
+ glClearDepth(0);
+ state_.SetDeviceDepthMask(GL_TRUE);
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+ RestoreClearState();
+ }
+
+ // Destroy the offscreen resolved framebuffers.
+ if (offscreen_resolved_frame_buffer_.get())
+ offscreen_resolved_frame_buffer_->Destroy();
+ if (offscreen_resolved_color_texture_.get())
+ offscreen_resolved_color_texture_->Destroy();
+ offscreen_resolved_color_texture_.reset();
+ offscreen_resolved_frame_buffer_.reset();
+
+ return true;
+}
+
+error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ResizeCHROMIUM& c =
+ *static_cast<const gles2::cmds::ResizeCHROMIUM*>(cmd_data);
+ if (!offscreen_target_frame_buffer_.get() && surface_->DeferDraws())
+ return error::kDeferCommandUntilLater;
+
+ GLuint width = static_cast<GLuint>(c.width);
+ GLuint height = static_cast<GLuint>(c.height);
+ GLfloat scale_factor = c.scale_factor;
+ TRACE_EVENT2("gpu", "glResizeChromium", "width", width, "height", height);
+
+ width = std::max(1U, width);
+ height = std::max(1U, height);
+
+#if defined(OS_POSIX) && !defined(OS_MACOSX) && \
+ !defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
+ // Make sure that we are done drawing to the back buffer before resizing.
+ glFinish();
+#endif
+ bool is_offscreen = !!offscreen_target_frame_buffer_.get();
+ if (is_offscreen) {
+ if (!ResizeOffscreenFrameBuffer(gfx::Size(width, height))) {
+ LOG(ERROR) << "GLES2DecoderImpl: Context lost because "
+ << "ResizeOffscreenFrameBuffer failed.";
+ return error::kLostContext;
+ }
+ }
+
+ if (!resize_callback_.is_null()) {
+ resize_callback_.Run(gfx::Size(width, height), scale_factor);
+ DCHECK(context_->IsCurrent(surface_.get()));
+ if (!context_->IsCurrent(surface_.get())) {
+ LOG(ERROR) << "GLES2DecoderImpl: Context lost because context no longer "
+ << "current after resize callback.";
+ return error::kLostContext;
+ }
+ }
+
+ return error::kNoError;
+}
+
+const char* GLES2DecoderImpl::GetCommandName(unsigned int command_id) const {
+ if (command_id > kStartPoint && command_id < kNumCommands) {
+ return gles2::GetCommandName(static_cast<CommandId>(command_id));
+ }
+ return GetCommonCommandName(static_cast<cmd::CommandId>(command_id));
+}
+
+// Decode a command, and call the corresponding GL functions.
+// NOTE: DoCommand() is slower than calling DoCommands() on larger batches
+// of commands at once, and is now only used for tests that need to track
+// individual commands.
+error::Error GLES2DecoderImpl::DoCommand(unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data) {
+ return DoCommands(1, cmd_data, arg_count + 1, 0);
+}
+
+// Decode multiple commands, and call the corresponding GL functions.
+// NOTE: 'buffer' is a pointer to the command buffer. As such, it could be
+// changed by a (malicious) client at any time, so if validation has to happen,
+// it should operate on a copy of the data.
+// NOTE: This is duplicating code from AsyncAPIInterface::DoCommands() in the
+// interest of performance in this critical execution loop.
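+// The DebugImpl template parameter compiles the per-command tracing, logging
+// and glGetError checks in or out, so the common case avoids their overhead.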
+template <bool DebugImpl>
+error::Error GLES2DecoderImpl::DoCommandsImpl(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ commands_to_process_ = num_commands;
+ error::Error result = error::kNoError;
+ const CommandBufferEntry* cmd_data =
+ static_cast<const CommandBufferEntry*>(buffer);
+ int process_pos = 0;
+ unsigned int command = 0;
+
+ while (process_pos < num_entries && result == error::kNoError &&
+ commands_to_process_--) {
+ const unsigned int size = cmd_data->value_header.size;
+ command = cmd_data->value_header.command;
+
+ if (size == 0) {
+ result = error::kInvalidSize;
+ break;
+ }
+
+ if (static_cast<int>(size) + process_pos > num_entries) {
+ result = error::kOutOfBounds;
+ break;
+ }
+
+ if (DebugImpl) {
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("cb_command"),
+ GetCommandName(command));
+
+ if (log_commands()) {
+ LOG(ERROR) << "[" << logger_.GetLogPrefix() << "]"
+ << "cmd: " << GetCommandName(command);
+ }
+ }
+
+ const unsigned int arg_count = size - 1;
+ unsigned int command_index = command - kStartPoint - 1;
+ if (command_index < arraysize(command_info)) {
+ const CommandInfo& info = command_info[command_index];
+ unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
+ if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
+ (info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
+ bool doing_gpu_trace = false;
+ if (DebugImpl && gpu_trace_commands_) {
+ if (CMD_FLAG_GET_TRACE_LEVEL(info.cmd_flags) <= gpu_trace_level_) {
+ doing_gpu_trace = true;
+ gpu_tracer_->Begin(GetCommandName(command), kTraceDecoder);
+ }
+ }
+
+ uint32 immediate_data_size = (arg_count - info_arg_count) *
+ sizeof(CommandBufferEntry); // NOLINT
+
+ result = (this->*info.cmd_handler)(immediate_data_size, cmd_data);
+
+ if (DebugImpl && doing_gpu_trace)
+ gpu_tracer_->End(kTraceDecoder);
+
+ if (DebugImpl && debug()) {
+ GLenum error;
+ while ((error = glGetError()) != GL_NO_ERROR) {
+ LOG(ERROR) << "[" << logger_.GetLogPrefix() << "] "
+ << "GL ERROR: " << GLES2Util::GetStringEnum(error)
+ << " : " << GetCommandName(command);
+ LOCAL_SET_GL_ERROR(error, "DoCommand", "GL error from driver");
+ }
+ }
+ } else {
+ result = error::kInvalidArguments;
+ }
+ } else {
+ result = DoCommonCommand(command, arg_count, cmd_data);
+ }
+
+ if (DebugImpl) {
+ TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("cb_command"),
+ GetCommandName(command));
+ }
+
+ if (result == error::kNoError &&
+ current_decoder_error_ != error::kNoError) {
+ result = current_decoder_error_;
+ current_decoder_error_ = error::kNoError;
+ }
+
+ if (result != error::kDeferCommandUntilLater) {
+ process_pos += size;
+ cmd_data += size;
+ }
+ }
+
+ if (entries_processed)
+ *entries_processed = process_pos;
+
+ if (error::IsError(result)) {
+ LOG(ERROR) << "Error: " << result << " for Command "
+ << GetCommandName(command);
+ }
+
+ return result;
+}
+
+error::Error GLES2DecoderImpl::DoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ if (gpu_debug_commands_) {
+ return DoCommandsImpl<true>(
+ num_commands, buffer, num_entries, entries_processed);
+ } else {
+ return DoCommandsImpl<false>(
+ num_commands, buffer, num_entries, entries_processed);
+ }
+}
+
+void GLES2DecoderImpl::RemoveBuffer(GLuint client_id) {
+ buffer_manager()->RemoveBuffer(client_id);
+}
+
+bool GLES2DecoderImpl::CreateProgramHelper(GLuint client_id) {
+ if (GetProgram(client_id)) {
+ return false;
+ }
+ GLuint service_id = glCreateProgram();
+ if (service_id != 0) {
+ CreateProgram(client_id, service_id);
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::CreateShaderHelper(GLenum type, GLuint client_id) {
+ if (GetShader(client_id)) {
+ return false;
+ }
+ GLuint service_id = glCreateShader(type);
+ if (service_id != 0) {
+ CreateShader(client_id, service_id, type);
+ }
+ return true;
+}
+
+void GLES2DecoderImpl::DoFinish() {
+ glFinish();
+ ProcessPendingReadPixels();
+ ProcessPendingQueries();
+}
+
+void GLES2DecoderImpl::DoFlush() {
+ glFlush();
+ ProcessPendingQueries();
+}
+
+void GLES2DecoderImpl::DoActiveTexture(GLenum texture_unit) {
+ GLuint texture_index = texture_unit - GL_TEXTURE0;
+ if (texture_index >= state_.texture_units.size()) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glActiveTexture", texture_unit, "texture_unit");
+ return;
+ }
+ state_.active_texture_unit = texture_index;
+ glActiveTexture(texture_unit);
+}
+
+void GLES2DecoderImpl::DoBindBuffer(GLenum target, GLuint client_id) {
+ Buffer* buffer = NULL;
+ GLuint service_id = 0;
+ if (client_id != 0) {
+ buffer = GetBuffer(client_id);
+ if (!buffer) {
+ if (!group_->bind_generates_resource()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBindBuffer",
+ "id not generated by glGenBuffers");
+ return;
+ }
+
+      // It's a new id so make a buffer for it.
+ glGenBuffersARB(1, &service_id);
+ CreateBuffer(client_id, service_id);
+ buffer = GetBuffer(client_id);
+ }
+ }
+ LogClientServiceForInfo(buffer, client_id, "glBindBuffer");
+ if (buffer) {
+ if (!buffer_manager()->SetTarget(buffer, target)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindBuffer", "buffer bound to more than 1 target");
+ return;
+ }
+ service_id = buffer->service_id();
+ }
+ switch (target) {
+ case GL_ARRAY_BUFFER:
+ state_.bound_array_buffer = buffer;
+ break;
+ case GL_ELEMENT_ARRAY_BUFFER:
+ state_.vertex_attrib_manager->SetElementArrayBuffer(buffer);
+ break;
+ default:
+ NOTREACHED(); // Validation should prevent us getting here.
+ break;
+ }
+ glBindBuffer(target, service_id);
+}
+
+bool GLES2DecoderImpl::BoundFramebufferHasColorAttachmentWithAlpha(
+ bool all_draw_buffers) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ if (!all_draw_buffers || !framebuffer) {
+ return (GLES2Util::GetChannelsForFormat(
+ GetBoundDrawFrameBufferInternalFormat()) & 0x0008) != 0;
+ }
+ return framebuffer->HasAlphaMRT();
+}
+
+bool GLES2DecoderImpl::BoundFramebufferHasDepthAttachment() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ if (framebuffer) {
+ return framebuffer->HasDepthAttachment();
+ }
+ if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_target_depth_format_ != 0;
+ }
+ return back_buffer_has_depth_;
+}
+
+bool GLES2DecoderImpl::BoundFramebufferHasStencilAttachment() {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ if (framebuffer) {
+ return framebuffer->HasStencilAttachment();
+ }
+ if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_target_stencil_format_ != 0 ||
+ offscreen_target_depth_format_ == GL_DEPTH24_STENCIL8;
+ }
+ return back_buffer_has_stencil_;
+}
+
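+// Re-synchronizes the device color, depth and stencil write masks and the
+// depth/stencil test enables with the attachments of the currently bound
+// framebuffer.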
+void GLES2DecoderImpl::ApplyDirtyState() {
+ if (framebuffer_state_.clear_state_dirty) {
+ bool have_alpha = BoundFramebufferHasColorAttachmentWithAlpha(true);
+ state_.SetDeviceColorMask(state_.color_mask_red,
+ state_.color_mask_green,
+ state_.color_mask_blue,
+ state_.color_mask_alpha && have_alpha);
+
+ bool have_depth = BoundFramebufferHasDepthAttachment();
+ state_.SetDeviceDepthMask(state_.depth_mask && have_depth);
+
+ bool have_stencil = BoundFramebufferHasStencilAttachment();
+ state_.SetDeviceStencilMaskSeparate(
+ GL_FRONT, have_stencil ? state_.stencil_front_writemask : 0);
+ state_.SetDeviceStencilMaskSeparate(
+ GL_BACK, have_stencil ? state_.stencil_back_writemask : 0);
+
+ state_.SetDeviceCapabilityState(
+ GL_DEPTH_TEST, state_.enable_flags.depth_test && have_depth);
+ state_.SetDeviceCapabilityState(
+ GL_STENCIL_TEST, state_.enable_flags.stencil_test && have_stencil);
+ framebuffer_state_.clear_state_dirty = false;
+ }
+}
+
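+// Returns the FBO that stands in for the default framebuffer: the emulated
+// offscreen target if one exists, otherwise the surface's backing FBO (0 for
+// a real onscreen back buffer).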
+GLuint GLES2DecoderImpl::GetBackbufferServiceId() const {
+ return (offscreen_target_frame_buffer_.get())
+ ? offscreen_target_frame_buffer_->id()
+ : (surface_.get() ? surface_->GetBackingFrameBufferObject() : 0);
+}
+
+void GLES2DecoderImpl::RestoreState(const ContextState* prev_state) {
+ TRACE_EVENT1("gpu", "GLES2DecoderImpl::RestoreState",
+ "context", logger_.GetLogPrefix());
+ // Restore the Framebuffer first because of bugs in Intel drivers.
+ // Intel drivers incorrectly clip the viewport settings to
+ // the size of the current framebuffer object.
+ RestoreFramebufferBindings();
+ state_.RestoreState(prev_state);
+}
+
+void GLES2DecoderImpl::RestoreFramebufferBindings() const {
+ GLuint service_id =
+ framebuffer_state_.bound_draw_framebuffer.get()
+ ? framebuffer_state_.bound_draw_framebuffer->service_id()
+ : GetBackbufferServiceId();
+ if (!features().chromium_framebuffer_multisample) {
+ glBindFramebufferEXT(GL_FRAMEBUFFER, service_id);
+ } else {
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER, service_id);
+ service_id = framebuffer_state_.bound_read_framebuffer.get()
+ ? framebuffer_state_.bound_read_framebuffer->service_id()
+ : GetBackbufferServiceId();
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER, service_id);
+ }
+ OnFboChanged();
+}
+
+void GLES2DecoderImpl::RestoreRenderbufferBindings() {
+ state_.RestoreRenderbufferBindings();
+}
+
+void GLES2DecoderImpl::RestoreTextureState(unsigned service_id) const {
+ Texture* texture = texture_manager()->GetTextureForServiceId(service_id);
+ if (texture) {
+ GLenum target = texture->target();
+ glBindTexture(target, service_id);
+ glTexParameteri(
+ target, GL_TEXTURE_WRAP_S, texture->wrap_s());
+ glTexParameteri(
+ target, GL_TEXTURE_WRAP_T, texture->wrap_t());
+ glTexParameteri(
+ target, GL_TEXTURE_MIN_FILTER, texture->min_filter());
+ glTexParameteri(
+ target, GL_TEXTURE_MAG_FILTER, texture->mag_filter());
+ RestoreTextureUnitBindings(state_.active_texture_unit);
+ }
+}
+
+void GLES2DecoderImpl::ClearAllAttributes() const {
+ // Must use native VAO 0, as RestoreAllAttributes can't fully restore
+ // other VAOs.
+ if (feature_info_->feature_flags().native_vertex_array_object)
+ glBindVertexArrayOES(0);
+
+ for (uint32 i = 0; i < group_->max_vertex_attribs(); ++i) {
+ if (i != 0) // Never disable attribute 0
+ glDisableVertexAttribArray(i);
+    if (features().angle_instanced_arrays)
+ glVertexAttribDivisorANGLE(i, 0);
+ }
+}
+
+void GLES2DecoderImpl::RestoreAllAttributes() const {
+ state_.RestoreVertexAttribs();
+}
+
+void GLES2DecoderImpl::SetIgnoreCachedStateForTest(bool ignore) {
+ state_.SetIgnoreCachedStateForTest(ignore);
+}
+
+void GLES2DecoderImpl::OnFboChanged() const {
+ if (workarounds().restore_scissor_on_fbo_change)
+ state_.fbo_binding_for_scissor_workaround_dirty_ = true;
+}
+
+// Called after the FBO is checked for completeness.
+void GLES2DecoderImpl::OnUseFramebuffer() const {
+ if (state_.fbo_binding_for_scissor_workaround_dirty_) {
+ state_.fbo_binding_for_scissor_workaround_dirty_ = false;
+ // The driver forgets the correct scissor when modifying the FBO binding.
+ glScissor(state_.scissor_x,
+ state_.scissor_y,
+ state_.scissor_width,
+ state_.scissor_height);
+
+    // crbug.com/222018 - Also on Qualcomm, the flush here avoids flicker;
+    // it's unclear how this bug works.
+ glFlush();
+ }
+}
+
+void GLES2DecoderImpl::DoBindFramebuffer(GLenum target, GLuint client_id) {
+ Framebuffer* framebuffer = NULL;
+ GLuint service_id = 0;
+ if (client_id != 0) {
+ framebuffer = GetFramebuffer(client_id);
+ if (!framebuffer) {
+ if (!group_->bind_generates_resource()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBindFramebuffer",
+ "id not generated by glGenFramebuffers");
+ return;
+ }
+
+      // It's a new id so make a framebuffer for it.
+ glGenFramebuffersEXT(1, &service_id);
+ CreateFramebuffer(client_id, service_id);
+ framebuffer = GetFramebuffer(client_id);
+ } else {
+ service_id = framebuffer->service_id();
+ }
+ framebuffer->MarkAsValid();
+ }
+ LogClientServiceForInfo(framebuffer, client_id, "glBindFramebuffer");
+
+ if (target == GL_FRAMEBUFFER || target == GL_DRAW_FRAMEBUFFER_EXT) {
+ framebuffer_state_.bound_draw_framebuffer = framebuffer;
+ }
+
+ // vmiura: This looks like dup code
+ if (target == GL_FRAMEBUFFER || target == GL_READ_FRAMEBUFFER_EXT) {
+ framebuffer_state_.bound_read_framebuffer = framebuffer;
+ }
+
+ framebuffer_state_.clear_state_dirty = true;
+
+  // If we are rendering to the backbuffer, get the FBO id for any simulated
+ // backbuffer.
+ if (framebuffer == NULL) {
+ service_id = GetBackbufferServiceId();
+ }
+
+ glBindFramebufferEXT(target, service_id);
+ OnFboChanged();
+}
+
+void GLES2DecoderImpl::DoBindRenderbuffer(GLenum target, GLuint client_id) {
+ Renderbuffer* renderbuffer = NULL;
+ GLuint service_id = 0;
+ if (client_id != 0) {
+ renderbuffer = GetRenderbuffer(client_id);
+ if (!renderbuffer) {
+ if (!group_->bind_generates_resource()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBindRenderbuffer",
+ "id not generated by glGenRenderbuffers");
+ return;
+ }
+
+ // It's a new id so make a renderbuffer for it.
+ glGenRenderbuffersEXT(1, &service_id);
+ CreateRenderbuffer(client_id, service_id);
+ renderbuffer = GetRenderbuffer(client_id);
+ } else {
+ service_id = renderbuffer->service_id();
+ }
+ renderbuffer->MarkAsValid();
+ }
+ LogClientServiceForInfo(renderbuffer, client_id, "glBindRenderbuffer");
+ state_.bound_renderbuffer = renderbuffer;
+ state_.bound_renderbuffer_valid = true;
+ glBindRenderbufferEXT(GL_RENDERBUFFER, service_id);
+}
+
+void GLES2DecoderImpl::DoBindTexture(GLenum target, GLuint client_id) {
+ TextureRef* texture_ref = NULL;
+ GLuint service_id = 0;
+ if (client_id != 0) {
+ texture_ref = GetTexture(client_id);
+ if (!texture_ref) {
+ if (!group_->bind_generates_resource()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBindTexture",
+ "id not generated by glGenTextures");
+ return;
+ }
+
+      // It's a new id so make a texture for it.
+ glGenTextures(1, &service_id);
+ DCHECK_NE(0u, service_id);
+ CreateTexture(client_id, service_id);
+ texture_ref = GetTexture(client_id);
+ }
+ } else {
+ texture_ref = texture_manager()->GetDefaultTextureInfo(target);
+ }
+
+  // Check that the texture exists.
+ if (texture_ref) {
+ Texture* texture = texture_ref->texture();
+ // Check that we are not trying to bind it to a different target.
+ if (texture->target() != 0 && texture->target() != target) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBindTexture",
+ "texture bound to more than 1 target.");
+ return;
+ }
+ LogClientServiceForInfo(texture, client_id, "glBindTexture");
+ if (texture->target() == 0) {
+ texture_manager()->SetTarget(texture_ref, target);
+ }
+ glBindTexture(target, texture->service_id());
+ } else {
+ glBindTexture(target, 0);
+ }
+
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ unit.bind_target = target;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ unit.bound_texture_2d = texture_ref;
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ unit.bound_texture_cube_map = texture_ref;
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ unit.bound_texture_external_oes = texture_ref;
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ unit.bound_texture_rectangle_arb = texture_ref;
+ break;
+ default:
+ NOTREACHED(); // Validation should prevent us getting here.
+ break;
+ }
+}
+
+void GLES2DecoderImpl::DoDisableVertexAttribArray(GLuint index) {
+ if (state_.vertex_attrib_manager->Enable(index, false)) {
+ if (index != 0 ||
+ gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ glDisableVertexAttribArray(index);
+ }
+ } else {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glDisableVertexAttribArray", "index out of range");
+ }
+}
+
+void GLES2DecoderImpl::DoDiscardFramebufferEXT(GLenum target,
+ GLsizei numAttachments,
+ const GLenum* attachments) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_FRAMEBUFFER);
+
+ // Validates the attachments. If one of them fails
+ // the whole command fails.
+ for (GLsizei i = 0; i < numAttachments; ++i) {
+ if ((framebuffer &&
+ !validators_->attachment.IsValid(attachments[i])) ||
+ (!framebuffer &&
+ !validators_->backbuffer_attachment.IsValid(attachments[i]))) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glDiscardFramebufferEXT", attachments[i], "attachments");
+ return;
+ }
+ }
+
+ // Marks each one of them as not cleared
+ for (GLsizei i = 0; i < numAttachments; ++i) {
+ if (framebuffer) {
+ framebuffer->MarkAttachmentAsCleared(renderbuffer_manager(),
+ texture_manager(),
+ attachments[i],
+ false);
+ } else {
+ switch (attachments[i]) {
+ case GL_COLOR_EXT:
+ backbuffer_needs_clear_bits_ |= GL_COLOR_BUFFER_BIT;
+ break;
+        case GL_DEPTH_EXT:
+          backbuffer_needs_clear_bits_ |= GL_DEPTH_BUFFER_BIT;
+          break;
+ case GL_STENCIL_EXT:
+ backbuffer_needs_clear_bits_ |= GL_STENCIL_BUFFER_BIT;
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ }
+ }
+
+ // If the default framebuffer is bound but we are still rendering to an
+ // FBO, translate attachment names that refer to default framebuffer
+ // channels to corresponding framebuffer attachments.
+ scoped_ptr<GLenum[]> translated_attachments(new GLenum[numAttachments]);
+ for (GLsizei i = 0; i < numAttachments; ++i) {
+ GLenum attachment = attachments[i];
+ if (!framebuffer && GetBackbufferServiceId()) {
+ switch (attachment) {
+ case GL_COLOR_EXT:
+ attachment = GL_COLOR_ATTACHMENT0;
+ break;
+ case GL_DEPTH_EXT:
+ attachment = GL_DEPTH_ATTACHMENT;
+ break;
+ case GL_STENCIL_EXT:
+ attachment = GL_STENCIL_ATTACHMENT;
+ break;
+ default:
+ NOTREACHED();
+ return;
+ }
+ }
+ translated_attachments[i] = attachment;
+ }
+
+ glDiscardFramebufferEXT(target, numAttachments, translated_attachments.get());
+}
+
+void GLES2DecoderImpl::DoEnableVertexAttribArray(GLuint index) {
+ if (state_.vertex_attrib_manager->Enable(index, true)) {
+ glEnableVertexAttribArray(index);
+ } else {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glEnableVertexAttribArray", "index out of range");
+ }
+}
+
+void GLES2DecoderImpl::DoGenerateMipmap(GLenum target) {
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref ||
+ !texture_manager()->CanGenerateMipmaps(texture_ref)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glGenerateMipmap", "Can not generate mips");
+ return;
+ }
+
+ if (target == GL_TEXTURE_CUBE_MAP) {
+ for (int i = 0; i < 6; ++i) {
+ GLenum face = GL_TEXTURE_CUBE_MAP_POSITIVE_X + i;
+ if (!texture_manager()->ClearTextureLevel(this, texture_ref, face, 0)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glGenerateMipmap", "dimensions too big");
+ return;
+ }
+ }
+ } else {
+ if (!texture_manager()->ClearTextureLevel(this, texture_ref, target, 0)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glGenerateMipmap", "dimensions too big");
+ return;
+ }
+ }
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glGenerateMipmap");
+ // Workaround for a Mac driver bug. In the grand scheme of things, setting
+ // glTexParameter twice for glGenerateMipmap is probably not a large
+ // performance hit so there's probably no need to make this conditional.
+ // The bug appears to be that if the filtering mode is set to something that
+ // doesn't require mipmaps for rendering, or is never set to something other
+ // than the default, then glGenerateMipmap misbehaves.
+ if (workarounds().set_texture_filter_before_generating_mipmap) {
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
+ }
+ glGenerateMipmapEXT(target);
+ if (workarounds().set_texture_filter_before_generating_mipmap) {
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER,
+ texture_ref->texture()->min_filter());
+ }
+ GLenum error = LOCAL_PEEK_GL_ERROR("glGenerateMipmap");
+ if (error == GL_NO_ERROR) {
+ texture_manager()->MarkMipmapsGenerated(texture_ref);
+ }
+}
+
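+// Answers glGet* queries whose results must reflect the decoder's emulated
+// GLES2 state (client-side object ids, emulated limits and bindings) rather
+// than the underlying driver's values. Returns true if it produced the result
+// (or, when |params| is NULL, just the value count); returns false so the
+// caller falls through to the real GL call.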
+bool GLES2DecoderImpl::GetHelper(
+ GLenum pname, GLint* params, GLsizei* num_written) {
+ DCHECK(num_written);
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
+ switch (pname) {
+ case GL_IMPLEMENTATION_COLOR_READ_FORMAT:
+ *num_written = 1;
+ // Return the GL implementation's preferred format (and, in the case
+ // below, type) if we have the GL extension that exposes this. This allows
+ // the GPU client to use the implementation's preferred format for
+ // glReadPixels as an optimisation.
+ //
+ // A conflicting extension (GL_ARB_ES2_compatibility) specifies an error
+ // case when this is queried on integer/floating point buffers, but the
+ // query is acceptable on GLES2 and with the GL_OES_read_format extension.
+ //
+ // Therefore if an error occurs we swallow it and use the internal
+ // implementation instead.
+ if (params) {
+ if (context_->HasExtension("GL_OES_read_format")) {
+ ScopedGLErrorSuppressor suppressor("GLES2DecoderImpl::GetHelper",
+ GetErrorState());
+ glGetIntegerv(pname, params);
+ if (glGetError() == GL_NO_ERROR)
+ return true;
+ }
+ *params = GLES2Util::GetPreferredGLReadPixelsFormat(
+ GetBoundReadFrameBufferInternalFormat());
+ }
+ return true;
+ case GL_IMPLEMENTATION_COLOR_READ_TYPE:
+ *num_written = 1;
+ if (params) {
+ if (context_->HasExtension("GL_OES_read_format")) {
+ ScopedGLErrorSuppressor suppressor("GLES2DecoderImpl::GetHelper",
+ GetErrorState());
+ glGetIntegerv(pname, params);
+ if (glGetError() == GL_NO_ERROR)
+ return true;
+ }
+ *params = GLES2Util::GetPreferredGLReadPixelsType(
+ GetBoundReadFrameBufferInternalFormat(),
+ GetBoundReadFrameBufferTextureType());
+ }
+ return true;
+ case GL_MAX_FRAGMENT_UNIFORM_VECTORS:
+ *num_written = 1;
+ if (params) {
+ *params = group_->max_fragment_uniform_vectors();
+ }
+ return true;
+ case GL_MAX_VARYING_VECTORS:
+ *num_written = 1;
+ if (params) {
+ *params = group_->max_varying_vectors();
+ }
+ return true;
+ case GL_MAX_VERTEX_UNIFORM_VECTORS:
+ *num_written = 1;
+ if (params) {
+ *params = group_->max_vertex_uniform_vectors();
+ }
+ return true;
+ }
+ }
+ switch (pname) {
+ case GL_MAX_VIEWPORT_DIMS:
+ if (offscreen_target_frame_buffer_.get()) {
+ *num_written = 2;
+ if (params) {
+ params[0] = renderbuffer_manager()->max_renderbuffer_size();
+ params[1] = renderbuffer_manager()->max_renderbuffer_size();
+ }
+ return true;
+ }
+ return false;
+ case GL_MAX_SAMPLES:
+ *num_written = 1;
+ if (params) {
+ params[0] = renderbuffer_manager()->max_samples();
+ }
+ return true;
+ case GL_MAX_RENDERBUFFER_SIZE:
+ *num_written = 1;
+ if (params) {
+ params[0] = renderbuffer_manager()->max_renderbuffer_size();
+ }
+ return true;
+ case GL_MAX_TEXTURE_SIZE:
+ *num_written = 1;
+ if (params) {
+ params[0] = texture_manager()->MaxSizeForTarget(GL_TEXTURE_2D);
+ }
+ return true;
+ case GL_MAX_CUBE_MAP_TEXTURE_SIZE:
+ *num_written = 1;
+ if (params) {
+ params[0] = texture_manager()->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP);
+ }
+ return true;
+ case GL_MAX_COLOR_ATTACHMENTS_EXT:
+ *num_written = 1;
+ if (params) {
+ params[0] = group_->max_color_attachments();
+ }
+ return true;
+ case GL_MAX_DRAW_BUFFERS_ARB:
+ *num_written = 1;
+ if (params) {
+ params[0] = group_->max_draw_buffers();
+ }
+ return true;
+ case GL_ALPHA_BITS:
+ *num_written = 1;
+ if (params) {
+ GLint v = 0;
+ glGetIntegerv(GL_ALPHA_BITS, &v);
+ params[0] = BoundFramebufferHasColorAttachmentWithAlpha(false) ? v : 0;
+ }
+ return true;
+ case GL_DEPTH_BITS:
+ *num_written = 1;
+ if (params) {
+ GLint v = 0;
+ glGetIntegerv(GL_DEPTH_BITS, &v);
+ params[0] = BoundFramebufferHasDepthAttachment() ? v : 0;
+ }
+ return true;
+ case GL_STENCIL_BITS:
+ *num_written = 1;
+ if (params) {
+ GLint v = 0;
+ glGetIntegerv(GL_STENCIL_BITS, &v);
+ params[0] = BoundFramebufferHasStencilAttachment() ? v : 0;
+ }
+ return true;
+ case GL_COMPRESSED_TEXTURE_FORMATS:
+ *num_written = validators_->compressed_texture_format.GetValues().size();
+ if (params) {
+ for (GLint ii = 0; ii < *num_written; ++ii) {
+ params[ii] = validators_->compressed_texture_format.GetValues()[ii];
+ }
+ }
+ return true;
+ case GL_NUM_COMPRESSED_TEXTURE_FORMATS:
+ *num_written = 1;
+ if (params) {
+ *params = validators_->compressed_texture_format.GetValues().size();
+ }
+ return true;
+ case GL_NUM_SHADER_BINARY_FORMATS:
+ *num_written = 1;
+ if (params) {
+ *params = validators_->shader_binary_format.GetValues().size();
+ }
+ return true;
+ case GL_SHADER_BINARY_FORMATS:
+ *num_written = validators_->shader_binary_format.GetValues().size();
+ if (params) {
+ for (GLint ii = 0; ii < *num_written; ++ii) {
+ params[ii] = validators_->shader_binary_format.GetValues()[ii];
+ }
+ }
+ return true;
+ case GL_SHADER_COMPILER:
+ *num_written = 1;
+ if (params) {
+ *params = GL_TRUE;
+ }
+ return true;
+ case GL_ARRAY_BUFFER_BINDING:
+ *num_written = 1;
+ if (params) {
+ if (state_.bound_array_buffer.get()) {
+ GLuint client_id = 0;
+ buffer_manager()->GetClientId(state_.bound_array_buffer->service_id(),
+ &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_ELEMENT_ARRAY_BUFFER_BINDING:
+ *num_written = 1;
+ if (params) {
+ if (state_.vertex_attrib_manager->element_array_buffer()) {
+ GLuint client_id = 0;
+ buffer_manager()->GetClientId(
+ state_.vertex_attrib_manager->element_array_buffer()->
+ service_id(), &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_FRAMEBUFFER_BINDING:
+ // case GL_DRAW_FRAMEBUFFER_BINDING_EXT: (same as GL_FRAMEBUFFER_BINDING)
+ *num_written = 1;
+ if (params) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_FRAMEBUFFER);
+ if (framebuffer) {
+ GLuint client_id = 0;
+ framebuffer_manager()->GetClientId(
+ framebuffer->service_id(), &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_READ_FRAMEBUFFER_BINDING_EXT:
+ *num_written = 1;
+ if (params) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_READ_FRAMEBUFFER_EXT);
+ if (framebuffer) {
+ GLuint client_id = 0;
+ framebuffer_manager()->GetClientId(
+ framebuffer->service_id(), &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_RENDERBUFFER_BINDING:
+ *num_written = 1;
+ if (params) {
+ Renderbuffer* renderbuffer =
+ GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (renderbuffer) {
+ *params = renderbuffer->client_id();
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_CURRENT_PROGRAM:
+ *num_written = 1;
+ if (params) {
+ if (state_.current_program.get()) {
+ GLuint client_id = 0;
+ program_manager()->GetClientId(
+ state_.current_program->service_id(), &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_VERTEX_ARRAY_BINDING_OES:
+ *num_written = 1;
+ if (params) {
+ if (state_.vertex_attrib_manager.get() !=
+ state_.default_vertex_attrib_manager.get()) {
+ GLuint client_id = 0;
+ vertex_array_manager_->GetClientId(
+ state_.vertex_attrib_manager->service_id(), &client_id);
+ *params = client_id;
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_TEXTURE_BINDING_2D:
+ *num_written = 1;
+ if (params) {
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ if (unit.bound_texture_2d.get()) {
+ *params = unit.bound_texture_2d->client_id();
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_TEXTURE_BINDING_CUBE_MAP:
+ *num_written = 1;
+ if (params) {
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ if (unit.bound_texture_cube_map.get()) {
+ *params = unit.bound_texture_cube_map->client_id();
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_TEXTURE_BINDING_EXTERNAL_OES:
+ *num_written = 1;
+ if (params) {
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ if (unit.bound_texture_external_oes.get()) {
+ *params = unit.bound_texture_external_oes->client_id();
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_TEXTURE_BINDING_RECTANGLE_ARB:
+ *num_written = 1;
+ if (params) {
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ if (unit.bound_texture_rectangle_arb.get()) {
+ *params = unit.bound_texture_rectangle_arb->client_id();
+ } else {
+ *params = 0;
+ }
+ }
+ return true;
+ case GL_UNPACK_FLIP_Y_CHROMIUM:
+ *num_written = 1;
+ if (params) {
+ params[0] = unpack_flip_y_;
+ }
+ return true;
+ case GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM:
+ *num_written = 1;
+ if (params) {
+ params[0] = unpack_premultiply_alpha_;
+ }
+ return true;
+ case GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM:
+ *num_written = 1;
+ if (params) {
+ params[0] = unpack_unpremultiply_alpha_;
+ }
+ return true;
+ case GL_BIND_GENERATES_RESOURCE_CHROMIUM:
+ *num_written = 1;
+ if (params) {
+ params[0] = group_->bind_generates_resource() ? 1 : 0;
+ }
+ return true;
+ default:
+ if (pname >= GL_DRAW_BUFFER0_ARB &&
+ pname < GL_DRAW_BUFFER0_ARB + group_->max_draw_buffers()) {
+ *num_written = 1;
+ if (params) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_FRAMEBUFFER);
+ if (framebuffer) {
+ params[0] = framebuffer->GetDrawBuffer(pname);
+ } else { // backbuffer
+ if (pname == GL_DRAW_BUFFER0_ARB)
+ params[0] = group_->draw_buffer();
+ else
+ params[0] = GL_NONE;
+ }
+ }
+ return true;
+ }
+ *num_written = util_.GLGetNumValuesReturned(pname);
+ return false;
+ }
+}
+
+bool GLES2DecoderImpl::GetNumValuesReturnedForGLGet(
+ GLenum pname, GLsizei* num_values) {
+ if (state_.GetStateAsGLint(pname, NULL, num_values)) {
+ return true;
+ }
+ return GetHelper(pname, NULL, num_values);
+}
+
+GLenum GLES2DecoderImpl::AdjustGetPname(GLenum pname) {
+ if (GL_MAX_SAMPLES == pname &&
+ features().use_img_for_multisampled_render_to_texture) {
+ return GL_MAX_SAMPLES_IMG;
+ }
+ return pname;
+}
+
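+// Boolean queries are served through the same GLint paths as glGetIntegerv
+// and then converted, so cached and emulated state stays authoritative; only
+// unhandled pnames are forwarded to the driver's glGetBooleanv.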
+void GLES2DecoderImpl::DoGetBooleanv(GLenum pname, GLboolean* params) {
+ DCHECK(params);
+ GLsizei num_written = 0;
+ if (GetNumValuesReturnedForGLGet(pname, &num_written)) {
+ scoped_ptr<GLint[]> values(new GLint[num_written]);
+ if (!state_.GetStateAsGLint(pname, values.get(), &num_written)) {
+ GetHelper(pname, values.get(), &num_written);
+ }
+ for (GLsizei ii = 0; ii < num_written; ++ii) {
+ params[ii] = static_cast<GLboolean>(values[ii]);
+ }
+ } else {
+ pname = AdjustGetPname(pname);
+ glGetBooleanv(pname, params);
+ }
+}
+
+void GLES2DecoderImpl::DoGetFloatv(GLenum pname, GLfloat* params) {
+ DCHECK(params);
+ GLsizei num_written = 0;
+ if (!state_.GetStateAsGLfloat(pname, params, &num_written)) {
+ if (GetHelper(pname, NULL, &num_written)) {
+ scoped_ptr<GLint[]> values(new GLint[num_written]);
+ GetHelper(pname, values.get(), &num_written);
+ for (GLsizei ii = 0; ii < num_written; ++ii) {
+ params[ii] = static_cast<GLfloat>(values[ii]);
+ }
+ } else {
+ pname = AdjustGetPname(pname);
+ glGetFloatv(pname, params);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DoGetIntegerv(GLenum pname, GLint* params) {
+ DCHECK(params);
+ GLsizei num_written;
+ if (!state_.GetStateAsGLint(pname, params, &num_written) &&
+ !GetHelper(pname, params, &num_written)) {
+ pname = AdjustGetPname(pname);
+ glGetIntegerv(pname, params);
+ }
+}
+
+void GLES2DecoderImpl::DoGetProgramiv(
+ GLuint program_id, GLenum pname, GLint* params) {
+ Program* program = GetProgramInfoNotShader(program_id, "glGetProgramiv");
+ if (!program) {
+ return;
+ }
+ program->GetProgramiv(pname, params);
+}
+
+void GLES2DecoderImpl::DoGetBufferParameteriv(
+ GLenum target, GLenum pname, GLint* params) {
+ // Just delegate it. Some validation is actually done before this.
+ buffer_manager()->ValidateAndDoGetBufferParameteriv(
+ &state_, target, pname, params);
+}
+
+void GLES2DecoderImpl::DoBindAttribLocation(
+ GLuint program_id, GLuint index, const char* name) {
+ if (!StringIsValidForGLES(name)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glBindAttribLocation", "Invalid character");
+ return;
+ }
+ if (ProgramManager::IsInvalidPrefix(name, strlen(name))) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glBindAttribLocation", "reserved prefix");
+ return;
+ }
+ if (index >= group_->max_vertex_attribs()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glBindAttribLocation", "index out of range");
+ return;
+ }
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glBindAttribLocation");
+ if (!program) {
+ return;
+ }
+ program->SetAttribLocationBinding(name, static_cast<GLint>(index));
+ glBindAttribLocation(program->service_id(), index, name);
+}
+
+error::Error GLES2DecoderImpl::HandleBindAttribLocationBucket(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindAttribLocationBucket& c =
+ *static_cast<const gles2::cmds::BindAttribLocationBucket*>(cmd_data);
+ GLuint program = static_cast<GLuint>(c.program);
+ GLuint index = static_cast<GLuint>(c.index);
+ Bucket* bucket = GetBucket(c.name_bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string name_str;
+ if (!bucket->GetAsString(&name_str)) {
+ return error::kInvalidArguments;
+ }
+ DoBindAttribLocation(program, index, name_str.c_str());
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoBindUniformLocationCHROMIUM(
+ GLuint program_id, GLint location, const char* name) {
+ if (!StringIsValidForGLES(name)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glBindUniformLocationCHROMIUM", "Invalid character");
+ return;
+ }
+ if (ProgramManager::IsInvalidPrefix(name, strlen(name))) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindUniformLocationCHROMIUM", "reserved prefix");
+ return;
+ }
+ if (location < 0 || static_cast<uint32>(location) >=
+ (group_->max_fragment_uniform_vectors() +
+ group_->max_vertex_uniform_vectors()) * 4) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glBindUniformLocationCHROMIUM", "location out of range");
+ return;
+ }
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glBindUniformLocationCHROMIUM");
+ if (!program) {
+ return;
+ }
+ if (!program->SetUniformLocationBinding(name, location)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glBindUniformLocationCHROMIUM", "location out of range");
+ }
+}
+
+error::Error GLES2DecoderImpl::HandleBindUniformLocationCHROMIUMBucket(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindUniformLocationCHROMIUMBucket& c =
+ *static_cast<const gles2::cmds::BindUniformLocationCHROMIUMBucket*>(
+ cmd_data);
+ GLuint program = static_cast<GLuint>(c.program);
+ GLint location = static_cast<GLint>(c.location);
+ Bucket* bucket = GetBucket(c.name_bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string name_str;
+ if (!bucket->GetAsString(&name_str)) {
+ return error::kInvalidArguments;
+ }
+ DoBindUniformLocationCHROMIUM(program, location, name_str.c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteShader(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteShader& c =
+ *static_cast<const gles2::cmds::DeleteShader*>(cmd_data);
+ GLuint client_id = c.shader;
+ if (client_id) {
+ Shader* shader = GetShader(client_id);
+ if (shader) {
+ if (!shader->IsDeleted()) {
+ glDeleteShader(shader->service_id());
+ shader_manager()->MarkAsDeleted(shader);
+ }
+ } else {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glDeleteShader", "unknown shader");
+ }
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteProgram(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteProgram& c =
+ *static_cast<const gles2::cmds::DeleteProgram*>(cmd_data);
+ GLuint client_id = c.program;
+ if (client_id) {
+ Program* program = GetProgram(client_id);
+ if (program) {
+ if (!program->IsDeleted()) {
+ program_manager()->MarkAsDeleted(shader_manager(), program);
+ }
+ } else {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glDeleteProgram", "unknown program");
+ }
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::DoClear(GLbitfield mask) {
+ DCHECK(!ShouldDeferDraws());
+ if (CheckBoundFramebuffersValid("glClear")) {
+ ApplyDirtyState();
+ glClear(mask);
+ }
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoFramebufferRenderbuffer(
+ GLenum target, GLenum attachment, GLenum renderbuffertarget,
+ GLuint client_renderbuffer_id) {
+ Framebuffer* framebuffer = GetFramebufferInfoForTarget(target);
+ if (!framebuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glFramebufferRenderbuffer", "no framebuffer bound");
+ return;
+ }
+ GLuint service_id = 0;
+ Renderbuffer* renderbuffer = NULL;
+ if (client_renderbuffer_id) {
+ renderbuffer = GetRenderbuffer(client_renderbuffer_id);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glFramebufferRenderbuffer", "unknown renderbuffer");
+ return;
+ }
+ service_id = renderbuffer->service_id();
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glFramebufferRenderbuffer");
+ glFramebufferRenderbufferEXT(
+ target, attachment, renderbuffertarget, service_id);
+ GLenum error = LOCAL_PEEK_GL_ERROR("glFramebufferRenderbuffer");
+ if (error == GL_NO_ERROR) {
+ framebuffer->AttachRenderbuffer(attachment, renderbuffer);
+ }
+ if (framebuffer == framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ OnFboChanged();
+}
+
+void GLES2DecoderImpl::DoDisable(GLenum cap) {
+ if (SetCapabilityState(cap, false)) {
+ glDisable(cap);
+ }
+}
+
+void GLES2DecoderImpl::DoEnable(GLenum cap) {
+ if (SetCapabilityState(cap, true)) {
+ glEnable(cap);
+ }
+}
+
+void GLES2DecoderImpl::DoDepthRangef(GLclampf znear, GLclampf zfar) {
+ state_.z_near = std::min(1.0f, std::max(0.0f, znear));
+ state_.z_far = std::min(1.0f, std::max(0.0f, zfar));
+ glDepthRange(znear, zfar);
+}
+
+void GLES2DecoderImpl::DoSampleCoverage(GLclampf value, GLboolean invert) {
+ state_.sample_coverage_value = std::min(1.0f, std::max(0.0f, value));
+ state_.sample_coverage_invert = (invert != 0);
+ glSampleCoverage(state_.sample_coverage_value, invert);
+}
+
+// Assumes framebuffer is complete.
+void GLES2DecoderImpl::ClearUnclearedAttachments(
+ GLenum target, Framebuffer* framebuffer) {
+ if (target == GL_READ_FRAMEBUFFER_EXT) {
+ // bind this to the DRAW point, clear then bind back to READ
+ // TODO(gman): I don't think there is any guarantee that an FBO that
+ // is complete on the READ attachment will be complete as a DRAW
+ // attachment.
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER_EXT, 0);
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, framebuffer->service_id());
+ }
+ GLbitfield clear_bits = 0;
+ if (framebuffer->HasUnclearedColorAttachments()) {
+ glClearColor(
+ 0.0f, 0.0f, 0.0f,
+ (GLES2Util::GetChannelsForFormat(
+ framebuffer->GetColorAttachmentFormat()) & 0x0008) != 0 ? 0.0f :
+ 1.0f);
+ state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ clear_bits |= GL_COLOR_BUFFER_BIT;
+ if (feature_info_->feature_flags().ext_draw_buffers)
+ framebuffer->PrepareDrawBuffersForClear();
+ }
+
+ if (framebuffer->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT) ||
+ framebuffer->HasUnclearedAttachment(GL_DEPTH_STENCIL_ATTACHMENT)) {
+ glClearStencil(0);
+ state_.SetDeviceStencilMaskSeparate(GL_FRONT, kDefaultStencilMask);
+ state_.SetDeviceStencilMaskSeparate(GL_BACK, kDefaultStencilMask);
+ clear_bits |= GL_STENCIL_BUFFER_BIT;
+ }
+
+ if (framebuffer->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT) ||
+ framebuffer->HasUnclearedAttachment(GL_DEPTH_STENCIL_ATTACHMENT)) {
+ glClearDepth(1.0f);
+ state_.SetDeviceDepthMask(GL_TRUE);
+ clear_bits |= GL_DEPTH_BUFFER_BIT;
+ }
+
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ glClear(clear_bits);
+
+ if ((clear_bits & GL_COLOR_BUFFER_BIT) != 0 &&
+ feature_info_->feature_flags().ext_draw_buffers)
+ framebuffer->RestoreDrawBuffersAfterClear();
+
+ framebuffer_manager()->MarkAttachmentsAsCleared(
+ framebuffer, renderbuffer_manager(), texture_manager());
+
+ RestoreClearState();
+
+ if (target == GL_READ_FRAMEBUFFER_EXT) {
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER_EXT, framebuffer->service_id());
+ Framebuffer* draw_framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ GLuint service_id = draw_framebuffer ? draw_framebuffer->service_id() :
+ GetBackbufferServiceId();
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, service_id);
+ }
+}
+
+void GLES2DecoderImpl::RestoreClearState() {
+ framebuffer_state_.clear_state_dirty = true;
+ glClearColor(
+ state_.color_clear_red, state_.color_clear_green, state_.color_clear_blue,
+ state_.color_clear_alpha);
+ glClearStencil(state_.stencil_clear);
+ glClearDepth(state_.depth_clear);
+ if (state_.enable_flags.scissor_test) {
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, true);
+ }
+}
+
+GLenum GLES2DecoderImpl::DoCheckFramebufferStatus(GLenum target) {
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(target);
+ if (!framebuffer) {
+ return GL_FRAMEBUFFER_COMPLETE;
+ }
+ GLenum completeness = framebuffer->IsPossiblyComplete();
+ if (completeness != GL_FRAMEBUFFER_COMPLETE) {
+ return completeness;
+ }
+ return framebuffer->GetStatus(texture_manager(), target);
+}
+
+void GLES2DecoderImpl::DoFramebufferTexture2D(
+ GLenum target, GLenum attachment, GLenum textarget,
+ GLuint client_texture_id, GLint level) {
+ DoFramebufferTexture2DCommon(
+ "glFramebufferTexture2D", target, attachment,
+ textarget, client_texture_id, level, 0);
+}
+
+void GLES2DecoderImpl::DoFramebufferTexture2DMultisample(
+ GLenum target, GLenum attachment, GLenum textarget,
+ GLuint client_texture_id, GLint level, GLsizei samples) {
+ DoFramebufferTexture2DCommon(
+ "glFramebufferTexture2DMultisample", target, attachment,
+ textarget, client_texture_id, level, samples);
+}
+
+void GLES2DecoderImpl::DoFramebufferTexture2DCommon(
+ const char* name, GLenum target, GLenum attachment, GLenum textarget,
+ GLuint client_texture_id, GLint level, GLsizei samples) {
+ if (samples > renderbuffer_manager()->max_samples()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glFramebufferTexture2DMultisample", "samples too large");
+ return;
+ }
+ Framebuffer* framebuffer = GetFramebufferInfoForTarget(target);
+ if (!framebuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ name, "no framebuffer bound.");
+ return;
+ }
+ GLuint service_id = 0;
+ TextureRef* texture_ref = NULL;
+ if (client_texture_id) {
+ texture_ref = GetTexture(client_texture_id);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ name, "unknown texture_ref");
+ return;
+ }
+ service_id = texture_ref->service_id();
+ }
+
+ if (!texture_manager()->ValidForTarget(textarget, level, 0, 0, 1)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ name, "level out of range");
+ return;
+ }
+
+ if (texture_ref)
+ DoWillUseTexImageIfNeeded(texture_ref->texture(), textarget);
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(name);
+ if (0 == samples) {
+ glFramebufferTexture2DEXT(target, attachment, textarget, service_id, level);
+ } else {
+ if (features().use_img_for_multisampled_render_to_texture) {
+ glFramebufferTexture2DMultisampleIMG(target, attachment, textarget,
+ service_id, level, samples);
+ } else {
+ glFramebufferTexture2DMultisampleEXT(target, attachment, textarget,
+ service_id, level, samples);
+ }
+ }
+ GLenum error = LOCAL_PEEK_GL_ERROR(name);
+ if (error == GL_NO_ERROR) {
+ framebuffer->AttachTexture(attachment, texture_ref, textarget, level,
+ samples);
+ }
+ if (framebuffer == framebuffer_state_.bound_draw_framebuffer.get()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+
+ if (texture_ref)
+ DoDidUseTexImageIfNeeded(texture_ref->texture(), textarget);
+
+ OnFboChanged();
+}
+
+void GLES2DecoderImpl::DoGetFramebufferAttachmentParameteriv(
+ GLenum target, GLenum attachment, GLenum pname, GLint* params) {
+ Framebuffer* framebuffer = GetFramebufferInfoForTarget(target);
+ if (!framebuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glGetFramebufferAttachmentParameteriv", "no framebuffer bound");
+ return;
+ }
+ if (pname == GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME) {
+ const Framebuffer::Attachment* attachment_object =
+ framebuffer->GetAttachment(attachment);
+ *params = attachment_object ? attachment_object->object_name() : 0;
+ } else {
+ if (pname == GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT &&
+ features().use_img_for_multisampled_render_to_texture) {
+ pname = GL_TEXTURE_SAMPLES_IMG;
+ }
+ glGetFramebufferAttachmentParameterivEXT(target, attachment, pname, params);
+ }
+}
+
+void GLES2DecoderImpl::DoGetRenderbufferParameteriv(
+ GLenum target, GLenum pname, GLint* params) {
+ Renderbuffer* renderbuffer =
+ GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glGetRenderbufferParameteriv", "no renderbuffer bound");
+ return;
+ }
+
+ EnsureRenderbufferBound();
+ switch (pname) {
+ case GL_RENDERBUFFER_INTERNAL_FORMAT:
+ *params = renderbuffer->internal_format();
+ break;
+ case GL_RENDERBUFFER_WIDTH:
+ *params = renderbuffer->width();
+ break;
+ case GL_RENDERBUFFER_HEIGHT:
+ *params = renderbuffer->height();
+ break;
+ case GL_RENDERBUFFER_SAMPLES_EXT:
+ if (features().use_img_for_multisampled_render_to_texture) {
+ glGetRenderbufferParameterivEXT(target, GL_RENDERBUFFER_SAMPLES_IMG,
+ params);
+ } else {
+ glGetRenderbufferParameterivEXT(target, GL_RENDERBUFFER_SAMPLES_EXT,
+ params);
+ }
+ break;
+ default:
+ glGetRenderbufferParameterivEXT(target, pname, params);
+ break;
+ }
+}
+
+void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
+ GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter) {
+ DCHECK(!ShouldDeferReads() && !ShouldDeferDraws());
+
+ if (!CheckBoundFramebuffersValid("glBlitFramebufferCHROMIUM")) {
+ return;
+ }
+
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ BlitFramebufferHelper(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST,
+ state_.enable_flags.scissor_test);
+}
+
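+// Lazily re-applies the tracked GL_RENDERBUFFER binding before calls that
+// depend on it, in case the service-side binding is not known to match the
+// tracked state.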
+void GLES2DecoderImpl::EnsureRenderbufferBound() {
+ if (!state_.bound_renderbuffer_valid) {
+ state_.bound_renderbuffer_valid = true;
+ glBindRenderbufferEXT(GL_RENDERBUFFER,
+ state_.bound_renderbuffer.get()
+ ? state_.bound_renderbuffer->service_id()
+ : 0);
+ }
+}
+
+void GLES2DecoderImpl::RenderbufferStorageMultisampleHelper(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLsizei samples,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height) {
+ // TODO(sievers): This could be resolved at the GL binding level, but the
+ // binding process is currently a bit too 'brute force'.
+ if (feature_info->feature_flags().is_angle) {
+ glRenderbufferStorageMultisampleANGLE(
+ target, samples, internal_format, width, height);
+ } else if (feature_info->feature_flags().use_core_framebuffer_multisample) {
+ glRenderbufferStorageMultisample(
+ target, samples, internal_format, width, height);
+ } else {
+ glRenderbufferStorageMultisampleEXT(
+ target, samples, internal_format, width, height);
+ }
+}
+
+void GLES2DecoderImpl::BlitFramebufferHelper(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) {
+ // TODO(sievers): This could be resolved at the GL binding level, but the
+ // binding process is currently a bit too 'brute force'.
+ if (feature_info_->feature_flags().is_angle) {
+ glBlitFramebufferANGLE(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ } else if (feature_info_->feature_flags().use_core_framebuffer_multisample) {
+ glBlitFramebuffer(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ } else {
+ glBlitFramebufferEXT(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ }
+}
+
+bool GLES2DecoderImpl::ValidateRenderbufferStorageMultisample(
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ if (samples > renderbuffer_manager()->max_samples()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisample", "samples too large");
+ return false;
+ }
+
+ if (width > renderbuffer_manager()->max_renderbuffer_size() ||
+ height > renderbuffer_manager()->max_renderbuffer_size()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisample", "dimensions too large");
+ return false;
+ }
+
+ uint32 estimated_size = 0;
+ if (!renderbuffer_manager()->ComputeEstimatedRenderbufferSize(
+ width, height, samples, internalformat, &estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY,
+ "glRenderbufferStorageMultisample", "dimensions too large");
+ return false;
+ }
+
+ if (!EnsureGPUMemoryAvailable(estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY,
+ "glRenderbufferStorageMultisample", "out of memory");
+ return false;
+ }
+
+ return true;
+}
+
+void GLES2DecoderImpl::DoRenderbufferStorageMultisampleCHROMIUM(
+ GLenum target, GLsizei samples, GLenum internalformat,
+ GLsizei width, GLsizei height) {
+ Renderbuffer* renderbuffer = GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "no renderbuffer bound");
+ return;
+ }
+
+ if (!ValidateRenderbufferStorageMultisample(
+ samples, internalformat, width, height)) {
+ return;
+ }
+
+ EnsureRenderbufferBound();
+ GLenum impl_format =
+ renderbuffer_manager()->InternalRenderbufferFormatToImplFormat(
+ internalformat);
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(
+ "glRenderbufferStorageMultisampleCHROMIUM");
+ RenderbufferStorageMultisampleHelper(
+ feature_info_.get(), target, samples, impl_format, width, height);
+ GLenum error =
+ LOCAL_PEEK_GL_ERROR("glRenderbufferStorageMultisampleCHROMIUM");
+ if (error == GL_NO_ERROR) {
+
+ if (workarounds().validate_multisample_buffer_allocation) {
+ if (!VerifyMultisampleRenderbufferIntegrity(
+ renderbuffer->service_id(), impl_format)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY,
+ "glRenderbufferStorageMultisampleCHROMIUM", "out of memory");
+ return;
+ }
+ }
+
+ // TODO(gman): If renderbuffers tracked which framebuffers they were
+ // attached to we could just mark those framebuffers as not complete.
+ framebuffer_manager()->IncFramebufferStateChangeCount();
+ renderbuffer_manager()->SetInfo(
+ renderbuffer, samples, internalformat, width, height);
+ }
+}
+
+// This is the handler for multisampled_render_to_texture extensions.
+void GLES2DecoderImpl::DoRenderbufferStorageMultisampleEXT(
+ GLenum target, GLsizei samples, GLenum internalformat,
+ GLsizei width, GLsizei height) {
+ Renderbuffer* renderbuffer = GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glRenderbufferStorageMultisampleEXT",
+ "no renderbuffer bound");
+ return;
+ }
+
+ if (!ValidateRenderbufferStorageMultisample(
+ samples, internalformat, width, height)) {
+ return;
+ }
+
+ EnsureRenderbufferBound();
+ GLenum impl_format =
+ renderbuffer_manager()->InternalRenderbufferFormatToImplFormat(
+ internalformat);
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glRenderbufferStorageMultisampleEXT");
+ if (features().use_img_for_multisampled_render_to_texture) {
+ glRenderbufferStorageMultisampleIMG(
+ target, samples, impl_format, width, height);
+ } else {
+ glRenderbufferStorageMultisampleEXT(
+ target, samples, impl_format, width, height);
+ }
+ GLenum error = LOCAL_PEEK_GL_ERROR("glRenderbufferStorageMultisampleEXT");
+ if (error == GL_NO_ERROR) {
+ // TODO(gman): If renderbuffers tracked which framebuffers they were
+ // attached to we could just mark those framebuffers as not complete.
+ framebuffer_manager()->IncFramebufferStateChangeCount();
+ renderbuffer_manager()->SetInfo(
+ renderbuffer, samples, internalformat, width, height);
+ }
+}
+
+// This function validates the allocation of a multisampled renderbuffer
+// by clearing it to a key color, blitting the contents to a texture, and
+// reading back the color to ensure it matches the key.
+bool GLES2DecoderImpl::VerifyMultisampleRenderbufferIntegrity(
+ GLuint renderbuffer, GLenum format) {
+
+ // Only validate color buffers.
+ // These formats have been selected because they are very common or are known
+ // to be used by the WebGL backbuffer. If problems are observed with other
+ // color formats they can be added here.
+ switch(format) {
+ case GL_RGB:
+ case GL_RGB8:
+ case GL_RGBA:
+ case GL_RGBA8:
+ break;
+ default:
+ return true;
+ }
+
+ GLint draw_framebuffer, read_framebuffer;
+
+ // Cache framebuffer and texture bindings.
+ glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &draw_framebuffer);
+ glGetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &read_framebuffer);
+
+ if (!validation_texture_) {
+ GLint bound_texture;
+ glGetIntegerv(GL_TEXTURE_BINDING_2D, &bound_texture);
+
+ // Create additional resources needed for the verification.
+ glGenTextures(1, &validation_texture_);
+ glGenFramebuffersEXT(1, &validation_fbo_multisample_);
+ glGenFramebuffersEXT(1, &validation_fbo_);
+
+ // Texture only needs to be 1x1.
+ glBindTexture(GL_TEXTURE_2D, validation_texture_);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 1, 1, 0, GL_RGB,
+ GL_UNSIGNED_BYTE, NULL);
+
+ glBindFramebufferEXT(GL_FRAMEBUFFER, validation_fbo_);
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, validation_texture_, 0);
+
+ glBindTexture(GL_TEXTURE_2D, bound_texture);
+ }
+
+ glBindFramebufferEXT(GL_FRAMEBUFFER, validation_fbo_multisample_);
+ glFramebufferRenderbufferEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER, renderbuffer);
+
+ // Cache current state and reset it to the values we require.
+ GLboolean scissor_enabled = false;
+ glGetBooleanv(GL_SCISSOR_TEST, &scissor_enabled);
+ if (scissor_enabled)
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+
+ GLboolean color_mask[4] = {GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE};
+ glGetBooleanv(GL_COLOR_WRITEMASK, color_mask);
+ state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+
+ GLfloat clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
+ glGetFloatv(GL_COLOR_CLEAR_VALUE, clear_color);
+ glClearColor(1.0f, 0.0f, 1.0f, 1.0f);
+
+ // Clear the buffer to the desired key color.
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ // Blit from the multisample buffer to a standard texture.
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER, validation_fbo_multisample_);
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER, validation_fbo_);
+
+ BlitFramebufferHelper(
+ 0, 0, 1, 1, 0, 0, 1, 1, GL_COLOR_BUFFER_BIT, GL_NEAREST);
+
+ // Read a pixel from the buffer.
+ glBindFramebufferEXT(GL_FRAMEBUFFER, validation_fbo_);
+
+ unsigned char pixel[3] = {0, 0, 0};
+ glReadPixels(0, 0, 1, 1, GL_RGB, GL_UNSIGNED_BYTE, &pixel);
+
+ // Detach the renderbuffer.
+ glBindFramebufferEXT(GL_FRAMEBUFFER, validation_fbo_multisample_);
+ glFramebufferRenderbufferEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER, 0);
+
+ // Restore cached state.
+ if (scissor_enabled)
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, true);
+
+ state_.SetDeviceColorMask(
+ color_mask[0], color_mask[1], color_mask[2], color_mask[3]);
+ glClearColor(clear_color[0], clear_color[1], clear_color[2], clear_color[3]);
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER, draw_framebuffer);
+ glBindFramebufferEXT(GL_READ_FRAMEBUFFER, read_framebuffer);
+
+ // Return true if the pixel matched the desired key color.
+ return (pixel[0] == 0xFF &&
+ pixel[1] == 0x00 &&
+ pixel[2] == 0xFF);
+}
+
+void GLES2DecoderImpl::DoRenderbufferStorage(
+ GLenum target, GLenum internalformat, GLsizei width, GLsizei height) {
+ Renderbuffer* renderbuffer =
+ GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glRenderbufferStorage", "no renderbuffer bound");
+ return;
+ }
+
+ if (width > renderbuffer_manager()->max_renderbuffer_size() ||
+ height > renderbuffer_manager()->max_renderbuffer_size()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glRenderbufferStorage", "dimensions too large");
+ return;
+ }
+
+ uint32 estimated_size = 0;
+ if (!renderbuffer_manager()->ComputeEstimatedRenderbufferSize(
+ width, height, 1, internalformat, &estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glRenderbufferStorage", "dimensions too large");
+ return;
+ }
+
+ if (!EnsureGPUMemoryAvailable(estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glRenderbufferStorage", "out of memory");
+ return;
+ }
+
+ EnsureRenderbufferBound();
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glRenderbufferStorage");
+ glRenderbufferStorageEXT(
+ target,
+ renderbuffer_manager()->InternalRenderbufferFormatToImplFormat(
+ internalformat),
+ width,
+ height);
+ GLenum error = LOCAL_PEEK_GL_ERROR("glRenderbufferStorage");
+ if (error == GL_NO_ERROR) {
+ // TODO(gman): If renderbuffers tracked which framebuffers they were
+ // attached to we could just mark those framebuffers as not complete.
+ framebuffer_manager()->IncFramebufferStateChangeCount();
+ renderbuffer_manager()->SetInfo(
+ renderbuffer, 1, internalformat, width, height);
+ }
+}
+
+void GLES2DecoderImpl::DoLinkProgram(GLuint program_id) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoLinkProgram");
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glLinkProgram");
+ if (!program) {
+ return;
+ }
+
+ LogClientServiceForInfo(program, program_id, "glLinkProgram");
+ ShaderTranslator* vertex_translator = NULL;
+ ShaderTranslator* fragment_translator = NULL;
+ if (use_shader_translator_) {
+ vertex_translator = vertex_translator_.get();
+ fragment_translator = fragment_translator_.get();
+ }
+ if (program->Link(shader_manager(),
+ vertex_translator,
+ fragment_translator,
+ workarounds().count_all_in_varyings_packing ?
+ Program::kCountAll : Program::kCountOnlyStaticallyUsed,
+ shader_cache_callback_)) {
+ if (program == state_.current_program.get()) {
+ if (workarounds().use_current_program_after_successful_link)
+ glUseProgram(program->service_id());
+ if (workarounds().clear_uniforms_before_first_program_use)
+ program_manager()->ClearUniforms(program);
+ }
+ }
+
+ // LinkProgram can be very slow. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
+void GLES2DecoderImpl::DoTexParameterf(
+ GLenum target, GLenum pname, GLfloat param) {
+ TextureRef* texture = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexParameterf", "unknown texture");
+ return;
+ }
+
+ texture_manager()->SetParameterf(
+ "glTexParameterf", GetErrorState(), texture, pname, param);
+}
+
+void GLES2DecoderImpl::DoTexParameteri(
+ GLenum target, GLenum pname, GLint param) {
+ TextureRef* texture = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexParameteri", "unknown texture");
+ return;
+ }
+
+ texture_manager()->SetParameteri(
+ "glTexParameteri", GetErrorState(), texture, pname, param);
+}
+
+void GLES2DecoderImpl::DoTexParameterfv(
+ GLenum target, GLenum pname, const GLfloat* params) {
+ TextureRef* texture = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexParameterfv", "unknown texture");
+ return;
+ }
+
+ texture_manager()->SetParameterf(
+ "glTexParameterfv", GetErrorState(), texture, pname, *params);
+}
+
+void GLES2DecoderImpl::DoTexParameteriv(
+ GLenum target, GLenum pname, const GLint* params) {
+ TextureRef* texture = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glTexParameteriv", "unknown texture");
+ return;
+ }
+
+ texture_manager()->SetParameteri(
+ "glTexParameteriv", GetErrorState(), texture, pname, *params);
+}
+
+bool GLES2DecoderImpl::CheckCurrentProgram(const char* function_name) {
+ if (!state_.current_program.get()) {
+ // The program does not exist.
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "no program in use");
+ return false;
+ }
+ if (!state_.current_program->InUse()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "program not linked");
+ return false;
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::CheckCurrentProgramForUniform(
+ GLint location, const char* function_name) {
+ if (!CheckCurrentProgram(function_name)) {
+ return false;
+ }
+ return location != -1;
+}
+
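+// Shared validation for the glUniform* handlers: translates the client's
+// fake uniform location into the driver's real location, checks that the
+// glUniform call being made matches the uniform's declared type, and clamps
+// |count| to the remaining elements of an array uniform.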
+bool GLES2DecoderImpl::PrepForSetUniformByLocation(
+ GLint fake_location,
+ const char* function_name,
+ Program::UniformApiType api_type,
+ GLint* real_location,
+ GLenum* type,
+ GLsizei* count) {
+ DCHECK(type);
+ DCHECK(count);
+ DCHECK(real_location);
+
+ if (!CheckCurrentProgramForUniform(fake_location, function_name)) {
+ return false;
+ }
+ GLint array_index = -1;
+ const Program::UniformInfo* info =
+ state_.current_program->GetUniformInfoByFakeLocation(
+ fake_location, real_location, &array_index);
+ if (!info) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "unknown location");
+ return false;
+ }
+
+ if ((api_type & info->accepts_api_type) == 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "wrong uniform function for type");
+ return false;
+ }
+ if (*count > 1 && !info->is_array) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "count > 1 for non-array");
+ return false;
+ }
+ *count = std::min(info->size - array_index, *count);
+ if (*count <= 0) {
+ return false;
+ }
+ *type = info->type;
+ return true;
+}
+
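+// glUniform1i/glUniform1iv may be setting sampler uniforms, so the values are
+// also routed through Program::SetSamplers() to keep the program's texture
+// unit usage in sync and to range-check them against the number of units.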
+void GLES2DecoderImpl::DoUniform1i(GLint fake_location, GLint v0) {
+ GLenum type = 0;
+ GLsizei count = 1;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform1i",
+ Program::kUniform1i,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ if (!state_.current_program->SetSamplers(
+ state_.texture_units.size(), fake_location, 1, &v0)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glUniform1i", "texture unit out of range");
+ return;
+ }
+ glUniform1i(real_location, v0);
+}
+
+void GLES2DecoderImpl::DoUniform1iv(
+ GLint fake_location, GLsizei count, const GLint *value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform1iv",
+ Program::kUniform1i,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ if (type == GL_SAMPLER_2D || type == GL_SAMPLER_2D_RECT_ARB ||
+ type == GL_SAMPLER_CUBE || type == GL_SAMPLER_EXTERNAL_OES) {
+ if (!state_.current_program->SetSamplers(
+ state_.texture_units.size(), fake_location, count, value)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glUniform1iv", "texture unit out of range");
+ return;
+ }
+ }
+ glUniform1iv(real_location, count, value);
+}
+
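+// For the float entry points, GL_BOOL* uniforms are converted to integers
+// before being forwarded, since bool uniforms may be set through either the
+// float or the int glUniform calls.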
+void GLES2DecoderImpl::DoUniform1fv(
+ GLint fake_location, GLsizei count, const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform1fv",
+ Program::kUniform1f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ if (type == GL_BOOL) {
+ scoped_ptr<GLint[]> temp(new GLint[count]);
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ temp[ii] = static_cast<GLint>(value[ii] != 0.0f);
+ }
+ DoUniform1iv(real_location, count, temp.get());
+ } else {
+ glUniform1fv(real_location, count, value);
+ }
+}
+
+void GLES2DecoderImpl::DoUniform2fv(
+ GLint fake_location, GLsizei count, const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform2fv",
+ Program::kUniform2f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ if (type == GL_BOOL_VEC2) {
+ GLsizei num_values = count * 2;
+ scoped_ptr<GLint[]> temp(new GLint[num_values]);
+ for (GLsizei ii = 0; ii < num_values; ++ii) {
+ temp[ii] = static_cast<GLint>(value[ii] != 0.0f);
+ }
+ glUniform2iv(real_location, count, temp.get());
+ } else {
+ glUniform2fv(real_location, count, value);
+ }
+}
+
+void GLES2DecoderImpl::DoUniform3fv(
+ GLint fake_location, GLsizei count, const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform3fv",
+ Program::kUniform3f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ if (type == GL_BOOL_VEC3) {
+ GLsizei num_values = count * 3;
+ scoped_ptr<GLint[]> temp(new GLint[num_values]);
+ for (GLsizei ii = 0; ii < num_values; ++ii) {
+ temp[ii] = static_cast<GLint>(value[ii] != 0.0f);
+ }
+ glUniform3iv(real_location, count, temp.get());
+ } else {
+ glUniform3fv(real_location, count, value);
+ }
+}
+
+void GLES2DecoderImpl::DoUniform4fv(
+ GLint fake_location, GLsizei count, const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform4fv",
+ Program::kUniform4f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ if (type == GL_BOOL_VEC4) {
+ GLsizei num_values = count * 4;
+ scoped_ptr<GLint[]> temp(new GLint[num_values]);
+ for (GLsizei ii = 0; ii < num_values; ++ii) {
+ temp[ii] = static_cast<GLint>(value[ii] != 0.0f);
+ }
+ glUniform4iv(real_location, count, temp.get());
+ } else {
+ glUniform4fv(real_location, count, value);
+ }
+}
+
+void GLES2DecoderImpl::DoUniform2iv(
+ GLint fake_location, GLsizei count, const GLint* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform2iv",
+ Program::kUniform2i,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniform2iv(real_location, count, value);
+}
+
+void GLES2DecoderImpl::DoUniform3iv(
+ GLint fake_location, GLsizei count, const GLint* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform3iv",
+ Program::kUniform3i,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniform3iv(real_location, count, value);
+}
+
+void GLES2DecoderImpl::DoUniform4iv(
+ GLint fake_location, GLsizei count, const GLint* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniform4iv",
+ Program::kUniform4i,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniform4iv(real_location, count, value);
+}
+
+void GLES2DecoderImpl::DoUniformMatrix2fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniformMatrix2fv",
+ Program::kUniformMatrix2f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniformMatrix2fv(real_location, count, transpose, value);
+}
+
+void GLES2DecoderImpl::DoUniformMatrix3fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniformMatrix3fv",
+ Program::kUniformMatrix3f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniformMatrix3fv(real_location, count, transpose, value);
+}
+
+void GLES2DecoderImpl::DoUniformMatrix4fv(
+ GLint fake_location, GLsizei count, GLboolean transpose,
+ const GLfloat* value) {
+ GLenum type = 0;
+ GLint real_location = -1;
+ if (!PrepForSetUniformByLocation(fake_location,
+ "glUniformMatrix4fv",
+ Program::kUniformMatrix4f,
+ &real_location,
+ &type,
+ &count)) {
+ return;
+ }
+ glUniformMatrix4fv(real_location, count, transpose, value);
+}
+
+void GLES2DecoderImpl::DoUseProgram(GLuint program_id) {
+ GLuint service_id = 0;
+ Program* program = NULL;
+ if (program_id) {
+ program = GetProgramInfoNotShader(program_id, "glUseProgram");
+ if (!program) {
+ return;
+ }
+ if (!program->IsValid()) {
+ // Program was not linked successfully (i.e., glLinkProgram failed).
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glUseProgram", "program not linked");
+ return;
+ }
+ service_id = program->service_id();
+ }
+ if (state_.current_program.get()) {
+ program_manager()->UnuseProgram(shader_manager(),
+ state_.current_program.get());
+ }
+ state_.current_program = program;
+ LogClientServiceMapping("glUseProgram", program_id, service_id);
+ glUseProgram(service_id);
+ if (state_.current_program.get()) {
+ program_manager()->UseProgram(state_.current_program.get());
+ if (workarounds().clear_uniforms_before_first_program_use)
+ program_manager()->ClearUniforms(program);
+ }
+}
+
+void GLES2DecoderImpl::RenderWarning(
+ const char* filename, int line, const std::string& msg) {
+ logger_.LogMessage(filename, line, std::string("RENDER WARNING: ") + msg);
+}
+
+void GLES2DecoderImpl::PerformanceWarning(
+ const char* filename, int line, const std::string& msg) {
+ logger_.LogMessage(filename, line,
+ std::string("PERFORMANCE WARNING: ") + msg);
+}
+
+void GLES2DecoderImpl::DoWillUseTexImageIfNeeded(
+ Texture* texture, GLenum textarget) {
+ // Image is already in use if texture is attached to a framebuffer.
+ if (texture && !texture->IsAttachedToFramebuffer()) {
+ gfx::GLImage* image = texture->GetLevelImage(textarget, 0);
+ if (image) {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::DoWillUseTexImageIfNeeded",
+ GetErrorState());
+ glBindTexture(textarget, texture->service_id());
+ image->WillUseTexImage();
+ RestoreCurrentTextureBindings(&state_, textarget);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DoDidUseTexImageIfNeeded(
+ Texture* texture, GLenum textarget) {
+ // Image is still in use if texture is attached to a framebuffer.
+ if (texture && !texture->IsAttachedToFramebuffer()) {
+ gfx::GLImage* image = texture->GetLevelImage(textarget, 0);
+ if (image) {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::DoDidUseTexImageIfNeeded",
+ GetErrorState());
+ glBindTexture(textarget, texture->service_id());
+ image->DidUseTexImage();
+ RestoreCurrentTextureBindings(&state_, textarget);
+ }
+ }
+}
+
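+// Called before drawing. Any sampler that references an unrenderable texture
+// is temporarily rebound to a black texture, and GLImage-backed textures are
+// notified via WillUseTexImage(). Returns false if any binding was changed;
+// RestoreStateForTextures() below undoes those changes.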
+bool GLES2DecoderImpl::PrepareTexturesForRender() {
+ DCHECK(state_.current_program.get());
+ if (!texture_manager()->HaveUnrenderableTextures() &&
+ !texture_manager()->HaveImages()) {
+ return true;
+ }
+
+ bool textures_set = false;
+ const Program::SamplerIndices& sampler_indices =
+ state_.current_program->sampler_indices();
+ for (size_t ii = 0; ii < sampler_indices.size(); ++ii) {
+ const Program::UniformInfo* uniform_info =
+ state_.current_program->GetUniformInfo(sampler_indices[ii]);
+ DCHECK(uniform_info);
+ for (size_t jj = 0; jj < uniform_info->texture_units.size(); ++jj) {
+ GLuint texture_unit_index = uniform_info->texture_units[jj];
+ if (texture_unit_index < state_.texture_units.size()) {
+ TextureUnit& texture_unit = state_.texture_units[texture_unit_index];
+ TextureRef* texture_ref =
+ texture_unit.GetInfoForSamplerType(uniform_info->type).get();
+ GLenum textarget = GetBindTargetForSamplerType(uniform_info->type);
+ if (!texture_ref || !texture_manager()->CanRender(texture_ref)) {
+ textures_set = true;
+ glActiveTexture(GL_TEXTURE0 + texture_unit_index);
+ glBindTexture(
+ textarget,
+ texture_manager()->black_texture_id(uniform_info->type));
+ LOCAL_RENDER_WARNING(
+ std::string("texture bound to texture unit ") +
+ base::IntToString(texture_unit_index) +
+ " is not renderable. It maybe non-power-of-2 and have"
+ " incompatible texture filtering or is not"
+ " 'texture complete'");
+ continue;
+ }
+
+ if (textarget != GL_TEXTURE_CUBE_MAP) {
+ Texture* texture = texture_ref->texture();
+ gfx::GLImage* image = texture->GetLevelImage(textarget, 0);
+ if (image && !texture->IsAttachedToFramebuffer()) {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::PrepareTexturesForRender", GetErrorState());
+ textures_set = true;
+ glActiveTexture(GL_TEXTURE0 + texture_unit_index);
+ image->WillUseTexImage();
+ continue;
+ }
+ }
+ }
+ // else: should this be an error?
+ }
+ }
+ return !textures_set;
+}
+
+void GLES2DecoderImpl::RestoreStateForTextures() {
+ DCHECK(state_.current_program.get());
+ const Program::SamplerIndices& sampler_indices =
+ state_.current_program->sampler_indices();
+ for (size_t ii = 0; ii < sampler_indices.size(); ++ii) {
+ const Program::UniformInfo* uniform_info =
+ state_.current_program->GetUniformInfo(sampler_indices[ii]);
+ DCHECK(uniform_info);
+ for (size_t jj = 0; jj < uniform_info->texture_units.size(); ++jj) {
+ GLuint texture_unit_index = uniform_info->texture_units[jj];
+ if (texture_unit_index < state_.texture_units.size()) {
+ TextureUnit& texture_unit = state_.texture_units[texture_unit_index];
+ TextureRef* texture_ref =
+ texture_unit.GetInfoForSamplerType(uniform_info->type).get();
+ if (!texture_ref || !texture_manager()->CanRender(texture_ref)) {
+ glActiveTexture(GL_TEXTURE0 + texture_unit_index);
+ // Get the texture_ref info that was previously bound here.
+ texture_ref = texture_unit.bind_target == GL_TEXTURE_2D
+ ? texture_unit.bound_texture_2d.get()
+ : texture_unit.bound_texture_cube_map.get();
+ glBindTexture(texture_unit.bind_target,
+ texture_ref ? texture_ref->service_id() : 0);
+ continue;
+ }
+
+ if (texture_unit.bind_target != GL_TEXTURE_CUBE_MAP) {
+ Texture* texture = texture_ref->texture();
+ gfx::GLImage* image =
+ texture->GetLevelImage(texture_unit.bind_target, 0);
+ if (image && !texture->IsAttachedToFramebuffer()) {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::RestoreStateForTextures", GetErrorState());
+ glActiveTexture(GL_TEXTURE0 + texture_unit_index);
+ image->DidUseTexImage();
+ continue;
+ }
+ }
+ }
+ }
+ }
+ // Set the active texture back to whatever the user had it as.
+ glActiveTexture(GL_TEXTURE0 + state_.active_texture_unit);
+}
+
+bool GLES2DecoderImpl::ClearUnclearedTextures() {
+ // Only check if there are some uncleared textures.
+ if (!texture_manager()->HaveUnsafeTextures()) {
+ return true;
+ }
+
+ // 1: Check all textures we are about to render with.
+ if (state_.current_program.get()) {
+ const Program::SamplerIndices& sampler_indices =
+ state_.current_program->sampler_indices();
+ for (size_t ii = 0; ii < sampler_indices.size(); ++ii) {
+ const Program::UniformInfo* uniform_info =
+ state_.current_program->GetUniformInfo(sampler_indices[ii]);
+ DCHECK(uniform_info);
+ for (size_t jj = 0; jj < uniform_info->texture_units.size(); ++jj) {
+ GLuint texture_unit_index = uniform_info->texture_units[jj];
+ if (texture_unit_index < state_.texture_units.size()) {
+ TextureUnit& texture_unit = state_.texture_units[texture_unit_index];
+ TextureRef* texture_ref =
+ texture_unit.GetInfoForSamplerType(uniform_info->type).get();
+ if (texture_ref && !texture_ref->texture()->SafeToRenderFrom()) {
+ if (!texture_manager()->ClearRenderableLevels(this, texture_ref)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool GLES2DecoderImpl::IsDrawValid(
+ const char* function_name, GLuint max_vertex_accessed, bool instanced,
+ GLsizei primcount) {
+ DCHECK(instanced || primcount == 1);
+
+ // NOTE: We specifically do not check current_program->IsValid() because it
+ // can never be invalid here; glUseProgram would have failed otherwise. Even
+ // if a later glLinkProgram marks the program as invalid, the previously
+ // linked, valid program keeps working as long as it remains current.
+ if (!state_.current_program.get()) {
+ // There is no current program, but GL does not generate an error for this.
+ LOCAL_RENDER_WARNING("Drawing with no current shader program.");
+ return false;
+ }
+
+ return state_.vertex_attrib_manager
+ ->ValidateBindings(function_name,
+ this,
+ feature_info_.get(),
+ state_.current_program.get(),
+ max_vertex_accessed,
+ instanced,
+ primcount);
+}
+
+bool GLES2DecoderImpl::SimulateAttrib0(
+ const char* function_name, GLuint max_vertex_accessed, bool* simulated) {
+ DCHECK(simulated);
+ *simulated = false;
+
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2)
+ return true;
+
+ const VertexAttrib* attrib =
+ state_.vertex_attrib_manager->GetVertexAttrib(0);
+ // If it's enabled or it's not used then we don't need to do anything.
+ bool attrib_0_used =
+ state_.current_program->GetAttribInfoByLocation(0) != NULL;
+ if (attrib->enabled() && attrib_0_used) {
+ return true;
+ }
+
+ // Make a buffer with a single vec4 value repeated enough times to simulate
+ // the constant value that attrib 0 is supposed to have.
+ // This is required to emulate GLES2 on desktop GL.
+ GLuint num_vertices = max_vertex_accessed + 1;
+ uint32 size_needed = 0;
+
+ if (num_vertices == 0 ||
+ !SafeMultiplyUint32(num_vertices, sizeof(Vec4), &size_needed) ||
+ size_needed > 0x7FFFFFFFU) {
+ LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, function_name, "Simulating attrib 0");
+ return false;
+ }
+
+ LOCAL_PERFORMANCE_WARNING(
+ "Attribute 0 is disabled. This has signficant performance penalty");
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(function_name);
+ glBindBuffer(GL_ARRAY_BUFFER, attrib_0_buffer_id_);
+
+ bool new_buffer = static_cast<GLsizei>(size_needed) > attrib_0_size_;
+ if (new_buffer) {
+ glBufferData(GL_ARRAY_BUFFER, size_needed, NULL, GL_DYNAMIC_DRAW);
+ GLenum error = glGetError();
+ if (error != GL_NO_ERROR) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "Simulating attrib 0");
+ return false;
+ }
+ }
+
+ const Vec4& value = state_.attrib_values[0];
+ if (new_buffer ||
+ (attrib_0_used &&
+ (!attrib_0_buffer_matches_value_ ||
+ (value.v[0] != attrib_0_value_.v[0] ||
+ value.v[1] != attrib_0_value_.v[1] ||
+ value.v[2] != attrib_0_value_.v[2] ||
+ value.v[3] != attrib_0_value_.v[3])))) {
+ std::vector<Vec4> temp(num_vertices, value);
+ glBufferSubData(GL_ARRAY_BUFFER, 0, size_needed, &temp[0].v[0]);
+ attrib_0_buffer_matches_value_ = true;
+ attrib_0_value_ = value;
+ attrib_0_size_ = size_needed;
+ }
+
+ glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, NULL);
+
+ if (attrib->divisor())
+ glVertexAttribDivisorANGLE(0, 0);
+
+ *simulated = true;
+ return true;
+}
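+
+// Illustrative sketch, not called by the decoder (the helper name is
+// hypothetical): the overflow-checked sizing used above. num_vertices is
+// max_vertex_accessed + 1, so a max of 0xFFFFFFFF wraps it to 0, and any
+// product above 0x7FFFFFFF is rejected; both cases fail rather than
+// allocating a bogus buffer for the simulated attrib 0.
+static bool ComputeSimulatedAttrib0Size(GLuint num_vertices,
+                                        uint32* size_needed) {
+  return num_vertices != 0 &&
+         SafeMultiplyUint32(num_vertices, sizeof(Vec4), size_needed) &&
+         *size_needed <= 0x7FFFFFFFU;
+}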
+
+void GLES2DecoderImpl::RestoreStateForAttrib(
+ GLuint attrib_index, bool restore_array_binding) {
+ const VertexAttrib* attrib =
+ state_.vertex_attrib_manager->GetVertexAttrib(attrib_index);
+ if (restore_array_binding) {
+ const void* ptr = reinterpret_cast<const void*>(attrib->offset());
+ Buffer* buffer = attrib->buffer();
+ glBindBuffer(GL_ARRAY_BUFFER, buffer ? buffer->service_id() : 0);
+ glVertexAttribPointer(
+ attrib_index, attrib->size(), attrib->type(), attrib->normalized(),
+ attrib->gl_stride(), ptr);
+ }
+ if (attrib->divisor())
+ glVertexAttribDivisorANGLE(attrib_index, attrib->divisor());
+ glBindBuffer(
+ GL_ARRAY_BUFFER, state_.bound_array_buffer.get() ?
+ state_.bound_array_buffer->service_id() : 0);
+
+ // Never touch vertex attribute 0's state (in particular, never
+ // disable it) when running on desktop GL because it will never be
+ // re-enabled.
+ if (attrib_index != 0 ||
+ gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ if (attrib->enabled()) {
+ glEnableVertexAttribArray(attrib_index);
+ } else {
+ glDisableVertexAttribArray(attrib_index);
+ }
+ }
+}
+
+bool GLES2DecoderImpl::SimulateFixedAttribs(
+ const char* function_name,
+ GLuint max_vertex_accessed, bool* simulated, GLsizei primcount) {
+ DCHECK(simulated);
+ *simulated = false;
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2)
+ return true;
+
+ if (!state_.vertex_attrib_manager->HaveFixedAttribs()) {
+ return true;
+ }
+
+ LOCAL_PERFORMANCE_WARNING(
+ "GL_FIXED attributes have a signficant performance penalty");
+
+ // NOTE: We could be smart and check whether a buffer is used by two
+ // different attribs, find the overlapping parts, and duplicate only the
+ // minimum amount of data, but this whole code path is not meant to be used
+ // normally. It is only here to pass the OpenGL ES 2.0 conformance tests, so
+ // we simply append the data for each GL_FIXED attrib to the buffer.
+
+ GLuint elements_needed = 0;
+ const VertexAttribManager::VertexAttribList& enabled_attribs =
+ state_.vertex_attrib_manager->GetEnabledVertexAttribs();
+ for (VertexAttribManager::VertexAttribList::const_iterator it =
+ enabled_attribs.begin(); it != enabled_attribs.end(); ++it) {
+ const VertexAttrib* attrib = *it;
+ const Program::VertexAttrib* attrib_info =
+ state_.current_program->GetAttribInfoByLocation(attrib->index());
+ GLuint max_accessed = attrib->MaxVertexAccessed(primcount,
+ max_vertex_accessed);
+ GLuint num_vertices = max_accessed + 1;
+ if (num_vertices == 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "Simulating attrib 0");
+ return false;
+ }
+ if (attrib_info &&
+ attrib->CanAccess(max_accessed) &&
+ attrib->type() == GL_FIXED) {
+ uint32 elements_used = 0;
+ if (!SafeMultiplyUint32(num_vertices, attrib->size(), &elements_used) ||
+ !SafeAddUint32(elements_needed, elements_used, &elements_needed)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "simulating GL_FIXED attribs");
+ return false;
+ }
+ }
+ }
+
+ const uint32 kSizeOfFloat = sizeof(float); // NOLINT
+ uint32 size_needed = 0;
+ if (!SafeMultiplyUint32(elements_needed, kSizeOfFloat, &size_needed) ||
+ size_needed > 0x7FFFFFFFU) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "simulating GL_FIXED attribs");
+ return false;
+ }
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(function_name);
+
+ glBindBuffer(GL_ARRAY_BUFFER, fixed_attrib_buffer_id_);
+ if (static_cast<GLsizei>(size_needed) > fixed_attrib_buffer_size_) {
+ glBufferData(GL_ARRAY_BUFFER, size_needed, NULL, GL_DYNAMIC_DRAW);
+ GLenum error = glGetError();
+ if (error != GL_NO_ERROR) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "simulating GL_FIXED attribs");
+ return false;
+ }
+ }
+
+ // Copy the elements and convert to float
+ GLintptr offset = 0;
+ for (VertexAttribManager::VertexAttribList::const_iterator it =
+ enabled_attribs.begin(); it != enabled_attribs.end(); ++it) {
+ const VertexAttrib* attrib = *it;
+ const Program::VertexAttrib* attrib_info =
+ state_.current_program->GetAttribInfoByLocation(attrib->index());
+ GLuint max_accessed = attrib->MaxVertexAccessed(primcount,
+ max_vertex_accessed);
+ GLuint num_vertices = max_accessed + 1;
+ if (num_vertices == 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name, "Simulating attrib 0");
+ return false;
+ }
+ if (attrib_info &&
+ attrib->CanAccess(max_accessed) &&
+ attrib->type() == GL_FIXED) {
+ int num_elements = attrib->size() * kSizeOfFloat;
+ int size = num_elements * num_vertices;
+ scoped_ptr<float[]> data(new float[size]);
+ const int32* src = reinterpret_cast<const int32 *>(
+ attrib->buffer()->GetRange(attrib->offset(), size));
+ const int32* end = src + num_elements;
+ float* dst = data.get();
+ while (src != end) {
+ *dst++ = static_cast<float>(*src++) / 65536.0f;
+ }
+ glBufferSubData(GL_ARRAY_BUFFER, offset, size, data.get());
+ glVertexAttribPointer(
+ attrib->index(), attrib->size(), GL_FLOAT, false, 0,
+ reinterpret_cast<GLvoid*>(offset));
+ offset += size;
+ }
+ }
+ *simulated = true;
+ return true;
+}
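+
+// Illustrative sketch, not called by the decoder (the helper name is
+// hypothetical): GL_FIXED is a 16.16 fixed-point format, so the conversion
+// loop above divides each raw 32-bit value by 65536 to get the float
+// equivalent.
+static float Gles2FixedToFloat(int32 fixed_value) {
+  // 0x00010000 -> 1.0f, 0x00018000 -> 1.5f, -65536 -> -1.0f.
+  return static_cast<float>(fixed_value) / 65536.0f;
+}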
+
+void GLES2DecoderImpl::RestoreStateForSimulatedFixedAttribs() {
+ // There's no need to call glVertexAttribPointer because we shadow all the
+ // settings and passing GL_FIXED to it will not work.
+ glBindBuffer(
+ GL_ARRAY_BUFFER,
+ state_.bound_array_buffer.get() ? state_.bound_array_buffer->service_id()
+ : 0);
+}
+
+error::Error GLES2DecoderImpl::DoDrawArrays(
+ const char* function_name,
+ bool instanced,
+ GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) {
+ error::Error error = WillAccessBoundFramebufferForDraw();
+ if (error != error::kNoError)
+ return error;
+ if (!validators_->draw_mode.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, mode, "mode");
+ return error::kNoError;
+ }
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "count < 0");
+ return error::kNoError;
+ }
+ if (primcount < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "primcount < 0");
+ return error::kNoError;
+ }
+ if (!CheckBoundFramebuffersValid(function_name)) {
+ return error::kNoError;
+ }
+ // We have to check this here because in the glDrawArrays prototype the
+ // 'first' parameter is a GLint, not a GLsizei, so it can be negative.
+ if (first < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "first < 0");
+ return error::kNoError;
+ }
+
+ if (count == 0 || primcount == 0) {
+ LOCAL_RENDER_WARNING("Render count or primcount is 0.");
+ return error::kNoError;
+ }
+
+ GLuint max_vertex_accessed = first + count - 1;
+ if (IsDrawValid(function_name, max_vertex_accessed, instanced, primcount)) {
+ if (!ClearUnclearedTextures()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "out of memory");
+ return error::kNoError;
+ }
+ bool simulated_attrib_0 = false;
+ if (!SimulateAttrib0(
+ function_name, max_vertex_accessed, &simulated_attrib_0)) {
+ return error::kNoError;
+ }
+ bool simulated_fixed_attribs = false;
+ if (SimulateFixedAttribs(
+ function_name, max_vertex_accessed, &simulated_fixed_attribs,
+ primcount)) {
+ bool textures_set = !PrepareTexturesForRender();
+ ApplyDirtyState();
+ ScopedRenderTo do_render(framebuffer_state_.bound_draw_framebuffer.get());
+ if (!instanced) {
+ glDrawArrays(mode, first, count);
+ } else {
+ glDrawArraysInstancedANGLE(mode, first, count, primcount);
+ }
+ if (textures_set) {
+ RestoreStateForTextures();
+ }
+ if (simulated_fixed_attribs) {
+ RestoreStateForSimulatedFixedAttribs();
+ }
+ }
+ if (simulated_attrib_0) {
+ // We don't have to restore attrib 0 generic data at the end of this
+ // function even if it is simulated. This is because we will simulate
+ // it in each draw call, and attrib 0 generic data queries use cached
+ // values instead of passing down to the underlying driver.
+ RestoreStateForAttrib(0, false);
+ }
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDrawArrays(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const cmds::DrawArrays& c = *static_cast<const cmds::DrawArrays*>(cmd_data);
+ return DoDrawArrays("glDrawArrays",
+ false,
+ static_cast<GLenum>(c.mode),
+ static_cast<GLint>(c.first),
+ static_cast<GLsizei>(c.count),
+ 1);
+}
+
+error::Error GLES2DecoderImpl::HandleDrawArraysInstancedANGLE(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DrawArraysInstancedANGLE& c =
+ *static_cast<const gles2::cmds::DrawArraysInstancedANGLE*>(cmd_data);
+ if (!features().angle_instanced_arrays) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glDrawArraysInstancedANGLE", "function not available");
+ return error::kNoError;
+ }
+ return DoDrawArrays("glDrawArraysIntancedANGLE",
+ true,
+ static_cast<GLenum>(c.mode),
+ static_cast<GLint>(c.first),
+ static_cast<GLsizei>(c.count),
+ static_cast<GLsizei>(c.primcount));
+}
+
+error::Error GLES2DecoderImpl::DoDrawElements(
+ const char* function_name,
+ bool instanced,
+ GLenum mode,
+ GLsizei count,
+ GLenum type,
+ int32 offset,
+ GLsizei primcount) {
+ error::Error error = WillAccessBoundFramebufferForDraw();
+ if (error != error::kNoError)
+ return error;
+ if (!state_.vertex_attrib_manager->element_array_buffer()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "No element array buffer bound");
+ return error::kNoError;
+ }
+
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "count < 0");
+ return error::kNoError;
+ }
+ if (offset < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "offset < 0");
+ return error::kNoError;
+ }
+ if (!validators_->draw_mode.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, mode, "mode");
+ return error::kNoError;
+ }
+ if (!validators_->index_type.IsValid(type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, type, "type");
+ return error::kNoError;
+ }
+ if (primcount < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "primcount < 0");
+ return error::kNoError;
+ }
+
+ if (!CheckBoundFramebuffersValid(function_name)) {
+ return error::kNoError;
+ }
+
+ if (count == 0 || primcount == 0) {
+ return error::kNoError;
+ }
+
+ GLuint max_vertex_accessed;
+ Buffer* element_array_buffer =
+ state_.vertex_attrib_manager->element_array_buffer();
+
+ if (!element_array_buffer->GetMaxValueForRange(
+ offset, count, type, &max_vertex_accessed)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "range out of bounds for buffer");
+ return error::kNoError;
+ }
+
+ if (IsDrawValid(function_name, max_vertex_accessed, instanced, primcount)) {
+ if (!ClearUnclearedTextures()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "out of memory");
+ return error::kNoError;
+ }
+ bool simulated_attrib_0 = false;
+ if (!SimulateAttrib0(
+ function_name, max_vertex_accessed, &simulated_attrib_0)) {
+ return error::kNoError;
+ }
+ bool simulated_fixed_attribs = false;
+ if (SimulateFixedAttribs(
+ function_name, max_vertex_accessed, &simulated_fixed_attribs,
+ primcount)) {
+ bool textures_set = !PrepareTexturesForRender();
+ ApplyDirtyState();
+ // TODO(gman): Refactor to hide these details in BufferManager or
+ // VertexAttribManager.
+ const GLvoid* indices = reinterpret_cast<const GLvoid*>(offset);
+ bool used_client_side_array = false;
+ if (element_array_buffer->IsClientSideArray()) {
+ used_client_side_array = true;
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+ indices = element_array_buffer->GetRange(offset, 0);
+ }
+
+ ScopedRenderTo do_render(framebuffer_state_.bound_draw_framebuffer.get());
+ if (!instanced) {
+ glDrawElements(mode, count, type, indices);
+ } else {
+ glDrawElementsInstancedANGLE(mode, count, type, indices, primcount);
+ }
+
+ if (used_client_side_array) {
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,
+ element_array_buffer->service_id());
+ }
+
+ if (textures_set) {
+ RestoreStateForTextures();
+ }
+ if (simulated_fixed_attribs) {
+ RestoreStateForSimulatedFixedAttribs();
+ }
+ }
+ if (simulated_attrib_0) {
+ // We don't have to restore attrib 0 generic data at the end of this
+ // function even if it is simulated. This is because we will simulate
+ // it in each draw call, and attrib 0 generic data queries use cached
+ // values instead of passing down to the underlying driver.
+ RestoreStateForAttrib(0, false);
+ }
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDrawElements(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DrawElements& c =
+ *static_cast<const gles2::cmds::DrawElements*>(cmd_data);
+ return DoDrawElements("glDrawElements",
+ false,
+ static_cast<GLenum>(c.mode),
+ static_cast<GLsizei>(c.count),
+ static_cast<GLenum>(c.type),
+ static_cast<int32>(c.index_offset),
+ 1);
+}
+
+error::Error GLES2DecoderImpl::HandleDrawElementsInstancedANGLE(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DrawElementsInstancedANGLE& c =
+ *static_cast<const gles2::cmds::DrawElementsInstancedANGLE*>(cmd_data);
+ if (!features().angle_instanced_arrays) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glDrawElementsInstancedANGLE", "function not available");
+ return error::kNoError;
+ }
+ return DoDrawElements("glDrawElementsInstancedANGLE",
+ true,
+ static_cast<GLenum>(c.mode),
+ static_cast<GLsizei>(c.count),
+ static_cast<GLenum>(c.type),
+ static_cast<int32>(c.index_offset),
+ static_cast<GLsizei>(c.primcount));
+}
+
+GLuint GLES2DecoderImpl::DoGetMaxValueInBufferCHROMIUM(
+ GLuint buffer_id, GLsizei count, GLenum type, GLuint offset) {
+ GLuint max_vertex_accessed = 0;
+ Buffer* buffer = GetBuffer(buffer_id);
+ if (!buffer) {
+ // TODO(gman): Should this be a GL error or a command buffer error?
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "GetMaxValueInBufferCHROMIUM", "unknown buffer");
+ } else {
+ if (!buffer->GetMaxValueForRange(
+ offset, count, type, &max_vertex_accessed)) {
+ // TODO(gman): Should this be a GL error or a command buffer error?
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "GetMaxValueInBufferCHROMIUM", "range out of bounds for buffer");
+ }
+ }
+ return max_vertex_accessed;
+}
+
+// Handles the various versions of the ShaderSource command. The actual
+// glShaderSource call is deferred until glCompileShader. Assumes that
+// data / data_size points to a piece of memory that is in range of whatever
+// context it came from (shared memory, immediate memory, bucket memory).
+error::Error GLES2DecoderImpl::ShaderSourceHelper(
+ GLuint client_id, const char* data, uint32 data_size) {
+ std::string str(data, data + data_size);
+ Shader* shader = GetShaderInfoNotProgram(client_id, "glShaderSource");
+ if (!shader) {
+ return error::kNoError;
+ }
+ // Note: We don't actually call glShaderSource here. We wait until
+ // the call to glCompileShader.
+ shader->set_source(str);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleShaderSourceBucket(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ShaderSourceBucket& c =
+ *static_cast<const gles2::cmds::ShaderSourceBucket*>(cmd_data);
+ Bucket* bucket = GetBucket(c.data_bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ return ShaderSourceHelper(
+ c.shader, bucket->GetDataAs<const char*>(0, bucket->size() - 1),
+ bucket->size() - 1);
+}
+
+void GLES2DecoderImpl::DoCompileShader(GLuint client_id) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoCompileShader");
+ Shader* shader = GetShaderInfoNotProgram(client_id, "glCompileShader");
+ if (!shader) {
+ return;
+ }
+ ShaderTranslator* translator = NULL;
+ if (use_shader_translator_) {
+ translator = shader->shader_type() == GL_VERTEX_SHADER ?
+ vertex_translator_.get() : fragment_translator_.get();
+ }
+
+ shader->DoCompile(
+ translator,
+ feature_info_->feature_flags().angle_translated_shader_source ?
+ Shader::kANGLE : Shader::kGL);
+
+ // CompileShader can be very slow. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
+void GLES2DecoderImpl::DoGetShaderiv(
+ GLuint shader_id, GLenum pname, GLint* params) {
+ Shader* shader = GetShaderInfoNotProgram(shader_id, "glGetShaderiv");
+ if (!shader) {
+ return;
+ }
+ switch (pname) {
+ case GL_SHADER_SOURCE_LENGTH:
+ *params = shader->source().size();
+ if (*params)
+ ++(*params);
+ return;
+ case GL_COMPILE_STATUS:
+ *params = compile_shader_always_succeeds_ ? true : shader->valid();
+ return;
+ case GL_INFO_LOG_LENGTH:
+ *params = shader->log_info().size();
+ if (*params)
+ ++(*params);
+ return;
+ case GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE:
+ *params = shader->translated_source().size();
+ if (*params)
+ ++(*params);
+ return;
+ default:
+ break;
+ }
+ glGetShaderiv(shader->service_id(), pname, params);
+}
+
+error::Error GLES2DecoderImpl::HandleGetShaderSource(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetShaderSource& c =
+ *static_cast<const gles2::cmds::GetShaderSource*>(cmd_data);
+ GLuint shader_id = c.shader;
+ uint32 bucket_id = static_cast<uint32>(c.bucket_id);
+ Bucket* bucket = CreateBucket(bucket_id);
+ Shader* shader = GetShaderInfoNotProgram(shader_id, "glGetShaderSource");
+ if (!shader || shader->source().empty()) {
+ bucket->SetSize(0);
+ return error::kNoError;
+ }
+ bucket->SetFromString(shader->source().c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetTranslatedShaderSourceANGLE(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetTranslatedShaderSourceANGLE& c =
+ *static_cast<const gles2::cmds::GetTranslatedShaderSourceANGLE*>(
+ cmd_data);
+ GLuint shader_id = c.shader;
+ uint32 bucket_id = static_cast<uint32>(c.bucket_id);
+ Bucket* bucket = CreateBucket(bucket_id);
+ Shader* shader = GetShaderInfoNotProgram(
+ shader_id, "glGetTranslatedShaderSourceANGLE");
+ if (!shader) {
+ bucket->SetSize(0);
+ return error::kNoError;
+ }
+
+ bucket->SetFromString(shader->translated_source().c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramInfoLog(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetProgramInfoLog& c =
+ *static_cast<const gles2::cmds::GetProgramInfoLog*>(cmd_data);
+ GLuint program_id = c.program;
+ uint32 bucket_id = static_cast<uint32>(c.bucket_id);
+ Bucket* bucket = CreateBucket(bucket_id);
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glGetProgramInfoLog");
+ if (!program || !program->log_info()) {
+ bucket->SetFromString("");
+ return error::kNoError;
+ }
+ bucket->SetFromString(program->log_info()->c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetShaderInfoLog(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetShaderInfoLog& c =
+ *static_cast<const gles2::cmds::GetShaderInfoLog*>(cmd_data);
+ GLuint shader_id = c.shader;
+ uint32 bucket_id = static_cast<uint32>(c.bucket_id);
+ Bucket* bucket = CreateBucket(bucket_id);
+ Shader* shader = GetShaderInfoNotProgram(shader_id, "glGetShaderInfoLog");
+ if (!shader) {
+ bucket->SetFromString("");
+ return error::kNoError;
+ }
+ bucket->SetFromString(shader->log_info().c_str());
+ return error::kNoError;
+}
+
+bool GLES2DecoderImpl::DoIsEnabled(GLenum cap) {
+ return state_.GetEnabled(cap);
+}
+
+bool GLES2DecoderImpl::DoIsBuffer(GLuint client_id) {
+ const Buffer* buffer = GetBuffer(client_id);
+ return buffer && buffer->IsValid() && !buffer->IsDeleted();
+}
+
+bool GLES2DecoderImpl::DoIsFramebuffer(GLuint client_id) {
+ const Framebuffer* framebuffer =
+ GetFramebuffer(client_id);
+ return framebuffer && framebuffer->IsValid() && !framebuffer->IsDeleted();
+}
+
+bool GLES2DecoderImpl::DoIsProgram(GLuint client_id) {
+ // IsProgram is true for programs as soon as they are created, until they are
+ // deleted and no longer in use.
+ const Program* program = GetProgram(client_id);
+ return program != NULL && !program->IsDeleted();
+}
+
+bool GLES2DecoderImpl::DoIsRenderbuffer(GLuint client_id) {
+ const Renderbuffer* renderbuffer =
+ GetRenderbuffer(client_id);
+ return renderbuffer && renderbuffer->IsValid() && !renderbuffer->IsDeleted();
+}
+
+bool GLES2DecoderImpl::DoIsShader(GLuint client_id) {
+ // IsShader is true for shaders as soon as they are created, until they
+ // are deleted and not attached to any programs.
+ const Shader* shader = GetShader(client_id);
+ return shader != NULL && !shader->IsDeleted();
+}
+
+bool GLES2DecoderImpl::DoIsTexture(GLuint client_id) {
+ const TextureRef* texture_ref = GetTexture(client_id);
+ return texture_ref && texture_ref->texture()->IsValid();
+}
+
+void GLES2DecoderImpl::DoAttachShader(
+ GLuint program_client_id, GLint shader_client_id) {
+ Program* program = GetProgramInfoNotShader(
+ program_client_id, "glAttachShader");
+ if (!program) {
+ return;
+ }
+ Shader* shader = GetShaderInfoNotProgram(shader_client_id, "glAttachShader");
+ if (!shader) {
+ return;
+ }
+ if (!program->AttachShader(shader_manager(), shader)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glAttachShader",
+ "can not attach more than one shader of the same type.");
+ return;
+ }
+ glAttachShader(program->service_id(), shader->service_id());
+}
+
+void GLES2DecoderImpl::DoDetachShader(
+ GLuint program_client_id, GLint shader_client_id) {
+ Program* program = GetProgramInfoNotShader(
+ program_client_id, "glDetachShader");
+ if (!program) {
+ return;
+ }
+ Shader* shader = GetShaderInfoNotProgram(shader_client_id, "glDetachShader");
+ if (!shader) {
+ return;
+ }
+ if (!program->DetachShader(shader_manager(), shader)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glDetachShader", "shader not attached to program");
+ return;
+ }
+ glDetachShader(program->service_id(), shader->service_id());
+}
+
+void GLES2DecoderImpl::DoValidateProgram(GLuint program_client_id) {
+ Program* program = GetProgramInfoNotShader(
+ program_client_id, "glValidateProgram");
+ if (!program) {
+ return;
+ }
+ program->Validate();
+}
+
+void GLES2DecoderImpl::GetVertexAttribHelper(
+ const VertexAttrib* attrib, GLenum pname, GLint* params) {
+ switch (pname) {
+ case GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING: {
+ Buffer* buffer = attrib->buffer();
+ if (buffer && !buffer->IsDeleted()) {
+ GLuint client_id;
+ buffer_manager()->GetClientId(buffer->service_id(), &client_id);
+ *params = client_id;
+ }
+ break;
+ }
+ case GL_VERTEX_ATTRIB_ARRAY_ENABLED:
+ *params = attrib->enabled();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_SIZE:
+ *params = attrib->size();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_STRIDE:
+ *params = attrib->gl_stride();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_TYPE:
+ *params = attrib->type();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_NORMALIZED:
+ *params = attrib->normalized();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE:
+ *params = attrib->divisor();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+void GLES2DecoderImpl::DoGetTexParameterfv(
+ GLenum target, GLenum pname, GLfloat* params) {
+ InitTextureMaxAnisotropyIfNeeded(target, pname);
+ glGetTexParameterfv(target, pname, params);
+}
+
+void GLES2DecoderImpl::DoGetTexParameteriv(
+ GLenum target, GLenum pname, GLint* params) {
+ InitTextureMaxAnisotropyIfNeeded(target, pname);
+ glGetTexParameteriv(target, pname, params);
+}
+
+void GLES2DecoderImpl::InitTextureMaxAnisotropyIfNeeded(
+ GLenum target, GLenum pname) {
+ if (!workarounds().init_texture_max_anisotropy)
+ return;
+ if (pname != GL_TEXTURE_MAX_ANISOTROPY_EXT ||
+ !validators_->texture_parameter.IsValid(pname)) {
+ return;
+ }
+
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glGetTexParamter{fi}v", "unknown texture for target");
+ return;
+ }
+ Texture* texture = texture_ref->texture();
+ texture->InitTextureMaxAnisotropyIfNeeded(target);
+}
+
+void GLES2DecoderImpl::DoGetVertexAttribfv(
+ GLuint index, GLenum pname, GLfloat* params) {
+ VertexAttrib* attrib = state_.vertex_attrib_manager->GetVertexAttrib(index);
+ if (!attrib) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetVertexAttribfv", "index out of range");
+ return;
+ }
+ switch (pname) {
+ case GL_CURRENT_VERTEX_ATTRIB: {
+ const Vec4& value = state_.attrib_values[index];
+ params[0] = value.v[0];
+ params[1] = value.v[1];
+ params[2] = value.v[2];
+ params[3] = value.v[3];
+ break;
+ }
+ default: {
+ GLint value = 0;
+ GetVertexAttribHelper(attrib, pname, &value);
+ *params = static_cast<GLfloat>(value);
+ break;
+ }
+ }
+}
+
+void GLES2DecoderImpl::DoGetVertexAttribiv(
+ GLuint index, GLenum pname, GLint* params) {
+ VertexAttrib* attrib = state_.vertex_attrib_manager->GetVertexAttrib(index);
+ if (!attrib) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetVertexAttribiv", "index out of range");
+ return;
+ }
+ switch (pname) {
+ case GL_CURRENT_VERTEX_ATTRIB: {
+ const Vec4& value = state_.attrib_values[index];
+ params[0] = static_cast<GLint>(value.v[0]);
+ params[1] = static_cast<GLint>(value.v[1]);
+ params[2] = static_cast<GLint>(value.v[2]);
+ params[3] = static_cast<GLint>(value.v[3]);
+ break;
+ }
+ default:
+ GetVertexAttribHelper(attrib, pname, params);
+ break;
+ }
+}
+
+bool GLES2DecoderImpl::SetVertexAttribValue(
+ const char* function_name, GLuint index, const GLfloat* value) {
+ if (index >= state_.attrib_values.size()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "index out of range");
+ return false;
+ }
+ Vec4& v = state_.attrib_values[index];
+ v.v[0] = value[0];
+ v.v[1] = value[1];
+ v.v[2] = value[2];
+ v.v[3] = value[3];
+ return true;
+}
+
+void GLES2DecoderImpl::DoVertexAttrib1f(GLuint index, GLfloat v0) {
+ GLfloat v[4] = { v0, 0.0f, 0.0f, 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib1f", index, v)) {
+ glVertexAttrib1f(index, v0);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib2f(GLuint index, GLfloat v0, GLfloat v1) {
+ GLfloat v[4] = { v0, v1, 0.0f, 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib2f", index, v)) {
+ glVertexAttrib2f(index, v0, v1);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib3f(
+ GLuint index, GLfloat v0, GLfloat v1, GLfloat v2) {
+ GLfloat v[4] = { v0, v1, v2, 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib3f", index, v)) {
+ glVertexAttrib3f(index, v0, v1, v2);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib4f(
+ GLuint index, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3) {
+ GLfloat v[4] = { v0, v1, v2, v3, };
+ if (SetVertexAttribValue("glVertexAttrib4f", index, v)) {
+ glVertexAttrib4f(index, v0, v1, v2, v3);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib1fv(GLuint index, const GLfloat* v) {
+ GLfloat t[4] = { v[0], 0.0f, 0.0f, 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib1fv", index, t)) {
+ glVertexAttrib1fv(index, v);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib2fv(GLuint index, const GLfloat* v) {
+ GLfloat t[4] = { v[0], v[1], 0.0f, 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib2fv", index, t)) {
+ glVertexAttrib2fv(index, v);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib3fv(GLuint index, const GLfloat* v) {
+ GLfloat t[4] = { v[0], v[1], v[2], 1.0f, };
+ if (SetVertexAttribValue("glVertexAttrib3fv", index, t)) {
+ glVertexAttrib3fv(index, v);
+ }
+}
+
+void GLES2DecoderImpl::DoVertexAttrib4fv(GLuint index, const GLfloat* v) {
+ if (SetVertexAttribValue("glVertexAttrib4fv", index, v)) {
+ glVertexAttrib4fv(index, v);
+ }
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttribPointer(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttribPointer& c =
+ *static_cast<const gles2::cmds::VertexAttribPointer*>(cmd_data);
+
+ if (!state_.bound_array_buffer.get() ||
+ state_.bound_array_buffer->IsDeleted()) {
+ if (state_.vertex_attrib_manager.get() ==
+ state_.default_vertex_attrib_manager.get()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "no array buffer bound");
+ return error::kNoError;
+ } else if (c.offset != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glVertexAttribPointer", "client side arrays are not allowed");
+ return error::kNoError;
+ }
+ }
+
+ GLuint indx = c.indx;
+ GLint size = c.size;
+ GLenum type = c.type;
+ GLboolean normalized = c.normalized;
+ GLsizei stride = c.stride;
+ GLsizei offset = c.offset;
+ const void* ptr = reinterpret_cast<const void*>(offset);
+ if (!validators_->vertex_attrib_type.IsValid(type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glVertexAttribPointer", type, "type");
+ return error::kNoError;
+ }
+ if (!validators_->vertex_attrib_size.IsValid(size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "size GL_INVALID_VALUE");
+ return error::kNoError;
+ }
+ if (indx >= group_->max_vertex_attribs()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "index out of range");
+ return error::kNoError;
+ }
+ if (stride < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "stride < 0");
+ return error::kNoError;
+ }
+ if (stride > 255) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "stride > 255");
+ return error::kNoError;
+ }
+ if (offset < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glVertexAttribPointer", "offset < 0");
+ return error::kNoError;
+ }
+ GLsizei component_size =
+ GLES2Util::GetGLTypeSizeForTexturesAndBuffers(type);
+ // component_size must be a power of two to use & as optimized modulo.
+ DCHECK(GLES2Util::IsPOT(component_size));
+ if (offset & (component_size - 1)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glVertexAttribPointer", "offset not valid for type");
+ return error::kNoError;
+ }
+ if (stride & (component_size - 1)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glVertexAttribPointer", "stride not valid for type");
+ return error::kNoError;
+ }
+ state_.vertex_attrib_manager
+ ->SetAttribInfo(indx,
+ state_.bound_array_buffer.get(),
+ size,
+ type,
+ normalized,
+ stride,
+ stride != 0 ? stride : component_size * size,
+ offset);
+ if (type != GL_FIXED) {
+ glVertexAttribPointer(indx, size, type, normalized, stride, ptr);
+ }
+ return error::kNoError;
+}
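+
+// Illustrative sketch, not called by the decoder (the helper name is
+// hypothetical): for a power-of-two component size, masking with (size - 1)
+// is an optimized modulo, so the offset and stride checks above behave like
+// "value % component_size != 0". For example, an offset of 6 with 4-byte
+// GL_FLOAT components gives 6 & 3 == 2, just like 6 % 4, and is rejected.
+static bool IsMisalignedForComponentSize(GLsizei value,
+                                         GLsizei component_size) {
+  DCHECK(GLES2Util::IsPOT(component_size));
+  return (value & (component_size - 1)) != 0;
+}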
+
+void GLES2DecoderImpl::DoViewport(GLint x, GLint y, GLsizei width,
+ GLsizei height) {
+ state_.viewport_x = x;
+ state_.viewport_y = y;
+ state_.viewport_width = std::min(width, viewport_max_width_);
+ state_.viewport_height = std::min(height, viewport_max_height_);
+ glViewport(x, y, width, height);
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttribDivisorANGLE(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttribDivisorANGLE& c =
+ *static_cast<const gles2::cmds::VertexAttribDivisorANGLE*>(cmd_data);
+ if (!features().angle_instanced_arrays) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glVertexAttribDivisorANGLE", "function not available");
+ return error::kNoError;
+ }
+ GLuint index = c.index;
+ GLuint divisor = c.divisor;
+ if (index >= group_->max_vertex_attribs()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glVertexAttribDivisorANGLE", "index out of range");
+ return error::kNoError;
+ }
+
+ state_.vertex_attrib_manager->SetDivisor(
+ index,
+ divisor);
+ glVertexAttribDivisorANGLE(index, divisor);
+ return error::kNoError;
+}
+
+template <typename pixel_data_type>
+static void WriteAlphaData(
+ void *pixels, uint32 row_count, uint32 channel_count,
+ uint32 alpha_channel_index, uint32 unpadded_row_size,
+ uint32 padded_row_size, pixel_data_type alpha_value) {
+ DCHECK_GT(channel_count, 0U);
+ DCHECK_EQ(unpadded_row_size % sizeof(pixel_data_type), 0U);
+ uint32 unpadded_row_size_in_elements =
+ unpadded_row_size / sizeof(pixel_data_type);
+ DCHECK_EQ(padded_row_size % sizeof(pixel_data_type), 0U);
+ uint32 padded_row_size_in_elements =
+ padded_row_size / sizeof(pixel_data_type);
+ pixel_data_type* dst =
+ static_cast<pixel_data_type*>(pixels) + alpha_channel_index;
+ for (uint32 yy = 0; yy < row_count; ++yy) {
+ pixel_data_type* end = dst + unpadded_row_size_in_elements;
+ for (pixel_data_type* d = dst; d < end; d += channel_count) {
+ *d = alpha_value;
+ }
+ dst += padded_row_size_in_elements;
+ }
+}
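+
+// Illustrative usage sketch, not called by the decoder (the function name is
+// hypothetical): force alpha to opaque in a 3x2 GL_RGBA / GL_UNSIGNED_BYTE
+// readback. With a pack alignment of 4 both the unpadded and padded row sizes
+// are 3 * 4 = 12 bytes; a pack alignment of 8 would pad rows to 16 bytes and
+// WriteAlphaData would skip the trailing 4 bytes of each row.
+static void ExampleWriteOpaqueAlpha(uint8* pixels_3x2_rgba) {
+  WriteAlphaData<uint8>(pixels_3x2_rgba, 2 /* row_count */, 4 /* channels */,
+                        3 /* alpha_channel_index */, 12 /* unpadded_row_size */,
+                        12 /* padded_row_size */, 0xFF);
+}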
+
+void GLES2DecoderImpl::FinishReadPixels(
+ const cmds::ReadPixels& c,
+ GLuint buffer) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::FinishReadPixels");
+ GLsizei width = c.width;
+ GLsizei height = c.height;
+ GLenum format = c.format;
+ GLenum type = c.type;
+ typedef cmds::ReadPixels::Result Result;
+ uint32 pixels_size;
+ Result* result = NULL;
+ if (c.result_shm_id != 0) {
+ result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ if (buffer != 0) {
+ glDeleteBuffersARB(1, &buffer);
+ }
+ return;
+ }
+ }
+ GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.pack_alignment, &pixels_size,
+ NULL, NULL);
+ void* pixels = GetSharedMemoryAs<void*>(
+ c.pixels_shm_id, c.pixels_shm_offset, pixels_size);
+ if (!pixels) {
+ if (buffer != 0) {
+ glDeleteBuffersARB(1, &buffer);
+ }
+ return;
+ }
+
+ if (buffer != 0) {
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, buffer);
+ void* data;
+ if (features().map_buffer_range) {
+ data = glMapBufferRange(
+ GL_PIXEL_PACK_BUFFER_ARB, 0, pixels_size, GL_MAP_READ_BIT);
+ } else {
+ data = glMapBuffer(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY);
+ }
+ memcpy(pixels, data, pixels_size);
+ // GL_PIXEL_PACK_BUFFER_ARB is currently unused, so we don't
+ // have to restore the state.
+ glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
+ glDeleteBuffersARB(1, &buffer);
+ }
+
+ if (result != NULL) {
+ *result = true;
+ }
+
+ GLenum read_format = GetBoundReadFrameBufferInternalFormat();
+ uint32 channels_exist = GLES2Util::GetChannelsForFormat(read_format);
+ if ((channels_exist & 0x0008) == 0 &&
+ workarounds().clear_alpha_in_readpixels) {
+ // Set the alpha to 255 because some drivers are buggy in this regard.
+ uint32 temp_size;
+
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, 2, format, type, state_.pack_alignment, &temp_size,
+ &unpadded_row_size, &padded_row_size)) {
+ return;
+ }
+
+ uint32 channel_count = 0;
+ uint32 alpha_channel = 0;
+ switch (format) {
+ case GL_RGBA:
+ case GL_BGRA_EXT:
+ channel_count = 4;
+ alpha_channel = 3;
+ break;
+ case GL_ALPHA:
+ channel_count = 1;
+ alpha_channel = 0;
+ break;
+ }
+
+ if (channel_count > 0) {
+ switch (type) {
+ case GL_UNSIGNED_BYTE:
+ WriteAlphaData<uint8>(
+ pixels, height, channel_count, alpha_channel, unpadded_row_size,
+ padded_row_size, 0xFF);
+ break;
+ case GL_FLOAT:
+ WriteAlphaData<float>(
+ pixels, height, channel_count, alpha_channel, unpadded_row_size,
+ padded_row_size, 1.0f);
+ break;
+ case GL_HALF_FLOAT:
+ WriteAlphaData<uint16>(
+ pixels, height, channel_count, alpha_channel, unpadded_row_size,
+ padded_row_size, 0x3C00);
+ break;
+ }
+ }
+ }
+}
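+
+// Illustrative sketch, not called by the decoder (the helper name is
+// hypothetical): the GL_HALF_FLOAT alpha value used above, 0x3C00, is the
+// IEEE 754 half-precision encoding of 1.0 (sign 0, biased exponent
+// 01111b = 15, mantissa 0).
+static uint16 OpaqueAlphaAsHalfFloat() {
+  const uint16 sign = 0;              // positive
+  const uint16 biased_exponent = 15;  // unbiased exponent 0 with a bias of 15
+  const uint16 mantissa = 0;          // implicit leading 1 gives exactly 1.0
+  return (sign << 15) | (biased_exponent << 10) | mantissa;  // == 0x3C00
+}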
+
+error::Error GLES2DecoderImpl::HandleReadPixels(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ReadPixels& c =
+ *static_cast<const gles2::cmds::ReadPixels*>(cmd_data);
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleReadPixels");
+ error::Error fbo_error = WillAccessBoundFramebufferForRead();
+ if (fbo_error != error::kNoError)
+ return fbo_error;
+ GLint x = c.x;
+ GLint y = c.y;
+ GLsizei width = c.width;
+ GLsizei height = c.height;
+ GLenum format = c.format;
+ GLenum type = c.type;
+ GLboolean async = c.async;
+ if (width < 0 || height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glReadPixels", "dimensions < 0");
+ return error::kNoError;
+ }
+ typedef cmds::ReadPixels::Result Result;
+ uint32 pixels_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.pack_alignment, &pixels_size,
+ NULL, NULL)) {
+ return error::kOutOfBounds;
+ }
+ void* pixels = GetSharedMemoryAs<void*>(
+ c.pixels_shm_id, c.pixels_shm_offset, pixels_size);
+ if (!pixels) {
+ return error::kOutOfBounds;
+ }
+ Result* result = NULL;
+ if (c.result_shm_id != 0) {
+ result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ }
+
+ if (!validators_->read_pixel_format.IsValid(format)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glReadPixels", format, "format");
+ return error::kNoError;
+ }
+ if (!validators_->read_pixel_type.IsValid(type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glReadPixels", type, "type");
+ return error::kNoError;
+ }
+ if ((format != GL_RGBA && format != GL_BGRA_EXT && format != GL_RGB &&
+ format != GL_ALPHA) || type != GL_UNSIGNED_BYTE) {
+ // format and type are acceptable enums but not guaranteed to be supported
+ // for this framebuffer. Have to ask gl if they are valid.
+ GLint preferred_format = 0;
+ DoGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &preferred_format);
+ GLint preferred_type = 0;
+ DoGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &preferred_type);
+ if (format != static_cast<GLenum>(preferred_format) ||
+ type != static_cast<GLenum>(preferred_type)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glReadPixels", "format and type incompatible "
+ "with the current read framebuffer");
+ return error::kNoError;
+ }
+ }
+ if (width == 0 || height == 0) {
+ return error::kNoError;
+ }
+
+ // Get the size of the current fbo or backbuffer.
+ gfx::Size max_size = GetBoundReadFrameBufferSize();
+
+ int32 max_x;
+ int32 max_y;
+ if (!SafeAddInt32(x, width, &max_x) || !SafeAddInt32(y, height, &max_y)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
+ return error::kNoError;
+ }
+
+ if (!CheckBoundReadFramebufferColorAttachment("glReadPixels")) {
+ return error::kNoError;
+ }
+
+ if (!CheckBoundFramebuffersValid("glReadPixels")) {
+ return error::kNoError;
+ }
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glReadPixels");
+
+ ScopedResolvedFrameBufferBinder binder(this, false, true);
+
+ if (x < 0 || y < 0 || max_x > max_size.width() || max_y > max_size.height()) {
+ // The user requested an out of range area. Get the results 1 line
+ // at a time.
+ uint32 temp_size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, 2, format, type, state_.pack_alignment, &temp_size,
+ &unpadded_row_size, &padded_row_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
+ return error::kNoError;
+ }
+
+ GLint dest_x_offset = std::max(-x, 0);
+ uint32 dest_row_offset;
+ if (!GLES2Util::ComputeImageDataSizes(
+ dest_x_offset, 1, format, type, state_.pack_alignment, &dest_row_offset,
+ NULL, NULL)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
+ return error::kNoError;
+ }
+
+ // Copy each row into the larger dest rect.
+ int8* dst = static_cast<int8*>(pixels);
+ GLint read_x = std::max(0, x);
+ GLint read_end_x = std::max(0, std::min(max_size.width(), max_x));
+ GLint read_width = read_end_x - read_x;
+ for (GLint yy = 0; yy < height; ++yy) {
+ GLint ry = y + yy;
+
+ // Clear the row.
+ memset(dst, 0, unpadded_row_size);
+
+ // If the row is in range, copy it.
+ if (ry >= 0 && ry < max_size.height() && read_width > 0) {
+ glReadPixels(
+ read_x, ry, read_width, 1, format, type, dst + dest_row_offset);
+ }
+ dst += padded_row_size;
+ }
+ } else {
+ if (async && features().use_async_readpixels) {
+ GLuint buffer;
+ glGenBuffersARB(1, &buffer);
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, buffer);
+ glBufferData(GL_PIXEL_PACK_BUFFER_ARB, pixels_size, NULL, GL_STREAM_READ);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ glReadPixels(x, y, width, height, format, type, 0);
+ pending_readpixel_fences_.push(linked_ptr<FenceCallback>(
+ new FenceCallback()));
+ WaitForReadPixels(base::Bind(
+ &GLES2DecoderImpl::FinishReadPixels,
+ base::internal::SupportsWeakPtrBase::StaticAsWeakPtr
+ <GLES2DecoderImpl>(this),
+ c, buffer));
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
+ return error::kNoError;
+ } else {
+ // On error, unbind pack buffer and fall through to sync readpixels
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
+ }
+ }
+ glReadPixels(x, y, width, height, format, type, pixels);
+ }
+ GLenum error = LOCAL_PEEK_GL_ERROR("glReadPixels");
+ if (error == GL_NO_ERROR) {
+ if (result != NULL) {
+ *result = true;
+ }
+ FinishReadPixels(c, 0);
+ }
+
+ return error::kNoError;
+}
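+
+// Illustrative sketch, not called by the decoder (the helper name is
+// hypothetical): the horizontal clipping used above for out-of-range
+// ReadPixels requests. For a 100 pixel wide framebuffer and a request with
+// x = -10, width = 50, this yields read_x = 0 and read_width = 40, while
+// dest_x_offset = 10 shifts the copied pixels past the zeroed, off-screen
+// part of each destination row.
+static void ClipReadPixelsRow(GLint x, GLsizei width, GLint framebuffer_width,
+                              GLint* read_x, GLint* read_width) {
+  GLint read_end_x = std::max(0, std::min(framebuffer_width, x + width));
+  *read_x = std::max(0, x);
+  *read_width = read_end_x - *read_x;
+}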
+
+error::Error GLES2DecoderImpl::HandlePixelStorei(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::PixelStorei& c =
+ *static_cast<const gles2::cmds::PixelStorei*>(cmd_data);
+ GLenum pname = c.pname;
+ GLenum param = c.param;
+ if (!validators_->pixel_store.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glPixelStorei", pname, "pname");
+ return error::kNoError;
+ }
+ switch (pname) {
+ case GL_PACK_ALIGNMENT:
+ case GL_UNPACK_ALIGNMENT:
+ if (!validators_->pixel_store_alignment.IsValid(param)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glPixelStorei", "param GL_INVALID_VALUE");
+ return error::kNoError;
+ }
+ break;
+ case GL_UNPACK_FLIP_Y_CHROMIUM:
+ unpack_flip_y_ = (param != 0);
+ return error::kNoError;
+ case GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM:
+ unpack_premultiply_alpha_ = (param != 0);
+ return error::kNoError;
+ case GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM:
+ unpack_unpremultiply_alpha_ = (param != 0);
+ return error::kNoError;
+ default:
+ break;
+ }
+ glPixelStorei(pname, param);
+ switch (pname) {
+ case GL_PACK_ALIGNMENT:
+ state_.pack_alignment = param;
+ break;
+ case GL_PACK_REVERSE_ROW_ORDER_ANGLE:
+ state_.pack_reverse_row_order = (param != 0);
+ break;
+ case GL_UNPACK_ALIGNMENT:
+ state_.unpack_alignment = param;
+ break;
+ default:
+ // Validation should have prevented us from getting here.
+ NOTREACHED();
+ break;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandlePostSubBufferCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::PostSubBufferCHROMIUM& c =
+ *static_cast<const gles2::cmds::PostSubBufferCHROMIUM*>(cmd_data);
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandlePostSubBufferCHROMIUM");
+ {
+ TRACE_EVENT_SYNTHETIC_DELAY("gpu.PresentingFrame");
+ }
+ if (!supports_post_sub_buffer_) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glPostSubBufferCHROMIUM", "command not supported by surface");
+ return error::kNoError;
+ }
+ bool is_tracing;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("gpu.debug"),
+ &is_tracing);
+ if (is_tracing) {
+ bool is_offscreen = !!offscreen_target_frame_buffer_.get();
+ ScopedFrameBufferBinder binder(this, GetBackbufferServiceId());
+ gpu_state_tracer_->TakeSnapshotWithCurrentFramebuffer(
+ is_offscreen ? offscreen_size_ : surface_->GetSize());
+ }
+ if (surface_->PostSubBuffer(c.x, c.y, c.width, c.height)) {
+ return error::kNoError;
+ } else {
+ LOG(ERROR) << "Context lost because PostSubBuffer failed.";
+ return error::kLostContext;
+ }
+}
+
+error::Error GLES2DecoderImpl::HandleScheduleOverlayPlaneCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ScheduleOverlayPlaneCHROMIUM& c =
+ *static_cast<const gles2::cmds::ScheduleOverlayPlaneCHROMIUM*>(cmd_data);
+ TextureRef* ref = texture_manager()->GetTexture(c.overlay_texture_id);
+ if (!ref) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glScheduleOverlayPlaneCHROMIUM",
+ "unknown texture");
+ return error::kNoError;
+ }
+ gfx::GLImage* image =
+ ref->texture()->GetLevelImage(ref->texture()->target(), 0);
+ if (!image) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glScheduleOverlayPlaneCHROMIUM",
+ "unsupported texture format");
+ return error::kNoError;
+ }
+ gfx::OverlayTransform transform = GetGFXOverlayTransform(c.plane_transform);
+ if (transform == gfx::OVERLAY_TRANSFORM_INVALID) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_ENUM,
+ "glScheduleOverlayPlaneCHROMIUM",
+ "invalid transform enum");
+ return error::kNoError;
+ }
+ if (!surface_->ScheduleOverlayPlane(
+ c.plane_z_order,
+ transform,
+ image,
+ gfx::Rect(c.bounds_x, c.bounds_y, c.bounds_width, c.bounds_height),
+ gfx::RectF(c.uv_x, c.uv_y, c.uv_width, c.uv_height))) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glScheduleOverlayPlaneCHROMIUM",
+ "failed to schedule overlay");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::GetAttribLocationHelper(
+ GLuint client_id, uint32 location_shm_id, uint32 location_shm_offset,
+ const std::string& name_str) {
+ if (!StringIsValidForGLES(name_str.c_str())) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetAttribLocation", "Invalid character");
+ return error::kNoError;
+ }
+ Program* program = GetProgramInfoNotShader(
+ client_id, "glGetAttribLocation");
+ if (!program) {
+ return error::kNoError;
+ }
+ if (!program->IsValid()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glGetAttribLocation", "program not linked");
+ return error::kNoError;
+ }
+ GLint* location = GetSharedMemoryAs<GLint*>(
+ location_shm_id, location_shm_offset, sizeof(GLint));
+ if (!location) {
+ return error::kOutOfBounds;
+ }
+ // Require the client to init this in case the context is lost and we are no
+ // longer executing commands.
+ if (*location != -1) {
+ return error::kGenericError;
+ }
+ *location = program->GetAttribLocation(name_str);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetAttribLocation(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetAttribLocation& c =
+ *static_cast<const gles2::cmds::GetAttribLocation*>(cmd_data);
+ Bucket* bucket = GetBucket(c.name_bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ std::string name_str;
+ if (!bucket->GetAsString(&name_str)) {
+ return error::kInvalidArguments;
+ }
+ return GetAttribLocationHelper(
+ c.program, c.location_shm_id, c.location_shm_offset, name_str);
+}
+
+error::Error GLES2DecoderImpl::GetUniformLocationHelper(
+ GLuint client_id, uint32 location_shm_id, uint32 location_shm_offset,
+ const std::string& name_str) {
+ if (!StringIsValidForGLES(name_str.c_str())) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetUniformLocation", "Invalid character");
+ return error::kNoError;
+ }
+ Program* program = GetProgramInfoNotShader(
+ client_id, "glGetUniformLocation");
+ if (!program) {
+ return error::kNoError;
+ }
+ if (!program->IsValid()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glGetUniformLocation", "program not linked");
+ return error::kNoError;
+ }
+ GLint* location = GetSharedMemoryAs<GLint*>(
+ location_shm_id, location_shm_offset, sizeof(GLint));
+ if (!location) {
+ return error::kOutOfBounds;
+ }
+ // Require the client to init this in case the context is lost and we are no
+ // longer executing commands.
+ if (*location != -1) {
+ return error::kGenericError;
+ }
+ *location = program->GetUniformFakeLocation(name_str);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetUniformLocation(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetUniformLocation& c =
+ *static_cast<const gles2::cmds::GetUniformLocation*>(cmd_data);
+ Bucket* bucket = GetBucket(c.name_bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ std::string name_str;
+ if (!bucket->GetAsString(&name_str)) {
+ return error::kInvalidArguments;
+ }
+ return GetUniformLocationHelper(
+ c.program, c.location_shm_id, c.location_shm_offset, name_str);
+}
+
+error::Error GLES2DecoderImpl::HandleGetString(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetString& c =
+ *static_cast<const gles2::cmds::GetString*>(cmd_data);
+ GLenum name = static_cast<GLenum>(c.name);
+ if (!validators_->string_type.IsValid(name)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetString", name, "name");
+ return error::kNoError;
+ }
+ const char* str = reinterpret_cast<const char*>(glGetString(name));
+ std::string extensions;
+ switch (name) {
+ case GL_VERSION:
+ str = "OpenGL ES 2.0 Chromium";
+ break;
+ case GL_SHADING_LANGUAGE_VERSION:
+ str = "OpenGL ES GLSL ES 1.0 Chromium";
+ break;
+ case GL_RENDERER:
+ case GL_VENDOR:
+ // Return the unmasked VENDOR/RENDERER string for WebGL contexts.
+ // They are used by WEBGL_debug_renderer_info.
+ if (!force_webgl_glsl_validation_)
+ str = "Chromium";
+ break;
+ case GL_EXTENSIONS:
+ {
+ // For WebGL contexts, strip out the OES derivatives and
+ // EXT frag depth extensions if they have not been enabled.
+ if (force_webgl_glsl_validation_) {
+ extensions = feature_info_->extensions();
+ if (!derivatives_explicitly_enabled_) {
+ size_t offset = extensions.find(kOESDerivativeExtension);
+ if (std::string::npos != offset) {
+ extensions.replace(offset, arraysize(kOESDerivativeExtension),
+ std::string());
+ }
+ }
+ if (!frag_depth_explicitly_enabled_) {
+ size_t offset = extensions.find(kEXTFragDepthExtension);
+ if (std::string::npos != offset) {
+ extensions.replace(offset, arraysize(kEXTFragDepthExtension),
+ std::string());
+ }
+ }
+ if (!draw_buffers_explicitly_enabled_) {
+ size_t offset = extensions.find(kEXTDrawBuffersExtension);
+ if (std::string::npos != offset) {
+ extensions.replace(offset, arraysize(kEXTDrawBuffersExtension),
+ std::string());
+ }
+ }
+ if (!shader_texture_lod_explicitly_enabled_) {
+ size_t offset = extensions.find(kEXTShaderTextureLodExtension);
+ if (std::string::npos != offset) {
+ extensions.replace(offset,
+ arraysize(kEXTShaderTextureLodExtension),
+ std::string());
+ }
+ }
+ } else {
+ extensions = feature_info_->extensions().c_str();
+ }
+ if (supports_post_sub_buffer_)
+ extensions += " GL_CHROMIUM_post_sub_buffer";
+ str = extensions.c_str();
+ }
+ break;
+ default:
+ break;
+ }
+ Bucket* bucket = CreateBucket(c.bucket_id);
+ bucket->SetFromString(str);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBufferData(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BufferData& c =
+ *static_cast<const gles2::cmds::BufferData*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizeiptr size = static_cast<GLsizeiptr>(c.size);
+ uint32 data_shm_id = static_cast<uint32>(c.data_shm_id);
+ uint32 data_shm_offset = static_cast<uint32>(c.data_shm_offset);
+ GLenum usage = static_cast<GLenum>(c.usage);
+ const void* data = NULL;
+ if (data_shm_id != 0 || data_shm_offset != 0) {
+ data = GetSharedMemoryAs<const void*>(data_shm_id, data_shm_offset, size);
+ if (!data) {
+ return error::kOutOfBounds;
+ }
+ }
+ buffer_manager()->ValidateAndDoBufferData(&state_, target, size, data, usage);
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoBufferSubData(
+ GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid * data) {
+ // Just delegate it. Some validation is actually done before this.
+ buffer_manager()->ValidateAndDoBufferSubData(
+ &state_, target, offset, size, data);
+}
+
+bool GLES2DecoderImpl::ClearLevel(
+ unsigned service_id,
+ unsigned bind_target,
+ unsigned target,
+ int level,
+ unsigned internal_format,
+ unsigned format,
+ unsigned type,
+ int width,
+ int height,
+ bool is_texture_immutable) {
+ uint32 channels = GLES2Util::GetChannelsForFormat(format);
+ if (feature_info_->feature_flags().angle_depth_texture &&
+ (channels & GLES2Util::kDepth) != 0) {
+ // It's a depth format and ANGLE doesn't allow texImage2D or texSubImage2D
+ // on depth formats.
+ GLuint fb = 0;
+ glGenFramebuffersEXT(1, &fb);
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, fb);
+
+ bool have_stencil = (channels & GLES2Util::kStencil) != 0;
+ GLenum attachment = have_stencil ? GL_DEPTH_STENCIL_ATTACHMENT :
+ GL_DEPTH_ATTACHMENT;
+
+ glFramebufferTexture2DEXT(
+ GL_DRAW_FRAMEBUFFER_EXT, attachment, target, service_id, level);
+ // ANGLE promises that a depth-only attachment is ok.
+ if (glCheckFramebufferStatusEXT(GL_DRAW_FRAMEBUFFER_EXT) !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ return false;
+ }
+ glClearStencil(0);
+ state_.SetDeviceStencilMaskSeparate(GL_FRONT, kDefaultStencilMask);
+ state_.SetDeviceStencilMaskSeparate(GL_BACK, kDefaultStencilMask);
+ glClearDepth(1.0f);
+ state_.SetDeviceDepthMask(GL_TRUE);
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ glClear(GL_DEPTH_BUFFER_BIT | (have_stencil ? GL_STENCIL_BUFFER_BIT : 0));
+
+ RestoreClearState();
+
+ glDeleteFramebuffersEXT(1, &fb);
+ Framebuffer* framebuffer =
+ GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER_EXT);
+ GLuint fb_service_id =
+ framebuffer ? framebuffer->service_id() : GetBackbufferServiceId();
+ glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, fb_service_id);
+ return true;
+ }
+
+ static const uint32 kMaxZeroSize = 1024 * 1024 * 4;
+
+ uint32 size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &size,
+ NULL, &padded_row_size)) {
+ return false;
+ }
+
+ TRACE_EVENT1("gpu", "GLES2DecoderImpl::ClearLevel", "size", size);
+
+ int tile_height;
+
+ if (size > kMaxZeroSize) {
+ if (kMaxZeroSize < padded_row_size) {
+ // That'd be an awfully large texture.
+ return false;
+ }
+ // We should never have a large total size with a zero row size.
+ DCHECK_GT(padded_row_size, 0U);
+ tile_height = kMaxZeroSize / padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, tile_height, format, type, state_.unpack_alignment, &size,
+ NULL, NULL)) {
+ return false;
+ }
+ } else {
+ tile_height = height;
+ }
+
+ // Assumes the size has already been checked.
+ scoped_ptr<char[]> zero(new char[size]);
+ memset(zero.get(), 0, size);
+ glBindTexture(bind_target, service_id);
+
+ GLint y = 0;
+ while (y < height) {
+ GLint h = y + tile_height > height ? height - y : tile_height;
+ if (is_texture_immutable || h != height) {
+ glTexSubImage2D(target, level, 0, y, width, h, format, type, zero.get());
+ } else {
+ glTexImage2D(
+ target, level, internal_format, width, h, 0, format, type,
+ zero.get());
+ }
+ y += tile_height;
+ }
+ TextureRef* texture = texture_manager()->GetTextureInfoForTarget(
+ &state_, bind_target);
+ glBindTexture(bind_target, texture ? texture->service_id() : 0);
+ return true;
+}
+
+namespace {
+
+const int kS3TCBlockWidth = 4;
+const int kS3TCBlockHeight = 4;
+const int kS3TCDXT1BlockSize = 8;
+const int kS3TCDXT3AndDXT5BlockSize = 16;
+
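+ // S3TC/DXT levels must be multiples of the 4x4 block size, except that the
+ // smallest mip levels may be 1 or 2 texels on a side.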
+bool IsValidDXTSize(GLint level, GLsizei size) {
+ return (size == 1) ||
+ (size == 2) || !(size % kS3TCBlockWidth);
+}
+
+bool IsValidPVRTCSize(GLint level, GLsizei size) {
+ return GLES2Util::IsPOT(size);
+}
+
+} // anonymous namespace.
+
+bool GLES2DecoderImpl::ValidateCompressedTexFuncData(
+ const char* function_name,
+ GLsizei width, GLsizei height, GLenum format, size_t size) {
+ unsigned int bytes_required = 0;
+
+ switch (format) {
+ case GL_ATC_RGB_AMD:
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ case GL_ETC1_RGB8_OES: {
+ int num_blocks_across =
+ (width + kS3TCBlockWidth - 1) / kS3TCBlockWidth;
+ int num_blocks_down =
+ (height + kS3TCBlockHeight - 1) / kS3TCBlockHeight;
+ int num_blocks = num_blocks_across * num_blocks_down;
+ bytes_required = num_blocks * kS3TCDXT1BlockSize;
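+ // These formats all use 8-byte 4x4 blocks; for example, a 16x16 image is
+ // 4 * 4 = 16 blocks and therefore needs 16 * 8 = 128 bytes.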
+ break;
+ }
+ case GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
+ case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: {
+ int num_blocks_across =
+ (width + kS3TCBlockWidth - 1) / kS3TCBlockWidth;
+ int num_blocks_down =
+ (height + kS3TCBlockHeight - 1) / kS3TCBlockHeight;
+ int num_blocks = num_blocks_across * num_blocks_down;
+ bytes_required = num_blocks * kS3TCDXT3AndDXT5BlockSize;
+ break;
+ }
+ case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG: {
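+ // 4 bits per pixel with a minimum footprint of 8x8 texels; for example, a
+ // 4x4 image still requires (8 * 8 * 4 + 7) / 8 = 32 bytes.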
+ bytes_required = (std::max(width, 8) * std::max(height, 8) * 4 + 7)/8;
+ break;
+ }
+ case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG: {
+ bytes_required = (std::max(width, 16) * std::max(height, 8) * 2 + 7)/8;
+ break;
+ }
+ default:
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, format, "format");
+ return false;
+ }
+
+ if (size != bytes_required) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, function_name, "size is not correct for dimensions");
+ return false;
+ }
+
+ return true;
+}
+
+bool GLES2DecoderImpl::ValidateCompressedTexDimensions(
+ const char* function_name,
+ GLint level, GLsizei width, GLsizei height, GLenum format) {
+ switch (format) {
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: {
+ if (!IsValidDXTSize(level, width) || !IsValidDXTSize(level, height)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "width or height invalid for level");
+ return false;
+ }
+ return true;
+ }
+ case GL_ATC_RGB_AMD:
+ case GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
+ case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD:
+ case GL_ETC1_RGB8_OES: {
+ if (width <= 0 || height <= 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "width or height invalid for level");
+ return false;
+ }
+ return true;
+ }
+ case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG: {
+ if (!IsValidPVRTCSize(level, width) ||
+ !IsValidPVRTCSize(level, height)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "width or height invalid for level");
+ return false;
+ }
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
+bool GLES2DecoderImpl::ValidateCompressedTexSubDimensions(
+ const char* function_name,
+ GLenum target, GLint level, GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height, GLenum format,
+ Texture* texture) {
+ if (xoffset < 0 || yoffset < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, function_name, "xoffset or yoffset < 0");
+ return false;
+ }
+
+ switch (format) {
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: {
+ const int kBlockWidth = 4;
+ const int kBlockHeight = 4;
+ if ((xoffset % kBlockWidth) || (yoffset % kBlockHeight)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "xoffset or yoffset not multiple of 4");
+ return false;
+ }
+ GLsizei tex_width = 0;
+ GLsizei tex_height = 0;
+ if (!texture->GetLevelSize(target, level, &tex_width, &tex_height) ||
+ width - xoffset > tex_width ||
+ height - yoffset > tex_height) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "dimensions out of range");
+ return false;
+ }
+ return ValidateCompressedTexDimensions(
+ function_name, level, width, height, format);
+ }
+ case GL_ATC_RGB_AMD:
+ case GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
+ case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD: {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "not supported for ATC textures");
+ return false;
+ }
+ case GL_ETC1_RGB8_OES: {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "not supported for ECT1_RGB8_OES textures");
+ return false;
+ }
+ case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG: {
+ if ((xoffset != 0) || (yoffset != 0)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "xoffset and yoffset must be zero");
+ return false;
+ }
+ GLsizei tex_width = 0;
+ GLsizei tex_height = 0;
+ if (!texture->GetLevelSize(target, level, &tex_width, &tex_height) ||
+ width != tex_width ||
+ height != tex_height) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "dimensions must match existing texture level dimensions");
+ return false;
+ }
+ return ValidateCompressedTexDimensions(
+ function_name, level, width, height, format);
+ }
+ default:
+ return false;
+ }
+}
+
+error::Error GLES2DecoderImpl::DoCompressedTexImage2D(
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei image_size,
+ const void* data) {
+ // TODO(gman): Validate image_size is correct for width, height and format.
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCompressedTexImage2D", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->compressed_texture_format.IsValid(
+ internal_format)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCompressedTexImage2D", internal_format, "internal_format");
+ return error::kNoError;
+ }
+ if (!texture_manager()->ValidForTarget(target, level, width, height, 1) ||
+ border != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glCompressedTexImage2D", "dimensions out of range");
+ return error::kNoError;
+ }
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glCompressedTexImage2D", "unknown texture target");
+ return error::kNoError;
+ }
+ Texture* texture = texture_ref->texture();
+ if (texture->IsImmutable()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCompressedTexImage2D", "texture is immutable");
+ return error::kNoError;
+ }
+
+ if (!ValidateCompressedTexDimensions(
+ "glCompressedTexImage2D", level, width, height, internal_format) ||
+ !ValidateCompressedTexFuncData(
+ "glCompressedTexImage2D", width, height, internal_format, image_size)) {
+ return error::kNoError;
+ }
+
+ if (!EnsureGPUMemoryAvailable(image_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glCompressedTexImage2D", "out of memory");
+ return error::kNoError;
+ }
+
+ if (texture->IsAttachedToFramebuffer()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+
+ scoped_ptr<int8[]> zero;
+ if (!data) {
+ zero.reset(new int8[image_size]);
+ memset(zero.get(), 0, image_size);
+ data = zero.get();
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCompressedTexImage2D");
+ glCompressedTexImage2D(
+ target, level, internal_format, width, height, border, image_size, data);
+ GLenum error = LOCAL_PEEK_GL_ERROR("glCompressedTexImage2D");
+ if (error == GL_NO_ERROR) {
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, level, internal_format,
+ width, height, 1, border, 0, 0, true);
+ }
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCompressedTexImage2D(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CompressedTexImage2D& c =
+ *static_cast<const gles2::cmds::CompressedTexImage2D*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLenum internal_format = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLint border = static_cast<GLint>(c.border);
+ GLsizei image_size = static_cast<GLsizei>(c.imageSize);
+ uint32 data_shm_id = static_cast<uint32>(c.data_shm_id);
+ uint32 data_shm_offset = static_cast<uint32>(c.data_shm_offset);
+ const void* data = NULL;
+ if (data_shm_id != 0 || data_shm_offset != 0) {
+ data = GetSharedMemoryAs<const void*>(
+ data_shm_id, data_shm_offset, image_size);
+ if (!data) {
+ return error::kOutOfBounds;
+ }
+ }
+ return DoCompressedTexImage2D(
+ target, level, internal_format, width, height, border, image_size, data);
+}
+
+error::Error GLES2DecoderImpl::HandleCompressedTexImage2DBucket(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CompressedTexImage2DBucket& c =
+ *static_cast<const gles2::cmds::CompressedTexImage2DBucket*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLenum internal_format = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLint border = static_cast<GLint>(c.border);
+ Bucket* bucket = GetBucket(c.bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ uint32 data_size = bucket->size();
+ GLsizei imageSize = data_size;
+ const void* data = bucket->GetData(0, data_size);
+ if (!data) {
+ return error::kInvalidArguments;
+ }
+ return DoCompressedTexImage2D(
+ target, level, internal_format, width, height, border,
+ imageSize, data);
+}
+
+error::Error GLES2DecoderImpl::HandleCompressedTexSubImage2DBucket(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CompressedTexSubImage2DBucket& c =
+ *static_cast<const gles2::cmds::CompressedTexSubImage2DBucket*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLenum format = static_cast<GLenum>(c.format);
+ Bucket* bucket = GetBucket(c.bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ uint32 data_size = bucket->size();
+ GLsizei imageSize = data_size;
+ const void* data = bucket->GetData(0, data_size);
+ if (!data) {
+ return error::kInvalidArguments;
+ }
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_ENUM, "glCompressedTexSubImage2D", "target");
+ return error::kNoError;
+ }
+ if (!validators_->compressed_texture_format.IsValid(format)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCompressedTexSubImage2D", format, "format");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "height < 0");
+ return error::kNoError;
+ }
+ if (imageSize < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "imageSize < 0");
+ return error::kNoError;
+ }
+ DoCompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, imageSize, data);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexImage2D(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexImage2D& c =
+ *static_cast<const gles2::cmds::TexImage2D*>(cmd_data);
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::HandleTexImage2D",
+ "width", c.width, "height", c.height);
+ // Set as failed for now, but if it succeeds, this will be set to not failed.
+ texture_state_.tex_image_2d_failed = true;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ // TODO(kloveless): Change TexImage2D command to use unsigned integer
+ // for internalformat.
+ GLenum internal_format = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLint border = static_cast<GLint>(c.border);
+ GLenum format = static_cast<GLenum>(c.format);
+ GLenum type = static_cast<GLenum>(c.type);
+ uint32 pixels_shm_id = static_cast<uint32>(c.pixels_shm_id);
+ uint32 pixels_shm_offset = static_cast<uint32>(c.pixels_shm_offset);
+ uint32 pixels_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &pixels_size, NULL,
+ NULL)) {
+ return error::kOutOfBounds;
+ }
+ const void* pixels = NULL;
+ if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
+ pixels = GetSharedMemoryAs<const void*>(
+ pixels_shm_id, pixels_shm_offset, pixels_size);
+ if (!pixels) {
+ return error::kOutOfBounds;
+ }
+ }
+
+ TextureManager::DoTextImage2DArguments args = {
+ target, level, internal_format, width, height, border, format, type,
+ pixels, pixels_size};
+ texture_manager()->ValidateAndDoTexImage2D(
+ &texture_state_, &state_, &framebuffer_state_, args);
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoCompressedTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei image_size,
+ const void * data) {
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCompressedTexSubImage2D", "unknown texture for target");
+ return;
+ }
+ Texture* texture = texture_ref->texture();
+ GLenum type = 0;
+ GLenum internal_format = 0;
+ if (!texture->GetLevelType(target, level, &type, &internal_format)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCompressedTexSubImage2D", "level does not exist.");
+ return;
+ }
+ if (internal_format != format) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCompressedTexSubImage2D", "format does not match internal format.");
+ return;
+ }
+ if (!texture->ValidForTexture(
+ target, level, xoffset, yoffset, width, height, type)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "bad dimensions.");
+ return;
+ }
+
+ if (!ValidateCompressedTexFuncData(
+ "glCompressedTexSubImage2D", width, height, format, image_size) ||
+ !ValidateCompressedTexSubDimensions(
+ "glCompressedTexSubImage2D",
+ target, level, xoffset, yoffset, width, height, format, texture)) {
+ return;
+ }
+
+ // Note: There is no need to deal with texture cleared tracking here
+ // because the validation above means you can only get here if the level
+ // is already a matching compressed format and in that case
+ // CompressedTexImage2D already cleared the texture.
+ glCompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, image_size, data);
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
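+ // Clamps the interval [start, start + range) to [0, sourceRange). For
+ // example, Clip(-2, 10, 8, ...) yields start 0 and range 8, while
+ // Clip(5, 10, 8, ...) yields start 5 and range 3. The resulting range can be
+ // negative when the requested rectangle lies entirely outside the source;
+ // callers guard against that with copyWidth/copyHeight > 0 checks.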
+static void Clip(
+ GLint start, GLint range, GLint sourceRange,
+ GLint* out_start, GLint* out_range) {
+ DCHECK(out_start);
+ DCHECK(out_range);
+ if (start < 0) {
+ range += start;
+ start = 0;
+ }
+ GLint end = start + range;
+ if (end > sourceRange) {
+ range -= end - sourceRange;
+ }
+ *out_start = start;
+ *out_range = range;
+}
+
+void GLES2DecoderImpl::DoCopyTexImage2D(
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) {
+ DCHECK(!ShouldDeferReads());
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCopyTexImage2D", "unknown texture for target");
+ return;
+ }
+ Texture* texture = texture_ref->texture();
+ if (texture->IsImmutable()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glCopyTexImage2D", "texture is immutable");
+ return;
+ }
+ if (!texture_manager()->ValidForTarget(target, level, width, height, 1) ||
+ border != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTexImage2D", "dimensions out of range");
+ return;
+ }
+ if (!texture_manager()->ValidateFormatAndTypeCombination(
+ state_.GetErrorState(), "glCopyTexImage2D", internal_format,
+ GL_UNSIGNED_BYTE)) {
+ return;
+ }
+
+ // Check we have compatible formats.
+ GLenum read_format = GetBoundReadFrameBufferInternalFormat();
+ uint32 channels_exist = GLES2Util::GetChannelsForFormat(read_format);
+ uint32 channels_needed = GLES2Util::GetChannelsForFormat(internal_format);
+
+ if ((channels_needed & channels_exist) != channels_needed) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glCopyTexImage2D", "incompatible format");
+ return;
+ }
+
+ if ((channels_needed & (GLES2Util::kDepth | GLES2Util::kStencil)) != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCopyTexImage2D", "can not be used with depth or stencil textures");
+ return;
+ }
+
+ uint32 estimated_size = 0;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, internal_format, GL_UNSIGNED_BYTE, state_.unpack_alignment,
+ &estimated_size, NULL, NULL)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glCopyTexImage2D", "dimensions too large");
+ return;
+ }
+
+ if (!EnsureGPUMemoryAvailable(estimated_size)) {
+ LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glCopyTexImage2D", "out of memory");
+ return;
+ }
+
+ if (!CheckBoundReadFramebufferColorAttachment("glCopyTexImage2D")) {
+ return;
+ }
+
+ if (!CheckBoundFramebuffersValid("glCopyTexImage2D")) {
+ return;
+ }
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopyTexImage2D");
+ ScopedResolvedFrameBufferBinder binder(this, false, true);
+ gfx::Size size = GetBoundReadFrameBufferSize();
+
+ if (texture->IsAttachedToFramebuffer()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+
+ // Clip the copy rectangle to the source dimensions.
+ GLint copyX = 0;
+ GLint copyY = 0;
+ GLint copyWidth = 0;
+ GLint copyHeight = 0;
+ Clip(x, width, size.width(), &copyX, &copyWidth);
+ Clip(y, height, size.height(), &copyY, &copyHeight);
+
+ if (copyX != x ||
+ copyY != y ||
+ copyWidth != width ||
+ copyHeight != height) {
+ // some part was clipped so clear the texture.
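+ // ClearLevel defines the level at full size filled with zeros, and then
+ // only the part that overlaps the read framebuffer is copied below. For
+ // example, with x == -5 the copy starts at destX == 5, leaving the first
+ // five columns zero.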
+ if (!ClearLevel(
+ texture->service_id(), texture->target(),
+ target, level, internal_format, internal_format, GL_UNSIGNED_BYTE,
+ width, height, texture->IsImmutable())) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glCopyTexImage2D", "dimensions too big");
+ return;
+ }
+ if (copyHeight > 0 && copyWidth > 0) {
+ GLint dx = copyX - x;
+ GLint dy = copyY - y;
+ GLint destX = dx;
+ GLint destY = dy;
+ ScopedModifyPixels modify(texture_ref);
+ glCopyTexSubImage2D(target, level,
+ destX, destY, copyX, copyY,
+ copyWidth, copyHeight);
+ }
+ } else {
+ ScopedModifyPixels modify(texture_ref);
+ glCopyTexImage2D(target, level, internal_format,
+ copyX, copyY, copyWidth, copyHeight, border);
+ }
+ GLenum error = LOCAL_PEEK_GL_ERROR("glCopyTexImage2D");
+ if (error == GL_NO_ERROR) {
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, level, internal_format, width, height, 1,
+ border, internal_format, GL_UNSIGNED_BYTE, true);
+ }
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
+void GLES2DecoderImpl::DoCopyTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ DCHECK(!ShouldDeferReads());
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCopyTexSubImage2D", "unknown texture for target");
+ return;
+ }
+ Texture* texture = texture_ref->texture();
+ GLenum type = 0;
+ GLenum format = 0;
+ if (!texture->GetLevelType(target, level, &type, &format) ||
+ !texture->ValidForTexture(
+ target, level, xoffset, yoffset, width, height, type)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTexSubImage2D", "bad dimensions.");
+ return;
+ }
+ if (async_pixel_transfer_manager_->AsyncTransferIsInProgress(texture_ref)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCopyTexSubImage2D", "async upload pending for texture");
+ return;
+ }
+
+ // Check we have compatible formats.
+ GLenum read_format = GetBoundReadFrameBufferInternalFormat();
+ uint32 channels_exist = GLES2Util::GetChannelsForFormat(read_format);
+ uint32 channels_needed = GLES2Util::GetChannelsForFormat(format);
+
+ if (!channels_needed ||
+ (channels_needed & channels_exist) != channels_needed) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glCopyTexSubImage2D", "incompatible format");
+ return;
+ }
+
+ if ((channels_needed & (GLES2Util::kDepth | GLES2Util::kStencil)) != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCopySubImage2D", "can not be used with depth or stencil textures");
+ return;
+ }
+
+ if (!CheckBoundReadFramebufferColorAttachment("glCopyTexSubImage2D")) {
+ return;
+ }
+
+ if (!CheckBoundFramebuffersValid("glCopyTexSubImage2D")) {
+ return;
+ }
+
+ ScopedResolvedFrameBufferBinder binder(this, false, true);
+ gfx::Size size = GetBoundReadFrameBufferSize();
+ GLint copyX = 0;
+ GLint copyY = 0;
+ GLint copyWidth = 0;
+ GLint copyHeight = 0;
+ Clip(x, width, size.width(), &copyX, &copyWidth);
+ Clip(y, height, size.height(), &copyY, &copyHeight);
+
+ if (!texture_manager()->ClearTextureLevel(this, texture_ref, target, level)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glCopyTexSubImage2D", "dimensions too big");
+ return;
+ }
+
+ if (copyX != x ||
+ copyY != y ||
+ copyWidth != width ||
+ copyHeight != height) {
+ // some part was clipped so clear the sub rect.
+ uint32 pixels_size = 0;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &pixels_size,
+ NULL, NULL)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTexSubImage2D", "dimensions too large");
+ return;
+ }
+ scoped_ptr<char[]> zero(new char[pixels_size]);
+ memset(zero.get(), 0, pixels_size);
+ ScopedModifyPixels modify(texture_ref);
+ glTexSubImage2D(
+ target, level, xoffset, yoffset, width, height,
+ format, type, zero.get());
+ }
+
+ if (copyHeight > 0 && copyWidth > 0) {
+ GLint dx = copyX - x;
+ GLint dy = copyY - y;
+ GLint destX = xoffset + dx;
+ GLint destY = yoffset + dy;
+ ScopedModifyPixels modify(texture_ref);
+ glCopyTexSubImage2D(target, level,
+ destX, destY, copyX, copyY,
+ copyWidth, copyHeight);
+ }
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
+bool GLES2DecoderImpl::ValidateTexSubImage2D(
+ error::Error* error,
+ const char* function_name,
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void * data) {
+ (*error) = error::kNoError;
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, target, "target");
+ return false;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "width < 0");
+ return false;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "height < 0");
+ return false;
+ }
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ function_name, "unknown texture for target");
+ return false;
+ }
+ Texture* texture = texture_ref->texture();
+ GLenum current_type = 0;
+ GLenum internal_format = 0;
+ if (!texture->GetLevelType(target, level, &current_type, &internal_format)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name, "level does not exist.");
+ return false;
+ }
+ if (!texture_manager()->ValidateTextureParameters(state_.GetErrorState(),
+ function_name, format, type, internal_format, level)) {
+ return false;
+ }
+ if (type != current_type) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ function_name, "type does not match type of texture.");
+ return false;
+ }
+ if (async_pixel_transfer_manager_->AsyncTransferIsInProgress(texture_ref)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ function_name, "async upload pending for texture");
+ return false;
+ }
+ if (!texture->ValidForTexture(
+ target, level, xoffset, yoffset, width, height, type)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "bad dimensions.");
+ return false;
+ }
+ if ((GLES2Util::GetChannelsForFormat(format) &
+ (GLES2Util::kDepth | GLES2Util::kStencil)) != 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ function_name, "can not supply data for depth or stencil textures");
+ return false;
+ }
+ if (data == NULL) {
+ (*error) = error::kOutOfBounds;
+ return false;
+ }
+ return true;
+}
+
+error::Error GLES2DecoderImpl::DoTexSubImage2D(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void * data) {
+ error::Error error = error::kNoError;
+ if (!ValidateTexSubImage2D(&error, "glTexSubImage2D", target, level,
+ xoffset, yoffset, width, height, format, type, data)) {
+ return error;
+ }
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ Texture* texture = texture_ref->texture();
+ GLsizei tex_width = 0;
+ GLsizei tex_height = 0;
+ bool ok = texture->GetLevelSize(target, level, &tex_width, &tex_height);
+ DCHECK(ok);
+ if (xoffset != 0 || yoffset != 0 ||
+ width != tex_width || height != tex_height) {
+ if (!texture_manager()->ClearTextureLevel(this, texture_ref,
+ target, level)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glTexSubImage2D", "dimensions too big");
+ return error::kNoError;
+ }
+ ScopedTextureUploadTimer timer(&texture_state_);
+ glTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type, data);
+ return error::kNoError;
+ }
+
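+ // At this point the sub-image covers the entire level, so there is no need
+ // to clear the level first; either path below overwrites every texel and
+ // the level is then marked cleared.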
+ if (!texture_state_.texsubimage2d_faster_than_teximage2d &&
+ !texture->IsImmutable()) {
+ ScopedTextureUploadTimer timer(&texture_state_);
+ GLenum internal_format;
+ GLenum tex_type;
+ texture->GetLevelType(target, level, &tex_type, &internal_format);
+ // NOTE: In OpenGL ES 2.0 border is always zero. If that changes we'll need
+ // to look it up.
+ glTexImage2D(
+ target, level, internal_format, width, height, 0, format, type, data);
+ } else {
+ ScopedTextureUploadTimer timer(&texture_state_);
+ glTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type, data);
+ }
+ texture_manager()->SetLevelCleared(texture_ref, target, level, true);
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexSubImage2D(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexSubImage2D& c =
+ *static_cast<const gles2::cmds::TexSubImage2D*>(cmd_data);
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::HandleTexSubImage2D",
+ "width", c.width, "height", c.height);
+ GLboolean internal = static_cast<GLboolean>(c.internal);
+ if (internal == GL_TRUE && texture_state_.tex_image_2d_failed)
+ return error::kNoError;
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLenum format = static_cast<GLenum>(c.format);
+ GLenum type = static_cast<GLenum>(c.type);
+ uint32 data_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &data_size,
+ NULL, NULL)) {
+ return error::kOutOfBounds;
+ }
+ const void* pixels = GetSharedMemoryAs<const void*>(
+ c.pixels_shm_id, c.pixels_shm_offset, data_size);
+ return DoTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type, pixels);
+}
+
+error::Error GLES2DecoderImpl::HandleGetVertexAttribPointerv(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetVertexAttribPointerv& c =
+ *static_cast<const gles2::cmds::GetVertexAttribPointerv*>(cmd_data);
+ GLuint index = static_cast<GLuint>(c.index);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetVertexAttribPointerv::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.pointer_shm_id, c.pointer_shm_offset, Result::ComputeSize(1));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ if (!validators_->vertex_pointer.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetVertexAttribPointerv", pname, "pname");
+ return error::kNoError;
+ }
+ if (index >= group_->max_vertex_attribs()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetVertexAttribPointerv", "index out of range.");
+ return error::kNoError;
+ }
+ result->SetNumResults(1);
+ *result->GetData() =
+ state_.vertex_attrib_manager->GetVertexAttrib(index)->offset();
+ return error::kNoError;
+}
+
+bool GLES2DecoderImpl::GetUniformSetup(
+ GLuint program_id, GLint fake_location,
+ uint32 shm_id, uint32 shm_offset,
+ error::Error* error, GLint* real_location,
+ GLuint* service_id, void** result_pointer, GLenum* result_type) {
+ DCHECK(error);
+ DCHECK(service_id);
+ DCHECK(result_pointer);
+ DCHECK(result_type);
+ DCHECK(real_location);
+ *error = error::kNoError;
+ // Make sure we have enough room for the result on failure.
+ SizedResult<GLint>* result;
+ result = GetSharedMemoryAs<SizedResult<GLint>*>(
+ shm_id, shm_offset, SizedResult<GLint>::ComputeSize(0));
+ if (!result) {
+ *error = error::kOutOfBounds;
+ return false;
+ }
+ *result_pointer = result;
+ // Set the result size to 0 so the client does not have to check for success.
+ result->SetNumResults(0);
+ Program* program = GetProgramInfoNotShader(program_id, "glGetUniform");
+ if (!program) {
+ return false;
+ }
+ if (!program->IsValid()) {
+ // Program was not linked successfully (i.e., via glLinkProgram).
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glGetUniform", "program not linked");
+ return false;
+ }
+ *service_id = program->service_id();
+ GLint array_index = -1;
+ const Program::UniformInfo* uniform_info =
+ program->GetUniformInfoByFakeLocation(
+ fake_location, real_location, &array_index);
+ if (!uniform_info) {
+ // No such location.
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glGetUniform", "unknown location");
+ return false;
+ }
+ GLenum type = uniform_info->type;
+ GLsizei size = GLES2Util::GetGLDataTypeSizeForUniforms(type);
+ if (size == 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glGetUniform", "unknown type");
+ return false;
+ }
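+ // Re-map the shared memory now that the real result size is known; for
+ // example, a GL_FLOAT_MAT4 uniform needs 16 * sizeof(GLfloat) = 64 bytes
+ // of result data.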
+ result = GetSharedMemoryAs<SizedResult<GLint>*>(
+ shm_id, shm_offset, SizedResult<GLint>::ComputeSizeFromBytes(size));
+ if (!result) {
+ *error = error::kOutOfBounds;
+ return false;
+ }
+ result->size = size;
+ *result_type = type;
+ return true;
+}
+
+error::Error GLES2DecoderImpl::HandleGetUniformiv(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetUniformiv& c =
+ *static_cast<const gles2::cmds::GetUniformiv*>(cmd_data);
+ GLuint program = c.program;
+ GLint fake_location = c.location;
+ GLuint service_id;
+ GLenum result_type;
+ GLint real_location = -1;
+ Error error;
+ void* result;
+ if (GetUniformSetup(
+ program, fake_location, c.params_shm_id, c.params_shm_offset,
+ &error, &real_location, &service_id, &result, &result_type)) {
+ glGetUniformiv(
+ service_id, real_location,
+ static_cast<cmds::GetUniformiv::Result*>(result)->GetData());
+ }
+ return error;
+}
+
+error::Error GLES2DecoderImpl::HandleGetUniformfv(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetUniformfv& c =
+ *static_cast<const gles2::cmds::GetUniformfv*>(cmd_data);
+ GLuint program = c.program;
+ GLint fake_location = c.location;
+ GLuint service_id;
+ GLint real_location = -1;
+ Error error;
+ typedef cmds::GetUniformfv::Result Result;
+ Result* result;
+ GLenum result_type;
+ if (GetUniformSetup(
+ program, fake_location, c.params_shm_id, c.params_shm_offset,
+ &error, &real_location, &service_id,
+ reinterpret_cast<void**>(&result), &result_type)) {
+ if (result_type == GL_BOOL || result_type == GL_BOOL_VEC2 ||
+ result_type == GL_BOOL_VEC3 || result_type == GL_BOOL_VEC4) {
+ GLsizei num_values = result->GetNumResults();
+ scoped_ptr<GLint[]> temp(new GLint[num_values]);
+ glGetUniformiv(service_id, real_location, temp.get());
+ GLfloat* dst = result->GetData();
+ for (GLsizei ii = 0; ii < num_values; ++ii) {
+ dst[ii] = (temp[ii] != 0);
+ }
+ } else {
+ glGetUniformfv(service_id, real_location, result->GetData());
+ }
+ }
+ return error;
+}
+
+error::Error GLES2DecoderImpl::HandleGetShaderPrecisionFormat(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetShaderPrecisionFormat& c =
+ *static_cast<const gles2::cmds::GetShaderPrecisionFormat*>(cmd_data);
+ GLenum shader_type = static_cast<GLenum>(c.shadertype);
+ GLenum precision_type = static_cast<GLenum>(c.precisiontype);
+ typedef cmds::GetShaderPrecisionFormat::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->success != 0) {
+ return error::kInvalidArguments;
+ }
+ if (!validators_->shader_type.IsValid(shader_type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetShaderPrecisionFormat", shader_type, "shader_type");
+ return error::kNoError;
+ }
+ if (!validators_->shader_precision.IsValid(precision_type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetShaderPrecisionFormat", precision_type, "precision_type");
+ return error::kNoError;
+ }
+
+ result->success = 1; // true
+
+ GLint range[2] = { 0, 0 };
+ GLint precision = 0;
+ GetShaderPrecisionFormatImpl(shader_type, precision_type, range, &precision);
+
+ result->min_range = range[0];
+ result->max_range = range[1];
+ result->precision = precision;
+
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetAttachedShaders(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetAttachedShaders& c =
+ *static_cast<const gles2::cmds::GetAttachedShaders*>(cmd_data);
+ uint32 result_size = c.result_size;
+ GLuint program_id = static_cast<GLuint>(c.program);
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glGetAttachedShaders");
+ if (!program) {
+ return error::kNoError;
+ }
+ typedef cmds::GetAttachedShaders::Result Result;
+ uint32 max_count = Result::ComputeMaxResults(result_size);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, Result::ComputeSize(max_count));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ GLsizei count = 0;
+ glGetAttachedShaders(
+ program->service_id(), max_count, &count, result->GetData());
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (!shader_manager()->GetClientId(result->GetData()[ii],
+ &result->GetData()[ii])) {
+ NOTREACHED();
+ return error::kGenericError;
+ }
+ }
+ result->SetNumResults(count);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetActiveUniform(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetActiveUniform& c =
+ *static_cast<const gles2::cmds::GetActiveUniform*>(cmd_data);
+ GLuint program_id = c.program;
+ GLuint index = c.index;
+ uint32 name_bucket_id = c.name_bucket_id;
+ typedef cmds::GetActiveUniform::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->success != 0) {
+ return error::kInvalidArguments;
+ }
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glGetActiveUniform");
+ if (!program) {
+ return error::kNoError;
+ }
+ const Program::UniformInfo* uniform_info =
+ program->GetUniformInfo(index);
+ if (!uniform_info) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetActiveUniform", "index out of range");
+ return error::kNoError;
+ }
+ result->success = 1; // true.
+ result->size = uniform_info->size;
+ result->type = uniform_info->type;
+ Bucket* bucket = CreateBucket(name_bucket_id);
+ bucket->SetFromString(uniform_info->name.c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetActiveAttrib(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetActiveAttrib& c =
+ *static_cast<const gles2::cmds::GetActiveAttrib*>(cmd_data);
+ GLuint program_id = c.program;
+ GLuint index = c.index;
+ uint32 name_bucket_id = c.name_bucket_id;
+ typedef cmds::GetActiveAttrib::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->success != 0) {
+ return error::kInvalidArguments;
+ }
+ Program* program = GetProgramInfoNotShader(
+ program_id, "glGetActiveAttrib");
+ if (!program) {
+ return error::kNoError;
+ }
+ const Program::VertexAttrib* attrib_info =
+ program->GetAttribInfo(index);
+ if (!attrib_info) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetActiveAttrib", "index out of range");
+ return error::kNoError;
+ }
+ result->success = 1; // true.
+ result->size = attrib_info->size;
+ result->type = attrib_info->type;
+ Bucket* bucket = CreateBucket(name_bucket_id);
+ bucket->SetFromString(attrib_info->name.c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleShaderBinary(uint32 immediate_data_size,
+ const void* cmd_data) {
+#if 1 // No binary shader support.
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glShaderBinary", "not supported");
+ return error::kNoError;
+#else
+ GLsizei n = static_cast<GLsizei>(c.n);
+ if (n < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "n < 0");
+ return error::kNoError;
+ }
+ GLsizei length = static_cast<GLsizei>(c.length);
+ if (length < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "length < 0");
+ return error::kNoError;
+ }
+ uint32 data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* shaders = GetSharedMemoryAs<const GLuint*>(
+ c.shaders_shm_id, c.shaders_shm_offset, data_size);
+ GLenum binaryformat = static_cast<GLenum>(c.binaryformat);
+ const void* binary = GetSharedMemoryAs<const void*>(
+ c.binary_shm_id, c.binary_shm_offset, length);
+ if (shaders == NULL || binary == NULL) {
+ return error::kOutOfBounds;
+ }
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ Shader* shader = GetShader(shaders[ii]);
+ if (!shader) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glShaderBinary", "unknown shader");
+ return error::kNoError;
+ }
+ service_ids[ii] = shader->service_id();
+ }
+ // TODO(gman): call glShaderBinary
+ return error::kNoError;
+#endif
+}
+
+void GLES2DecoderImpl::DoSwapBuffers() {
+ bool is_offscreen = !!offscreen_target_frame_buffer_.get();
+
+ int this_frame_number = frame_number_++;
+ // TRACE_EVENT for gpu tests:
+ TRACE_EVENT_INSTANT2("test_gpu", "SwapBuffersLatency",
+ TRACE_EVENT_SCOPE_THREAD,
+ "GLImpl", static_cast<int>(gfx::GetGLImplementation()),
+ "width", (is_offscreen ? offscreen_size_.width() :
+ surface_->GetSize().width()));
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoSwapBuffers",
+ "offscreen", is_offscreen,
+ "frame", this_frame_number);
+ {
+ TRACE_EVENT_SYNTHETIC_DELAY("gpu.PresentingFrame");
+ }
+
+ bool is_tracing;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("gpu.debug"),
+ &is_tracing);
+ if (is_tracing) {
+ ScopedFrameBufferBinder binder(this, GetBackbufferServiceId());
+ gpu_state_tracer_->TakeSnapshotWithCurrentFramebuffer(
+ is_offscreen ? offscreen_size_ : surface_->GetSize());
+ }
+
+ // If offscreen then don't actually SwapBuffers to the display. Just copy
+ // the rendered frame to another frame buffer.
+ if (is_offscreen) {
+ TRACE_EVENT2("gpu", "Offscreen",
+ "width", offscreen_size_.width(), "height", offscreen_size_.height());
+ if (offscreen_size_ != offscreen_saved_color_texture_->size()) {
+ // Workaround for NVIDIA driver bug on OS X; crbug.com/89557,
+ // crbug.com/94163. TODO(kbr): figure out reproduction so Apple will
+ // fix this.
+ if (workarounds().needs_offscreen_buffer_workaround) {
+ offscreen_saved_frame_buffer_->Create();
+ glFinish();
+ }
+
+ // Allocate the offscreen saved color texture.
+ DCHECK(offscreen_saved_color_format_);
+ offscreen_saved_color_texture_->AllocateStorage(
+ offscreen_size_, offscreen_saved_color_format_, false);
+
+ offscreen_saved_frame_buffer_->AttachRenderTexture(
+ offscreen_saved_color_texture_.get());
+ if (offscreen_size_.width() != 0 && offscreen_size_.height() != 0) {
+ if (offscreen_saved_frame_buffer_->CheckStatus() !=
+ GL_FRAMEBUFFER_COMPLETE) {
+ LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFrameBuffer failed "
+ << "because offscreen saved FBO was incomplete.";
+ LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
+ return;
+ }
+
+ // Clear the offscreen color texture.
+ // TODO(piman): Is this still necessary?
+ {
+ ScopedFrameBufferBinder binder(this,
+ offscreen_saved_frame_buffer_->id());
+ glClearColor(0, 0, 0, 0);
+ state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
+ glClear(GL_COLOR_BUFFER_BIT);
+ RestoreClearState();
+ }
+ }
+
+ UpdateParentTextureInfo();
+ }
+
+ if (offscreen_size_.width() == 0 || offscreen_size_.height() == 0)
+ return;
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::DoSwapBuffers", GetErrorState());
+
+ if (IsOffscreenBufferMultisampled()) {
+ // For multisampled buffers, resolve the frame buffer.
+ ScopedResolvedFrameBufferBinder binder(this, true, false);
+ } else {
+ ScopedFrameBufferBinder binder(this,
+ offscreen_target_frame_buffer_->id());
+
+ if (offscreen_target_buffer_preserved_) {
+ // Copy the target frame buffer to the saved offscreen texture.
+ offscreen_saved_color_texture_->Copy(
+ offscreen_saved_color_texture_->size(),
+ offscreen_saved_color_format_);
+ } else {
+ // Flip the textures in the parent context via the texture manager.
+ if (!!offscreen_saved_color_texture_info_.get())
+ offscreen_saved_color_texture_info_->texture()->
+ SetServiceId(offscreen_target_color_texture_->id());
+
+ offscreen_saved_color_texture_.swap(offscreen_target_color_texture_);
+ offscreen_target_frame_buffer_->AttachRenderTexture(
+ offscreen_target_color_texture_.get());
+ }
+
+ // Ensure the side effects of the copy are visible to the parent
+ // context. There is no need to do this for ANGLE because it uses a
+ // single D3D device for all contexts.
+ if (!feature_info_->feature_flags().is_angle)
+ glFlush();
+ }
+ } else {
+ if (!surface_->SwapBuffers()) {
+ LOG(ERROR) << "Context lost because SwapBuffers failed.";
+ LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
+ }
+ }
+
+ // This may be a slow command. Exit command processing to allow for
+ // context preemption and GPU watchdog checks.
+ ExitCommandProcessingEarly();
+}
+
+error::Error GLES2DecoderImpl::HandleEnableFeatureCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::EnableFeatureCHROMIUM& c =
+ *static_cast<const gles2::cmds::EnableFeatureCHROMIUM*>(cmd_data);
+ Bucket* bucket = GetBucket(c.bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ typedef cmds::EnableFeatureCHROMIUM::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (*result != 0) {
+ return error::kInvalidArguments;
+ }
+ std::string feature_str;
+ if (!bucket->GetAsString(&feature_str)) {
+ return error::kInvalidArguments;
+ }
+
+ // TODO(gman): make this some kind of table of function pointers.
+ if (feature_str.compare("pepper3d_allow_buffers_on_multiple_targets") == 0) {
+ buffer_manager()->set_allow_buffers_on_multiple_targets(true);
+ } else if (feature_str.compare("pepper3d_support_fixed_attribs") == 0) {
+ buffer_manager()->set_allow_buffers_on_multiple_targets(true);
+ // TODO(gman): decide how to remove the need for this const_cast.
+ // I could make validators_ non const but that seems bad as this is the only
+ // place it is needed. I could make some special friend class of validators
+ // just to allow this to set them. That seems silly. I could refactor this
+ // code to use the extension mechanism or the initialization attributes to
+ // turn this feature on. Given that the only real point of this is to make
+ // the conformance tests pass and given that there is lots of real work that
+ // needs to be done it seems like refactoring for one to one of those
+ // methods is a very low priority.
+ const_cast<Validators*>(validators_)->vertex_attrib_type.AddValue(GL_FIXED);
+ } else if (feature_str.compare("webgl_enable_glsl_webgl_validation") == 0) {
+ force_webgl_glsl_validation_ = true;
+ InitializeShaderTranslator();
+ } else {
+ return error::kNoError;
+ }
+
+ *result = 1; // true.
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetRequestableExtensionsCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetRequestableExtensionsCHROMIUM& c =
+ *static_cast<const gles2::cmds::GetRequestableExtensionsCHROMIUM*>(
+ cmd_data);
+ Bucket* bucket = CreateBucket(c.bucket_id);
+ scoped_refptr<FeatureInfo> info(new FeatureInfo());
+ info->Initialize(disallowed_features_);
+ bucket->SetFromString(info->extensions().c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleRequestExtensionCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::RequestExtensionCHROMIUM& c =
+ *static_cast<const gles2::cmds::RequestExtensionCHROMIUM*>(cmd_data);
+ Bucket* bucket = GetBucket(c.bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string feature_str;
+ if (!bucket->GetAsString(&feature_str)) {
+ return error::kInvalidArguments;
+ }
+
+ bool desire_webgl_glsl_validation =
+ feature_str.find("GL_CHROMIUM_webglsl") != std::string::npos;
+ bool desire_standard_derivatives = false;
+ bool desire_frag_depth = false;
+ bool desire_draw_buffers = false;
+ bool desire_shader_texture_lod = false;
+ if (force_webgl_glsl_validation_) {
+ desire_standard_derivatives =
+ feature_str.find("GL_OES_standard_derivatives") != std::string::npos;
+ desire_frag_depth =
+ feature_str.find("GL_EXT_frag_depth") != std::string::npos;
+ desire_draw_buffers =
+ feature_str.find("GL_EXT_draw_buffers") != std::string::npos;
+ desire_shader_texture_lod =
+ feature_str.find("GL_EXT_shader_texture_lod") != std::string::npos;
+ }
+
+ if (desire_webgl_glsl_validation != force_webgl_glsl_validation_ ||
+ desire_standard_derivatives != derivatives_explicitly_enabled_ ||
+ desire_frag_depth != frag_depth_explicitly_enabled_ ||
+ desire_draw_buffers != draw_buffers_explicitly_enabled_) {
+ force_webgl_glsl_validation_ |= desire_webgl_glsl_validation;
+ derivatives_explicitly_enabled_ |= desire_standard_derivatives;
+ frag_depth_explicitly_enabled_ |= desire_frag_depth;
+ draw_buffers_explicitly_enabled_ |= desire_draw_buffers;
+ shader_texture_lod_explicitly_enabled_ |= desire_shader_texture_lod;
+ InitializeShaderTranslator();
+ }
+
+ UpdateCapabilities();
+
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetMultipleIntegervCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetMultipleIntegervCHROMIUM& c =
+ *static_cast<const gles2::cmds::GetMultipleIntegervCHROMIUM*>(cmd_data);
+ GLuint count = c.count;
+ uint32 pnames_size;
+ if (!SafeMultiplyUint32(count, sizeof(GLenum), &pnames_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLenum* pnames = GetSharedMemoryAs<const GLenum*>(
+ c.pnames_shm_id, c.pnames_shm_offset, pnames_size);
+ if (pnames == NULL) {
+ return error::kOutOfBounds;
+ }
+
+ // We have to copy them since we use them twice so the client
+ // can't change them between the time we validate them and the time we use
+ // them.
+ scoped_ptr<GLenum[]> enums(new GLenum[count]);
+ memcpy(enums.get(), pnames, pnames_size);
+
+ // Count up the space needed for the result.
+ uint32 num_results = 0;
+ for (GLuint ii = 0; ii < count; ++ii) {
+ uint32 num = util_.GLGetNumValuesReturned(enums[ii]);
+ if (num == 0) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetMultipleCHROMIUM", enums[ii], "pname");
+ return error::kNoError;
+ }
+ // Num will never be more than 4.
+ DCHECK_LE(num, 4u);
+ if (!SafeAddUint32(num_results, num, &num_results)) {
+ return error::kOutOfBounds;
+ }
+ }
+
+ uint32 result_size = 0;
+ if (!SafeMultiplyUint32(num_results, sizeof(GLint), &result_size)) {
+ return error::kOutOfBounds;
+ }
+
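+ // For example, querying GL_VIEWPORT (4 values) and GL_MAX_TEXTURE_SIZE
+ // (1 value) gives num_results = 5 and result_size = 20 bytes, which must
+ // match the size the client declared in the command.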
+ if (result_size != static_cast<uint32>(c.size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glGetMultipleCHROMIUM", "bad size GL_INVALID_VALUE");
+ return error::kNoError;
+ }
+
+ GLint* results = GetSharedMemoryAs<GLint*>(
+ c.results_shm_id, c.results_shm_offset, result_size);
+ if (results == NULL) {
+ return error::kOutOfBounds;
+ }
+
+ // Check the results have been cleared in case the context was lost.
+ for (uint32 ii = 0; ii < num_results; ++ii) {
+ if (results[ii]) {
+ return error::kInvalidArguments;
+ }
+ }
+
+ // Get each result.
+ GLint* start = results;
+ for (GLuint ii = 0; ii < count; ++ii) {
+ GLsizei num_written = 0;
+ if (!state_.GetStateAsGLint(enums[ii], results, &num_written) &&
+ !GetHelper(enums[ii], results, &num_written)) {
+ DoGetIntegerv(enums[ii], results);
+ }
+ results += num_written;
+ }
+
+ // Just to verify. Should this be a DCHECK?
+ if (static_cast<uint32>(results - start) != num_results) {
+ return error::kOutOfBounds;
+ }
+
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramInfoCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetProgramInfoCHROMIUM& c =
+ *static_cast<const gles2::cmds::GetProgramInfoCHROMIUM*>(cmd_data);
+ GLuint program_id = static_cast<GLuint>(c.program);
+ uint32 bucket_id = c.bucket_id;
+ Bucket* bucket = CreateBucket(bucket_id);
+ bucket->SetSize(sizeof(ProgramInfoHeader)); // in case we fail.
+ Program* program = NULL;
+ program = GetProgram(program_id);
+ if (!program || !program->IsValid()) {
+ return error::kNoError;
+ }
+ program->GetProgramInfo(program_manager(), bucket);
+ return error::kNoError;
+}
+
+error::ContextLostReason GLES2DecoderImpl::GetContextLostReason() {
+ switch (reset_status_) {
+ case GL_NO_ERROR:
+ // TODO(kbr): improve the precision of the error code in this case.
+ // Consider delegating to context for error code if MakeCurrent fails.
+ return error::kUnknown;
+ case GL_GUILTY_CONTEXT_RESET_ARB:
+ return error::kGuilty;
+ case GL_INNOCENT_CONTEXT_RESET_ARB:
+ return error::kInnocent;
+ case GL_UNKNOWN_CONTEXT_RESET_ARB:
+ return error::kUnknown;
+ }
+
+ NOTREACHED();
+ return error::kUnknown;
+}
+
+bool GLES2DecoderImpl::WasContextLost() {
+ if (reset_status_ != GL_NO_ERROR) {
+ return true;
+ }
+ if (context_->WasAllocatedUsingRobustnessExtension()) {
+ GLenum status = GL_NO_ERROR;
+ if (has_robustness_extension_)
+ status = glGetGraphicsResetStatusARB();
+ if (status != GL_NO_ERROR) {
+ // The graphics card was reset. Signal a lost context to the application.
+ reset_status_ = status;
+ reset_by_robustness_extension_ = true;
+ LOG(ERROR) << (surface_->IsOffscreen() ? "Offscreen" : "Onscreen")
+ << " context lost via ARB/EXT_robustness. Reset status = "
+ << GLES2Util::GetStringEnum(status);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool GLES2DecoderImpl::WasContextLostByRobustnessExtension() {
+ return WasContextLost() && reset_by_robustness_extension_;
+}
+
+void GLES2DecoderImpl::LoseContext(uint32 reset_status) {
+ // Only loses the context once.
+ if (reset_status_ != GL_NO_ERROR) {
+ return;
+ }
+
+ // Marks this context as lost.
+ reset_status_ = reset_status;
+ current_decoder_error_ = error::kLostContext;
+}
+
+error::Error GLES2DecoderImpl::HandleInsertSyncPointCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ return error::kUnknownCommand;
+}
+
+error::Error GLES2DecoderImpl::HandleWaitSyncPointCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::WaitSyncPointCHROMIUM& c =
+ *static_cast<const gles2::cmds::WaitSyncPointCHROMIUM*>(cmd_data);
+ group_->mailbox_manager()->PullTextureUpdates();
+ if (wait_sync_point_callback_.is_null())
+ return error::kNoError;
+
+ return wait_sync_point_callback_.Run(c.sync_point) ?
+ error::kNoError : error::kDeferCommandUntilLater;
+}
+
+error::Error GLES2DecoderImpl::HandleDiscardBackbufferCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ if (surface_->DeferDraws())
+ return error::kDeferCommandUntilLater;
+ if (!surface_->SetBackbufferAllocation(false))
+ return error::kLostContext;
+ backbuffer_needs_clear_bits_ |= GL_COLOR_BUFFER_BIT;
+ backbuffer_needs_clear_bits_ |= GL_DEPTH_BUFFER_BIT;
+ backbuffer_needs_clear_bits_ |= GL_STENCIL_BUFFER_BIT;
+ return error::kNoError;
+}
+
+bool GLES2DecoderImpl::GenQueriesEXTHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (query_manager_->GetQuery(client_ids[ii])) {
+ return false;
+ }
+ }
+ query_manager_->GenQueries(n, client_ids);
+ return true;
+}
+
+void GLES2DecoderImpl::DeleteQueriesEXTHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ QueryManager::Query* query = query_manager_->GetQuery(client_ids[ii]);
+ if (query && !query->IsDeleted()) {
+ ContextState::QueryMap::iterator it =
+ state_.current_queries.find(query->target());
+ if (it != state_.current_queries.end())
+ state_.current_queries.erase(it);
+
+ query->Destroy(true);
+ }
+ query_manager_->RemoveQuery(client_ids[ii]);
+ }
+}
+
+bool GLES2DecoderImpl::ProcessPendingQueries() {
+ if (query_manager_.get() == NULL) {
+ return false;
+ }
+ if (!query_manager_->ProcessPendingQueries()) {
+ current_decoder_error_ = error::kOutOfBounds;
+ }
+ return query_manager_->HavePendingQueries();
+}
+
+// Note that if there are no pending readpixels right now,
+// this function will call the callback immediately.
+void GLES2DecoderImpl::WaitForReadPixels(base::Closure callback) {
+ if (features().use_async_readpixels && !pending_readpixel_fences_.empty()) {
+ pending_readpixel_fences_.back()->callbacks.push_back(callback);
+ } else {
+ callback.Run();
+ }
+}
+
+void GLES2DecoderImpl::ProcessPendingReadPixels() {
+ while (!pending_readpixel_fences_.empty() &&
+ pending_readpixel_fences_.front()->fence->HasCompleted()) {
+ std::vector<base::Closure> callbacks =
+ pending_readpixel_fences_.front()->callbacks;
+ pending_readpixel_fences_.pop();
+ for (size_t i = 0; i < callbacks.size(); i++) {
+ callbacks[i].Run();
+ }
+ }
+}
+
+bool GLES2DecoderImpl::HasMoreIdleWork() {
+ return !pending_readpixel_fences_.empty() ||
+ async_pixel_transfer_manager_->NeedsProcessMorePendingTransfers();
+}
+
+void GLES2DecoderImpl::PerformIdleWork() {
+ ProcessPendingReadPixels();
+ if (!async_pixel_transfer_manager_->NeedsProcessMorePendingTransfers())
+ return;
+ async_pixel_transfer_manager_->ProcessMorePendingTransfers();
+ ProcessFinishedAsyncTransfers();
+}
+
+error::Error GLES2DecoderImpl::HandleBeginQueryEXT(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BeginQueryEXT& c =
+ *static_cast<const gles2::cmds::BeginQueryEXT*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint client_id = static_cast<GLuint>(c.id);
+ int32 sync_shm_id = static_cast<int32>(c.sync_data_shm_id);
+ uint32 sync_shm_offset = static_cast<uint32>(c.sync_data_shm_offset);
+
+ switch (target) {
+ case GL_COMMANDS_ISSUED_CHROMIUM:
+ case GL_LATENCY_QUERY_CHROMIUM:
+ case GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM:
+ case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
+ case GL_GET_ERROR_QUERY_CHROMIUM:
+ break;
+ case GL_COMMANDS_COMPLETED_CHROMIUM:
+ if (!features().chromium_sync_query) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glBeginQueryEXT",
+ "not enabled for commands completed queries");
+ return error::kNoError;
+ }
+ break;
+ default:
+ if (!features().occlusion_query_boolean) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glBeginQueryEXT",
+ "not enabled for occlusion queries");
+ return error::kNoError;
+ }
+ break;
+ }
+
+ if (state_.current_queries.find(target) != state_.current_queries.end()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glBeginQueryEXT", "query already in progress");
+ return error::kNoError;
+ }
+
+ if (client_id == 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT", "id is 0");
+ return error::kNoError;
+ }
+
+ QueryManager::Query* query = query_manager_->GetQuery(client_id);
+ if (!query) {
+ if (!query_manager_->IsValidQuery(client_id)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBeginQueryEXT",
+ "id not made by glGenQueriesEXT");
+ return error::kNoError;
+ }
+ query = query_manager_->CreateQuery(
+ target, client_id, sync_shm_id, sync_shm_offset);
+ }
+
+ if (query->target() != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glBeginQueryEXT", "target does not match");
+ return error::kNoError;
+ } else if (query->shm_id() != sync_shm_id ||
+ query->shm_offset() != sync_shm_offset) {
+ DLOG(ERROR) << "Shared memory used by query not the same as before";
+ return error::kInvalidArguments;
+ }
+
+ if (!query_manager_->BeginQuery(query)) {
+ return error::kOutOfBounds;
+ }
+
+ state_.current_queries[target] = query;
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleEndQueryEXT(uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::EndQueryEXT& c =
+ *static_cast<const gles2::cmds::EndQueryEXT*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ uint32 submit_count = static_cast<GLuint>(c.submit_count);
+ ContextState::QueryMap::iterator it = state_.current_queries.find(target);
+
+ if (it == state_.current_queries.end()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glEndQueryEXT", "No active query");
+ return error::kNoError;
+ }
+
+ QueryManager::Query* query = it->second.get();
+ if (!query_manager_->EndQuery(query, submit_count)) {
+ return error::kOutOfBounds;
+ }
+
+ query_manager_->ProcessPendingTransferQueries();
+
+ state_.current_queries.erase(it);
+ return error::kNoError;
+}
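+
+// A rough client-side sketch of the query lifecycle served by the two
+// handlers above (illustrative only; error checks omitted):
+//   GLuint query = 0;
+//   glGenQueriesEXT(1, &query);
+//   glBeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, query);
+//   /* ...issue commands... */
+//   glEndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
+//   GLuint available = 0;
+//   glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_AVAILABLE_EXT, &available);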
+
+bool GLES2DecoderImpl::GenVertexArraysOESHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetVertexAttribManager(client_ids[ii])) {
+ return false;
+ }
+ }
+
+ if (!features().native_vertex_array_object) {
+ // Emulated VAO
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateVertexAttribManager(client_ids[ii], 0, true);
+ }
+ } else {
+ scoped_ptr<GLuint[]> service_ids(new GLuint[n]);
+
+ glGenVertexArraysOES(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateVertexAttribManager(client_ids[ii], service_ids[ii], true);
+ }
+ }
+
+ return true;
+}
+
+void GLES2DecoderImpl::DeleteVertexArraysOESHelper(
+ GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ VertexAttribManager* vao =
+ GetVertexAttribManager(client_ids[ii]);
+ if (vao && !vao->IsDeleted()) {
+ if (state_.vertex_attrib_manager.get() == vao) {
+ DoBindVertexArrayOES(0);
+ }
+ RemoveVertexAttribManager(client_ids[ii]);
+ }
+ }
+}
+
+void GLES2DecoderImpl::DoBindVertexArrayOES(GLuint client_id) {
+ VertexAttribManager* vao = NULL;
+ if (client_id != 0) {
+ vao = GetVertexAttribManager(client_id);
+ if (!vao) {
+ // Unlike most Bind* methods, the spec explicitly states that VertexArray
+ // only allows names that have been previously generated. As such, we do
+ // not generate new names here.
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindVertexArrayOES", "bad vertex array id.");
+ current_decoder_error_ = error::kNoError;
+ return;
+ }
+ } else {
+ vao = state_.default_vertex_attrib_manager.get();
+ }
+
+ // Only set the VAO state if it's changed
+ if (state_.vertex_attrib_manager.get() != vao) {
+ state_.vertex_attrib_manager = vao;
+ if (!features().native_vertex_array_object) {
+ EmulateVertexArrayState();
+ } else {
+ GLuint service_id = vao->service_id();
+ glBindVertexArrayOES(service_id);
+ }
+ }
+}
+
+// Used when OES_vertex_array_object isn't natively supported
+void GLES2DecoderImpl::EmulateVertexArrayState() {
+ // Setup the Vertex attribute state
+ for (uint32 vv = 0; vv < group_->max_vertex_attribs(); ++vv) {
+ RestoreStateForAttrib(vv, true);
+ }
+
+ // Setup the element buffer
+ Buffer* element_array_buffer =
+ state_.vertex_attrib_manager->element_array_buffer();
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,
+ element_array_buffer ? element_array_buffer->service_id() : 0);
+}
+
+bool GLES2DecoderImpl::DoIsVertexArrayOES(GLuint client_id) {
+ const VertexAttribManager* vao =
+ GetVertexAttribManager(client_id);
+ return vao && vao->IsValid() && !vao->IsDeleted();
+}
+
+#if defined(OS_MACOSX)
+void GLES2DecoderImpl::ReleaseIOSurfaceForTexture(GLuint texture_id) {
+ TextureToIOSurfaceMap::iterator it = texture_to_io_surface_map_.find(
+ texture_id);
+ if (it != texture_to_io_surface_map_.end()) {
+ // Found a previous IOSurface bound to this texture; release it.
+ IOSurfaceRef surface = it->second;
+ CFRelease(surface);
+ texture_to_io_surface_map_.erase(it);
+ }
+}
+#endif
+
+void GLES2DecoderImpl::DoTexImageIOSurface2DCHROMIUM(
+ GLenum target, GLsizei width, GLsizei height,
+ GLuint io_surface_id, GLuint plane) {
+#if defined(OS_MACOSX)
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM", "only supported on desktop GL.");
+ return;
+ }
+
+ if (target != GL_TEXTURE_RECTANGLE_ARB) {
+ // This might be supported in the future, and if we could require
+    // support for binding an IOSurface to an NPOT TEXTURE_2D texture, we
+ // could delete a lot of code. For now, perform strict validation so we
+ // know what's going on.
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM",
+ "requires TEXTURE_RECTANGLE_ARB target");
+ return;
+ }
+
+ // Default target might be conceptually valid, but disallow it to avoid
+ // accidents.
+ TextureRef* texture_ref =
+ texture_manager()->GetTextureInfoForTargetUnlessDefault(&state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM", "no rectangle texture bound");
+ return;
+ }
+
+ // Look up the new IOSurface. Note that because of asynchrony
+ // between processes this might fail; during live resizing the
+ // plugin process might allocate and release an IOSurface before
+ // this process gets a chance to look it up. Hold on to any old
+ // IOSurface in this case.
+ IOSurfaceRef surface = IOSurfaceLookup(io_surface_id);
+ if (!surface) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM", "no IOSurface with the given ID");
+ return;
+ }
+
+ // Release any IOSurface previously bound to this texture.
+ ReleaseIOSurfaceForTexture(texture_ref->service_id());
+
+ // Make sure we release the IOSurface even if CGLTexImageIOSurface2D fails.
+ texture_to_io_surface_map_.insert(
+ std::make_pair(texture_ref->service_id(), surface));
+
+ CGLContextObj context =
+ static_cast<CGLContextObj>(context_->GetHandle());
+
+ CGLError err = CGLTexImageIOSurface2D(
+ context,
+ target,
+ GL_RGBA,
+ width,
+ height,
+ GL_BGRA,
+ GL_UNSIGNED_INT_8_8_8_8_REV,
+ surface,
+ plane);
+
+ if (err != kCGLNoError) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM", "error in CGLTexImageIOSurface2D");
+ return;
+ }
+
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, 0, GL_RGBA, width, height, 1, 0,
+ GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, true);
+
+#else
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glTexImageIOSurface2DCHROMIUM", "not supported.");
+#endif
+}
+
+static GLenum ExtractFormatFromStorageFormat(GLenum internalformat) {
+ switch (internalformat) {
+ case GL_RGB565:
+ return GL_RGB;
+ case GL_RGBA4:
+ return GL_RGBA;
+ case GL_RGB5_A1:
+ return GL_RGBA;
+ case GL_RGB8_OES:
+ return GL_RGB;
+ case GL_RGBA8_OES:
+ return GL_RGBA;
+ case GL_LUMINANCE8_ALPHA8_EXT:
+ return GL_LUMINANCE_ALPHA;
+ case GL_LUMINANCE8_EXT:
+ return GL_LUMINANCE;
+ case GL_ALPHA8_EXT:
+ return GL_ALPHA;
+ case GL_RGBA32F_EXT:
+ return GL_RGBA;
+ case GL_RGB32F_EXT:
+ return GL_RGB;
+ case GL_ALPHA32F_EXT:
+ return GL_ALPHA;
+ case GL_LUMINANCE32F_EXT:
+ return GL_LUMINANCE;
+ case GL_LUMINANCE_ALPHA32F_EXT:
+ return GL_LUMINANCE_ALPHA;
+ case GL_RGBA16F_EXT:
+ return GL_RGBA;
+ case GL_RGB16F_EXT:
+ return GL_RGB;
+ case GL_ALPHA16F_EXT:
+ return GL_ALPHA;
+ case GL_LUMINANCE16F_EXT:
+ return GL_LUMINANCE;
+ case GL_LUMINANCE_ALPHA16F_EXT:
+ return GL_LUMINANCE_ALPHA;
+ case GL_BGRA8_EXT:
+ return GL_BGRA_EXT;
+ default:
+ return GL_NONE;
+ }
+}
+
+void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
+ GLenum target, GLuint source_id, GLuint dest_id, GLint level,
+ GLenum internal_format, GLenum dest_type) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoCopyTextureCHROMIUM");
+
+ TextureRef* dest_texture_ref = GetTexture(dest_id);
+ TextureRef* source_texture_ref = GetTexture(source_id);
+
+ if (!source_texture_ref || !dest_texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTextureCHROMIUM", "unknown texture id");
+ return;
+ }
+
+ if (GL_TEXTURE_2D != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTextureCHROMIUM", "invalid texture target");
+ return;
+ }
+
+ Texture* source_texture = source_texture_ref->texture();
+ Texture* dest_texture = dest_texture_ref->texture();
+ if (dest_texture->target() != GL_TEXTURE_2D ||
+ (source_texture->target() != GL_TEXTURE_2D &&
+ source_texture->target() != GL_TEXTURE_RECTANGLE_ARB &&
+ source_texture->target() != GL_TEXTURE_EXTERNAL_OES)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glCopyTextureCHROMIUM",
+ "invalid texture target binding");
+ return;
+ }
+
+ int source_width, source_height, dest_width, dest_height;
+
+ gfx::GLImage* image =
+ source_texture->GetLevelImage(source_texture->target(), 0);
+ if (image) {
+ gfx::Size size = image->GetSize();
+ source_width = size.width();
+ source_height = size.height();
+ if (source_width <= 0 || source_height <= 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glCopyTextureChromium", "invalid image size");
+ return;
+ }
+ } else {
+ if (!source_texture->GetLevelSize(
+ source_texture->target(), 0, &source_width, &source_height)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glCopyTextureChromium",
+ "source texture has no level 0");
+ return;
+ }
+
+ // Check that this type of texture is allowed.
+ if (!texture_manager()->ValidForTarget(
+ source_texture->target(), level, source_width, source_height, 1)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCopyTextureCHROMIUM", "Bad dimensions");
+ return;
+ }
+ }
+
+ // Clear the source texture if necessary.
+ if (!texture_manager()->ClearTextureLevel(
+ this, source_texture_ref, source_texture->target(), 0)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glCopyTextureCHROMIUM", "dimensions too big");
+ return;
+ }
+
+ GLenum source_type = 0;
+ GLenum source_internal_format = 0;
+ source_texture->GetLevelType(
+ source_texture->target(), 0, &source_type, &source_internal_format);
+
+  // The destination format should be GL_RGB, GL_RGBA or GL_BGRA_EXT.
+  // GL_ALPHA, GL_LUMINANCE, and GL_LUMINANCE_ALPHA are not supported because
+  // they are not renderable on some platforms.
+ bool valid_dest_format = internal_format == GL_RGB ||
+ internal_format == GL_RGBA ||
+ internal_format == GL_BGRA_EXT;
+ bool valid_source_format = source_internal_format == GL_ALPHA ||
+ source_internal_format == GL_RGB ||
+ source_internal_format == GL_RGBA ||
+ source_internal_format == GL_LUMINANCE ||
+ source_internal_format == GL_LUMINANCE_ALPHA ||
+ source_internal_format == GL_BGRA_EXT;
+ if (!valid_source_format || !valid_dest_format) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glCopyTextureCHROMIUM",
+ "invalid internal format");
+ return;
+ }
+
+ // Defer initializing the CopyTextureCHROMIUMResourceManager until it is
+  // needed because it takes tens of milliseconds to initialize.
+ if (!copy_texture_CHROMIUM_.get()) {
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopyTextureCHROMIUM");
+ copy_texture_CHROMIUM_.reset(new CopyTextureCHROMIUMResourceManager());
+ copy_texture_CHROMIUM_->Initialize(this);
+ RestoreCurrentFramebufferBindings();
+ if (LOCAL_PEEK_GL_ERROR("glCopyTextureCHROMIUM") != GL_NO_ERROR)
+ return;
+ }
+
+ GLenum dest_type_previous = dest_type;
+ GLenum dest_internal_format = internal_format;
+ bool dest_level_defined = dest_texture->GetLevelSize(
+ GL_TEXTURE_2D, level, &dest_width, &dest_height);
+
+ if (dest_level_defined) {
+ dest_texture->GetLevelType(GL_TEXTURE_2D, level, &dest_type_previous,
+ &dest_internal_format);
+ }
+
+ // Resize the destination texture to the dimensions of the source texture.
+ if (!dest_level_defined || dest_width != source_width ||
+ dest_height != source_height ||
+ dest_internal_format != internal_format ||
+ dest_type_previous != dest_type) {
+ // Ensure that the glTexImage2D succeeds.
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopyTextureCHROMIUM");
+ glBindTexture(GL_TEXTURE_2D, dest_texture->service_id());
+ glTexImage2D(
+ GL_TEXTURE_2D, level, internal_format, source_width, source_height,
+ 0, internal_format, dest_type, NULL);
+ GLenum error = LOCAL_PEEK_GL_ERROR("glCopyTextureCHROMIUM");
+ if (error != GL_NO_ERROR) {
+ RestoreCurrentTextureBindings(&state_, GL_TEXTURE_2D);
+ return;
+ }
+
+ texture_manager()->SetLevelInfo(
+ dest_texture_ref, GL_TEXTURE_2D, level, internal_format, source_width,
+ source_height, 1, 0, internal_format, dest_type, true);
+ } else {
+ texture_manager()->SetLevelCleared(
+ dest_texture_ref, GL_TEXTURE_2D, level, true);
+ }
+
+ ScopedModifyPixels modify(dest_texture_ref);
+
+ // Try using GLImage::CopyTexImage when possible.
+ bool unpack_premultiply_alpha_change =
+ unpack_premultiply_alpha_ ^ unpack_unpremultiply_alpha_;
+ if (image && !unpack_flip_y_ && !unpack_premultiply_alpha_change && !level) {
+ glBindTexture(GL_TEXTURE_2D, dest_texture->service_id());
+ if (image->CopyTexImage(GL_TEXTURE_2D))
+ return;
+ }
+
+ DoWillUseTexImageIfNeeded(source_texture, source_texture->target());
+
+  // A GL_TEXTURE_EXTERNAL_OES texture requires applying a transform matrix
+  // before presenting.
+ if (source_texture->target() == GL_TEXTURE_EXTERNAL_OES) {
+ // TODO(hkuang): get the StreamTexture transform matrix in GPU process
+ // instead of using default matrix crbug.com/226218.
+ const static GLfloat default_matrix[16] = {1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f};
+ copy_texture_CHROMIUM_->DoCopyTextureWithTransform(
+ this,
+ source_texture->target(),
+ source_texture->service_id(),
+ dest_texture->service_id(),
+ level,
+ source_width,
+ source_height,
+ unpack_flip_y_,
+ unpack_premultiply_alpha_,
+ unpack_unpremultiply_alpha_,
+ default_matrix);
+ } else {
+ copy_texture_CHROMIUM_->DoCopyTexture(this,
+ source_texture->target(),
+ source_texture->service_id(),
+ source_internal_format,
+ dest_texture->service_id(),
+ level,
+ internal_format,
+ source_width,
+ source_height,
+ unpack_flip_y_,
+ unpack_premultiply_alpha_,
+ unpack_unpremultiply_alpha_);
+ }
+
+ DoDidUseTexImageIfNeeded(source_texture, source_texture->target());
+}
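+
+// Roughly the corresponding client-side call (illustrative only; source_id
+// and dest_id are client texture ids):
+//   glCopyTextureCHROMIUM(GL_TEXTURE_2D, source_id, dest_id, 0 /* level */,
+//                         GL_RGBA, GL_UNSIGNED_BYTE);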
+
+static GLenum ExtractTypeFromStorageFormat(GLenum internalformat) {
+ switch (internalformat) {
+ case GL_RGB565:
+ return GL_UNSIGNED_SHORT_5_6_5;
+ case GL_RGBA4:
+ return GL_UNSIGNED_SHORT_4_4_4_4;
+ case GL_RGB5_A1:
+ return GL_UNSIGNED_SHORT_5_5_5_1;
+ case GL_RGB8_OES:
+ return GL_UNSIGNED_BYTE;
+ case GL_RGBA8_OES:
+ return GL_UNSIGNED_BYTE;
+ case GL_LUMINANCE8_ALPHA8_EXT:
+ return GL_UNSIGNED_BYTE;
+ case GL_LUMINANCE8_EXT:
+ return GL_UNSIGNED_BYTE;
+ case GL_ALPHA8_EXT:
+ return GL_UNSIGNED_BYTE;
+ case GL_RGBA32F_EXT:
+ return GL_FLOAT;
+ case GL_RGB32F_EXT:
+ return GL_FLOAT;
+ case GL_ALPHA32F_EXT:
+ return GL_FLOAT;
+ case GL_LUMINANCE32F_EXT:
+ return GL_FLOAT;
+ case GL_LUMINANCE_ALPHA32F_EXT:
+ return GL_FLOAT;
+ case GL_RGBA16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_RGB16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_ALPHA16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_LUMINANCE16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_LUMINANCE_ALPHA16F_EXT:
+ return GL_HALF_FLOAT_OES;
+ case GL_BGRA8_EXT:
+ return GL_UNSIGNED_BYTE;
+ default:
+ return GL_NONE;
+ }
+}
+
+void GLES2DecoderImpl::DoTexStorage2DEXT(
+ GLenum target,
+ GLint levels,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoTexStorage2DEXT",
+ "width", width, "height", height);
+ if (!texture_manager()->ValidForTarget(target, 0, width, height, 1) ||
+ TextureManager::ComputeMipMapCount(target, width, height, 1) < levels) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glTexStorage2DEXT", "dimensions out of range");
+ return;
+ }
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexStorage2DEXT", "unknown texture for target");
+ return;
+ }
+ Texture* texture = texture_ref->texture();
+ if (texture->IsAttachedToFramebuffer()) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ if (texture->IsImmutable()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTexStorage2DEXT", "texture is immutable");
+ return;
+ }
+
+ GLenum format = ExtractFormatFromStorageFormat(internal_format);
+ GLenum type = ExtractTypeFromStorageFormat(internal_format);
+
+ {
+ GLsizei level_width = width;
+ GLsizei level_height = height;
+ uint32 estimated_size = 0;
+ for (int ii = 0; ii < levels; ++ii) {
+ uint32 level_size = 0;
+ if (!GLES2Util::ComputeImageDataSizes(
+ level_width, level_height, format, type, state_.unpack_alignment,
+ &estimated_size, NULL, NULL) ||
+ !SafeAddUint32(estimated_size, level_size, &estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glTexStorage2DEXT", "dimensions too large");
+ return;
+ }
+ level_width = std::max(1, level_width >> 1);
+ level_height = std::max(1, level_height >> 1);
+ }
+ if (!EnsureGPUMemoryAvailable(estimated_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glTexStorage2DEXT", "out of memory");
+ return;
+ }
+ }
+
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glTexStorage2DEXT");
+ glTexStorage2DEXT(target, levels, internal_format, width, height);
+ GLenum error = LOCAL_PEEK_GL_ERROR("glTexStorage2DEXT");
+ if (error == GL_NO_ERROR) {
+ GLsizei level_width = width;
+ GLsizei level_height = height;
+ for (int ii = 0; ii < levels; ++ii) {
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, ii, format,
+ level_width, level_height, 1, 0, format, type, false);
+ level_width = std::max(1, level_width >> 1);
+ level_height = std::max(1, level_height >> 1);
+ }
+ texture->SetImmutable(true);
+ }
+}
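+
+// For example (illustrative only), glTexStorage2DEXT(GL_TEXTURE_2D, 5,
+// GL_RGBA8_OES, 16, 16) records five immutable levels of 16x16, 8x8, 4x4,
+// 2x2 and 1x1 texels, each with format GL_RGBA and type GL_UNSIGNED_BYTE as
+// extracted by the helpers above.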
+
+error::Error GLES2DecoderImpl::HandleGenMailboxCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ return error::kUnknownCommand;
+}
+
+void GLES2DecoderImpl::DoProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* data) {
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoProduceTextureCHROMIUM",
+ "context", logger_.GetLogPrefix(),
+ "mailbox[0]", static_cast<unsigned char>(data[0]));
+
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ ProduceTextureRef("glProduceTextureCHROMIUM", texture_ref, target, data);
+}
+
+void GLES2DecoderImpl::DoProduceTextureDirectCHROMIUM(GLuint client_id,
+ GLenum target, const GLbyte* data) {
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoProduceTextureDirectCHROMIUM",
+ "context", logger_.GetLogPrefix(),
+ "mailbox[0]", static_cast<unsigned char>(data[0]));
+
+ ProduceTextureRef("glProduceTextureDirectCHROMIUM", GetTexture(client_id),
+ target, data);
+}
+
+void GLES2DecoderImpl::ProduceTextureRef(std::string func_name,
+ TextureRef* texture_ref, GLenum target, const GLbyte* data) {
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DLOG_IF(ERROR, !mailbox.Verify()) << func_name << " was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, func_name.c_str(), "unknown texture for target");
+ return;
+ }
+
+ Texture* produced = texture_manager()->Produce(texture_ref);
+ if (!produced) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, func_name.c_str(), "invalid texture");
+ return;
+ }
+
+ if (produced->target() != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, func_name.c_str(), "invalid target");
+ return;
+ }
+
+ group_->mailbox_manager()->ProduceTexture(target, mailbox, produced);
+}
+
+void GLES2DecoderImpl::DoConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* data) {
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoConsumeTextureCHROMIUM",
+ "context", logger_.GetLogPrefix(),
+ "mailbox[0]", static_cast<unsigned char>(data[0]));
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DLOG_IF(ERROR, !mailbox.Verify()) << "ConsumeTextureCHROMIUM was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+
+ scoped_refptr<TextureRef> texture_ref =
+ texture_manager()->GetTextureInfoForTargetUnlessDefault(&state_, target);
+ if (!texture_ref.get()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glConsumeTextureCHROMIUM",
+ "unknown texture for target");
+ return;
+ }
+ GLuint client_id = texture_ref->client_id();
+ if (!client_id) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glConsumeTextureCHROMIUM", "unknown texture for target");
+ return;
+ }
+ Texture* texture = group_->mailbox_manager()->ConsumeTexture(target, mailbox);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glConsumeTextureCHROMIUM", "invalid mailbox name");
+ return;
+ }
+ if (texture->target() != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glConsumeTextureCHROMIUM", "invalid target");
+ return;
+ }
+
+ DeleteTexturesHelper(1, &client_id);
+ texture_ref = texture_manager()->Consume(client_id, texture);
+ glBindTexture(target, texture_ref->service_id());
+
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ unit.bind_target = target;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ unit.bound_texture_2d = texture_ref;
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ unit.bound_texture_cube_map = texture_ref;
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ unit.bound_texture_external_oes = texture_ref;
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ unit.bound_texture_rectangle_arb = texture_ref;
+ break;
+ default:
+ NOTREACHED(); // Validation should prevent us getting here.
+ break;
+ }
+}
+
+error::Error GLES2DecoderImpl::HandleCreateAndConsumeTextureCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CreateAndConsumeTextureCHROMIUMImmediate& c =
+ *static_cast<
+ const gles2::cmds::CreateAndConsumeTextureCHROMIUMImmediate*>(
+ cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLbyte), 64, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLbyte* mailbox =
+ GetImmediateDataAs<const GLbyte*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCreateAndConsumeTextureCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (mailbox == NULL) {
+ return error::kOutOfBounds;
+ }
+ uint32_t client_id = c.client_id;
+ DoCreateAndConsumeTextureCHROMIUM(target, mailbox, client_id);
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoCreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* data, GLuint client_id) {
+ TRACE_EVENT2("gpu", "GLES2DecoderImpl::DoCreateAndConsumeTextureCHROMIUM",
+ "context", logger_.GetLogPrefix(),
+ "mailbox[0]", static_cast<unsigned char>(data[0]));
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DLOG_IF(ERROR, !mailbox.Verify()) << "CreateAndConsumeTextureCHROMIUM was "
+ "passed a mailbox that was not "
+ "generated by GenMailboxCHROMIUM.";
+
+ TextureRef* texture_ref = GetTexture(client_id);
+ if (texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCreateAndConsumeTextureCHROMIUM", "client id already in use");
+ return;
+ }
+ Texture* texture = group_->mailbox_manager()->ConsumeTexture(target, mailbox);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCreateAndConsumeTextureCHROMIUM", "invalid mailbox name");
+ return;
+ }
+ if (texture->target() != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glCreateAndConsumeTextureCHROMIUM", "invalid target");
+ return;
+ }
+
+ texture_ref = texture_manager()->Consume(client_id, texture);
+}
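+
+// A rough client-side sketch of the mailbox flow served by the handlers
+// above (illustrative only; the two contexts must share a mailbox manager):
+//   GLbyte mailbox[GL_MAILBOX_SIZE_CHROMIUM];
+//   glGenMailboxCHROMIUM(mailbox);
+//   glProduceTextureDirectCHROMIUM(source_texture, GL_TEXTURE_2D, mailbox);
+//   /* ...in the consuming context... */
+//   GLuint consumed =
+//       glCreateAndConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox);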
+
+void GLES2DecoderImpl::DoInsertEventMarkerEXT(
+ GLsizei length, const GLchar* marker) {
+ if (!marker) {
+ marker = "";
+ }
+ debug_marker_manager_.SetMarker(
+ length ? std::string(marker, length) : std::string(marker));
+}
+
+void GLES2DecoderImpl::DoPushGroupMarkerEXT(
+ GLsizei length, const GLchar* marker) {
+ if (!marker) {
+ marker = "";
+ }
+ std::string name = length ? std::string(marker, length) : std::string(marker);
+ debug_marker_manager_.PushGroup(name);
+ gpu_tracer_->Begin(name, kTraceGroupMarker);
+}
+
+void GLES2DecoderImpl::DoPopGroupMarkerEXT(void) {
+ debug_marker_manager_.PopGroup();
+ gpu_tracer_->End(kTraceGroupMarker);
+}
+
+void GLES2DecoderImpl::DoBindTexImage2DCHROMIUM(
+ GLenum target, GLint image_id) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoBindTexImage2DCHROMIUM");
+
+ if (target == GL_TEXTURE_CUBE_MAP) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_ENUM,
+ "glBindTexImage2DCHROMIUM", "invalid target");
+ return;
+ }
+
+ // Default target might be conceptually valid, but disallow it to avoid
+ // accidents.
+ TextureRef* texture_ref =
+ texture_manager()->GetTextureInfoForTargetUnlessDefault(&state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindTexImage2DCHROMIUM", "no texture bound");
+ return;
+ }
+
+ gfx::GLImage* gl_image = image_manager()->LookupImage(image_id);
+ if (!gl_image) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindTexImage2DCHROMIUM", "no image found with the given ID");
+ return;
+ }
+
+ {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::DoBindTexImage2DCHROMIUM", GetErrorState());
+ if (!gl_image->BindTexImage(target)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glBindTexImage2DCHROMIUM", "fail to bind image with the given ID");
+ return;
+ }
+ }
+
+ gfx::Size size = gl_image->GetSize();
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, 0, GL_RGBA, size.width(), size.height(), 1, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, true);
+ texture_manager()->SetLevelImage(texture_ref, target, 0, gl_image);
+}
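+
+// Roughly the corresponding client-side sequence (illustrative only; the
+// image id comes from glCreateImageCHROMIUM or an equivalent entry point):
+//   glBindTexture(GL_TEXTURE_2D, texture_id);
+//   glBindTexImage2DCHROMIUM(GL_TEXTURE_2D, image_id);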
+
+void GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM(
+ GLenum target, GLint image_id) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM");
+
+ // Default target might be conceptually valid, but disallow it to avoid
+ // accidents.
+ TextureRef* texture_ref =
+ texture_manager()->GetTextureInfoForTargetUnlessDefault(&state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glReleaseTexImage2DCHROMIUM", "no texture bound");
+ return;
+ }
+
+ gfx::GLImage* gl_image = image_manager()->LookupImage(image_id);
+ if (!gl_image) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glReleaseTexImage2DCHROMIUM", "no image found with the given ID");
+ return;
+ }
+
+ // Do nothing when image is not currently bound.
+ if (texture_ref->texture()->GetLevelImage(target, 0) != gl_image)
+ return;
+
+ {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM", GetErrorState());
+ gl_image->ReleaseTexImage(target);
+ }
+
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, 0, GL_RGBA, 0, 0, 1, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, false);
+}
+
+error::Error GLES2DecoderImpl::HandleTraceBeginCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TraceBeginCHROMIUM& c =
+ *static_cast<const gles2::cmds::TraceBeginCHROMIUM*>(cmd_data);
+ Bucket* bucket = GetBucket(c.bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string command_name;
+ if (!bucket->GetAsString(&command_name)) {
+ return error::kInvalidArguments;
+ }
+ TRACE_EVENT_COPY_ASYNC_BEGIN0("gpu", command_name.c_str(), this);
+ if (!gpu_tracer_->Begin(command_name, kTraceCHROMIUM)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTraceBeginCHROMIUM", "unable to create begin trace");
+ return error::kNoError;
+ }
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoTraceEndCHROMIUM() {
+ if (gpu_tracer_->CurrentName().empty()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glTraceEndCHROMIUM", "no trace begin found");
+ return;
+ }
+ TRACE_EVENT_COPY_ASYNC_END0("gpu", gpu_tracer_->CurrentName().c_str(), this);
+ gpu_tracer_->End(kTraceCHROMIUM);
+}
+
+void GLES2DecoderImpl::DoDrawBuffersEXT(
+ GLsizei count, const GLenum* bufs) {
+ if (count > static_cast<GLsizei>(group_->max_draw_buffers())) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE,
+ "glDrawBuffersEXT", "greater than GL_MAX_DRAW_BUFFERS_EXT");
+ return;
+ }
+
+ Framebuffer* framebuffer = GetFramebufferInfoForTarget(GL_FRAMEBUFFER);
+ if (framebuffer) {
+ for (GLsizei i = 0; i < count; ++i) {
+ if (bufs[i] != static_cast<GLenum>(GL_COLOR_ATTACHMENT0 + i) &&
+ bufs[i] != GL_NONE) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glDrawBuffersEXT",
+ "bufs[i] not GL_NONE or GL_COLOR_ATTACHMENTi_EXT");
+ return;
+ }
+ }
+ glDrawBuffersARB(count, bufs);
+ framebuffer->SetDrawBuffers(count, bufs);
+ } else { // backbuffer
+ if (count > 1 ||
+ (bufs[0] != GL_BACK && bufs[0] != GL_NONE)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glDrawBuffersEXT",
+ "more than one buffer or bufs not GL_NONE or GL_BACK");
+ return;
+ }
+ GLenum mapped_buf = bufs[0];
+ if (GetBackbufferServiceId() != 0 && // emulated backbuffer
+ bufs[0] == GL_BACK) {
+ mapped_buf = GL_COLOR_ATTACHMENT0;
+ }
+ glDrawBuffersARB(count, &mapped_buf);
+ group_->set_draw_buffer(bufs[0]);
+ }
+}
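+
+// Roughly the corresponding client-side call (illustrative only):
+//   const GLenum bufs[] = {GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1};
+//   glDrawBuffersEXT(2, bufs);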
+
+void GLES2DecoderImpl::DoLoseContextCHROMIUM(GLenum current, GLenum other) {
+ group_->LoseContexts(other);
+ reset_status_ = current;
+ current_decoder_error_ = error::kLostContext;
+}
+
+void GLES2DecoderImpl::DoMatrixLoadfCHROMIUM(GLenum matrix_mode,
+ const GLfloat* matrix) {
+ DCHECK(matrix_mode == GL_PATH_PROJECTION_CHROMIUM ||
+ matrix_mode == GL_PATH_MODELVIEW_CHROMIUM);
+ if (!features().chromium_path_rendering) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glMatrixLoadfCHROMIUM",
+ "function not available");
+ return;
+ }
+
+ GLfloat* target_matrix = matrix_mode == GL_PATH_PROJECTION_CHROMIUM
+ ? state_.projection_matrix
+ : state_.modelview_matrix;
+ memcpy(target_matrix, matrix, sizeof(GLfloat) * 16);
+ // The matrix_mode is either GL_PATH_MODELVIEW_NV or GL_PATH_PROJECTION_NV
+ // since the values of the _NV and _CHROMIUM tokens match.
+ glMatrixLoadfEXT(matrix_mode, matrix);
+}
+
+void GLES2DecoderImpl::DoMatrixLoadIdentityCHROMIUM(GLenum matrix_mode) {
+ DCHECK(matrix_mode == GL_PATH_PROJECTION_CHROMIUM ||
+ matrix_mode == GL_PATH_MODELVIEW_CHROMIUM);
+
+ if (!features().chromium_path_rendering) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glMatrixLoadIdentityCHROMIUM",
+ "function not available");
+ return;
+ }
+
+ static GLfloat kIdentityMatrix[16] = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f};
+
+ GLfloat* target_matrix = matrix_mode == GL_PATH_PROJECTION_CHROMIUM
+ ? state_.projection_matrix
+ : state_.modelview_matrix;
+ memcpy(target_matrix, kIdentityMatrix, sizeof(kIdentityMatrix));
+ // The matrix_mode is either GL_PATH_MODELVIEW_NV or GL_PATH_PROJECTION_NV
+ // since the values of the _NV and _CHROMIUM tokens match.
+ glMatrixLoadIdentityEXT(matrix_mode);
+}
+
+bool GLES2DecoderImpl::ValidateAsyncTransfer(
+ const char* function_name,
+ TextureRef* texture_ref,
+ GLenum target,
+ GLint level,
+ const void * data) {
+ // We only support async uploads to 2D textures for now.
+ if (GL_TEXTURE_2D != target) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, target, "target");
+ return false;
+ }
+ // We only support uploads to level zero for now.
+ if (level != 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "level != 0");
+ return false;
+ }
+ // A transfer buffer must be bound, even for asyncTexImage2D.
+ if (data == NULL) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name, "buffer == 0");
+ return false;
+ }
+ // We only support one async transfer in progress.
+ if (!texture_ref ||
+ async_pixel_transfer_manager_->AsyncTransferIsInProgress(texture_ref)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ function_name, "transfer already in progress");
+ return false;
+ }
+ return true;
+}
+
+base::Closure GLES2DecoderImpl::AsyncUploadTokenCompletionClosure(
+ uint32 async_upload_token,
+ uint32 sync_data_shm_id,
+ uint32 sync_data_shm_offset) {
+ scoped_refptr<gpu::Buffer> buffer = GetSharedMemoryBuffer(sync_data_shm_id);
+ if (!buffer.get() ||
+ !buffer->GetDataAddress(sync_data_shm_offset, sizeof(AsyncUploadSync)))
+ return base::Closure();
+
+ AsyncMemoryParams mem_params(buffer,
+ sync_data_shm_offset,
+ sizeof(AsyncUploadSync));
+
+ scoped_refptr<AsyncUploadTokenCompletionObserver> observer(
+ new AsyncUploadTokenCompletionObserver(async_upload_token));
+
+ return base::Bind(
+ &AsyncPixelTransferManager::AsyncNotifyCompletion,
+ base::Unretained(GetAsyncPixelTransferManager()),
+ mem_params,
+ observer);
+}
+
+error::Error GLES2DecoderImpl::HandleAsyncTexImage2DCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::AsyncTexImage2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::AsyncTexImage2DCHROMIUM*>(cmd_data);
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleAsyncTexImage2DCHROMIUM");
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLenum internal_format = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLint border = static_cast<GLint>(c.border);
+ GLenum format = static_cast<GLenum>(c.format);
+ GLenum type = static_cast<GLenum>(c.type);
+ uint32 pixels_shm_id = static_cast<uint32>(c.pixels_shm_id);
+ uint32 pixels_shm_offset = static_cast<uint32>(c.pixels_shm_offset);
+ uint32 pixels_size;
+ uint32 async_upload_token = static_cast<uint32>(c.async_upload_token);
+ uint32 sync_data_shm_id = static_cast<uint32>(c.sync_data_shm_id);
+ uint32 sync_data_shm_offset = static_cast<uint32>(c.sync_data_shm_offset);
+
+ base::ScopedClosureRunner scoped_completion_callback;
+ if (async_upload_token) {
+ base::Closure completion_closure =
+ AsyncUploadTokenCompletionClosure(async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
+ if (completion_closure.is_null())
+ return error::kInvalidArguments;
+
+ scoped_completion_callback.Reset(completion_closure);
+ }
+
+ // TODO(epenner): Move this and copies of this memory validation
+ // into ValidateTexImage2D step.
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &pixels_size, NULL,
+ NULL)) {
+ return error::kOutOfBounds;
+ }
+ const void* pixels = NULL;
+ if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
+ pixels = GetSharedMemoryAs<const void*>(
+ pixels_shm_id, pixels_shm_offset, pixels_size);
+ if (!pixels) {
+ return error::kOutOfBounds;
+ }
+ }
+
+ TextureManager::DoTextImage2DArguments args = {
+ target, level, internal_format, width, height, border, format, type,
+ pixels, pixels_size};
+ TextureRef* texture_ref;
+ // All the normal glTexSubImage2D validation.
+ if (!texture_manager()->ValidateTexImage2D(
+ &state_, "glAsyncTexImage2DCHROMIUM", args, &texture_ref)) {
+ return error::kNoError;
+ }
+
+ // Extra async validation.
+ Texture* texture = texture_ref->texture();
+ if (!ValidateAsyncTransfer(
+ "glAsyncTexImage2DCHROMIUM", texture_ref, target, level, pixels))
+ return error::kNoError;
+
+  // Don't allow async redefinition of a texture.
+ if (texture->IsDefined()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glAsyncTexImage2DCHROMIUM", "already defined");
+ return error::kNoError;
+ }
+
+ if (!EnsureGPUMemoryAvailable(pixels_size)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, "glAsyncTexImage2DCHROMIUM", "out of memory");
+ return error::kNoError;
+ }
+
+ // Setup the parameters.
+ AsyncTexImage2DParams tex_params = {
+ target, level, static_cast<GLenum>(internal_format),
+ width, height, border, format, type};
+ AsyncMemoryParams mem_params(
+ GetSharedMemoryBuffer(c.pixels_shm_id), c.pixels_shm_offset, pixels_size);
+
+ // Set up the async state if needed, and make the texture
+ // immutable so the async state stays valid. The level info
+ // is set up lazily when the transfer completes.
+ AsyncPixelTransferDelegate* delegate =
+ async_pixel_transfer_manager_->CreatePixelTransferDelegate(texture_ref,
+ tex_params);
+ texture->SetImmutable(true);
+
+ delegate->AsyncTexImage2D(
+ tex_params,
+ mem_params,
+ base::Bind(&TextureManager::SetLevelInfoFromParams,
+ // The callback is only invoked if the transfer delegate still
+ // exists, which implies through manager->texture_ref->state
+ // ownership that both of these pointers are valid.
+ base::Unretained(texture_manager()),
+ base::Unretained(texture_ref),
+ tex_params));
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleAsyncTexSubImage2DCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::AsyncTexSubImage2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::AsyncTexSubImage2DCHROMIUM*>(cmd_data);
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleAsyncTexSubImage2DCHROMIUM");
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLenum format = static_cast<GLenum>(c.format);
+ GLenum type = static_cast<GLenum>(c.type);
+ uint32 async_upload_token = static_cast<uint32>(c.async_upload_token);
+ uint32 sync_data_shm_id = static_cast<uint32>(c.sync_data_shm_id);
+ uint32 sync_data_shm_offset = static_cast<uint32>(c.sync_data_shm_offset);
+
+ base::ScopedClosureRunner scoped_completion_callback;
+ if (async_upload_token) {
+ base::Closure completion_closure =
+ AsyncUploadTokenCompletionClosure(async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
+ if (completion_closure.is_null())
+ return error::kInvalidArguments;
+
+ scoped_completion_callback.Reset(completion_closure);
+ }
+
+ // TODO(epenner): Move this and copies of this memory validation
+ // into ValidateTexSubImage2D step.
+ uint32 data_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, state_.unpack_alignment, &data_size,
+ NULL, NULL)) {
+ return error::kOutOfBounds;
+ }
+ const void* pixels = GetSharedMemoryAs<const void*>(
+ c.data_shm_id, c.data_shm_offset, data_size);
+
+ // All the normal glTexSubImage2D validation.
+ error::Error error = error::kNoError;
+ if (!ValidateTexSubImage2D(&error, "glAsyncTexSubImage2DCHROMIUM",
+ target, level, xoffset, yoffset, width, height, format, type, pixels)) {
+ return error;
+ }
+
+ // Extra async validation.
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ Texture* texture = texture_ref->texture();
+ if (!ValidateAsyncTransfer(
+ "glAsyncTexSubImage2DCHROMIUM", texture_ref, target, level, pixels))
+ return error::kNoError;
+
+ // Guarantee async textures are always 'cleared' as follows:
+ // - AsyncTexImage2D can not redefine an existing texture
+ // - AsyncTexImage2D must initialize the entire image via non-null buffer.
+ // - AsyncTexSubImage2D clears synchronously if not already cleared.
+ // - Textures become immutable after an async call.
+ // This way we know in all cases that an async texture is always clear.
+ if (!texture->SafeToRenderFrom()) {
+ if (!texture_manager()->ClearTextureLevel(this, texture_ref,
+ target, level)) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY,
+ "glAsyncTexSubImage2DCHROMIUM", "dimensions too big");
+ return error::kNoError;
+ }
+ }
+
+ // Setup the parameters.
+ AsyncTexSubImage2DParams tex_params = {target, level, xoffset, yoffset,
+ width, height, format, type};
+ AsyncMemoryParams mem_params(
+ GetSharedMemoryBuffer(c.data_shm_id), c.data_shm_offset, data_size);
+ AsyncPixelTransferDelegate* delegate =
+ async_pixel_transfer_manager_->GetPixelTransferDelegate(texture_ref);
+ if (!delegate) {
+ // TODO(epenner): We may want to enforce exclusive use
+ // of async APIs in which case this should become an error,
+ // (the texture should have been async defined).
+ AsyncTexImage2DParams define_params = {target, level,
+ 0, 0, 0, 0, 0, 0};
+ texture->GetLevelSize(target, level, &define_params.width,
+ &define_params.height);
+ texture->GetLevelType(target, level, &define_params.type,
+ &define_params.internal_format);
+ // Set up the async state if needed, and make the texture
+ // immutable so the async state stays valid.
+ delegate = async_pixel_transfer_manager_->CreatePixelTransferDelegate(
+ texture_ref, define_params);
+ texture->SetImmutable(true);
+ }
+
+ delegate->AsyncTexSubImage2D(tex_params, mem_params);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleWaitAsyncTexImage2DCHROMIUM(
+ uint32 immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::WaitAsyncTexImage2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::WaitAsyncTexImage2DCHROMIUM*>(cmd_data);
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleWaitAsyncTexImage2DCHROMIUM");
+ GLenum target = static_cast<GLenum>(c.target);
+
+ if (GL_TEXTURE_2D != target) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_ENUM, "glWaitAsyncTexImage2DCHROMIUM", "target");
+ return error::kNoError;
+ }
+ TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
+ &state_, target);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glWaitAsyncTexImage2DCHROMIUM", "unknown texture");
+ return error::kNoError;
+ }
+ AsyncPixelTransferDelegate* delegate =
+ async_pixel_transfer_manager_->GetPixelTransferDelegate(texture_ref);
+ if (!delegate) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION,
+ "glWaitAsyncTexImage2DCHROMIUM", "No async transfer started");
+ return error::kNoError;
+ }
+ delegate->WaitForTransferCompletion();
+ ProcessFinishedAsyncTransfers();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleWaitAllAsyncTexImage2DCHROMIUM(
+ uint32 immediate_data_size,
+ const void* data) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleWaitAsyncTexImage2DCHROMIUM");
+
+ GetAsyncPixelTransferManager()->WaitAllAsyncTexImage2D();
+ ProcessFinishedAsyncTransfers();
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::OnTextureRefDetachedFromFramebuffer(
+ TextureRef* texture_ref) {
+ Texture* texture = texture_ref->texture();
+ DoDidUseTexImageIfNeeded(texture, texture->target());
+}
+
+void GLES2DecoderImpl::OnOutOfMemoryError() {
+ if (lose_context_when_out_of_memory_) {
+ group_->LoseContexts(GL_UNKNOWN_CONTEXT_RESET_ARB);
+ LoseContext(GL_GUILTY_CONTEXT_RESET_ARB);
+ }
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/service/gles2_cmd_decoder_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.h b/gpu/command_buffer/service/gles2_cmd_decoder.h
new file mode 100644
index 0000000..5c94b93
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -0,0 +1,261 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the GLES2Decoder class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_H_
+
+#include <vector>
+
+#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/command_buffer/service/common_decoder.h"
+#include "gpu/command_buffer/service/logger.h"
+#include "ui/gfx/size.h"
+#include "ui/gl/gl_context.h"
+
+namespace gfx {
+class GLContext;
+class GLSurface;
+}
+
+namespace gpu {
+
+class AsyncPixelTransferDelegate;
+class AsyncPixelTransferManager;
+struct Mailbox;
+
+namespace gles2 {
+
+class ContextGroup;
+class ErrorState;
+class GLES2Util;
+class ImageManager;
+class Logger;
+class QueryManager;
+class VertexArrayManager;
+struct ContextState;
+
+struct DisallowedFeatures {
+ DisallowedFeatures()
+ : gpu_memory_manager(false) {
+ }
+
+ bool gpu_memory_manager;
+};
+
+typedef base::Callback<void(const std::string& key,
+ const std::string& shader)> ShaderCacheCallback;
+
+// This class implements the AsyncAPIInterface interface, decoding GLES2
+// commands and calling GL.
+class GPU_EXPORT GLES2Decoder : public base::SupportsWeakPtr<GLES2Decoder>,
+ public CommonDecoder {
+ public:
+ typedef error::Error Error;
+ typedef base::Callback<bool(uint32 id)> WaitSyncPointCallback;
+
+ // The default stencil mask, which has all bits set. This really should be a
+ // GLuint, but we can't #include gl_bindings.h in this file without causing
+ // macro redefinitions.
+ static const unsigned int kDefaultStencilMask;
+
+ // Creates a decoder.
+ static GLES2Decoder* Create(ContextGroup* group);
+
+ virtual ~GLES2Decoder();
+
+ bool initialized() const {
+ return initialized_;
+ }
+
+ void set_initialized() {
+ initialized_ = true;
+ }
+
+ bool debug() const {
+ return debug_;
+ }
+
+ // Set to true to call glGetError after every command.
+ void set_debug(bool debug) {
+ debug_ = debug;
+ }
+
+ bool log_commands() const {
+ return log_commands_;
+ }
+
+ // Set to true to LOG every command.
+ void set_log_commands(bool log_commands) {
+ log_commands_ = log_commands;
+ }
+
+ // Initializes the graphics context. Can create an offscreen
+ // decoder with a frame buffer that can be referenced from the parent.
+ // Takes ownership of GLContext.
+ // Parameters:
+ // surface: the GL surface to render to.
+ // context: the GL context to render to.
+ // offscreen: whether to make the context offscreen or not. When FBO 0 is
+ // bound, offscreen contexts render to an internal buffer, onscreen ones
+ // to the surface.
+  //   size: the size of the offscreen frame buffer, used when offscreen is
+  //       true.
+ // Returns:
+ // true if successful.
+ virtual bool Initialize(const scoped_refptr<gfx::GLSurface>& surface,
+ const scoped_refptr<gfx::GLContext>& context,
+ bool offscreen,
+ const gfx::Size& size,
+ const DisallowedFeatures& disallowed_features,
+ const std::vector<int32>& attribs) = 0;
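+  //
+  // A minimal usage sketch (illustrative only; surface, context, group and
+  // attribs setup plus error handling omitted):
+  //   scoped_ptr<GLES2Decoder> decoder(GLES2Decoder::Create(group));
+  //   if (!decoder->Initialize(surface, context, false /* offscreen */,
+  //                            surface->GetSize(), DisallowedFeatures(),
+  //                            attribs)) {
+  //     // Handle initialization failure.
+  //   }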
+
+ // Destroys the graphics context.
+ virtual void Destroy(bool have_context) = 0;
+
+ // Set the surface associated with the default FBO.
+ virtual void SetSurface(const scoped_refptr<gfx::GLSurface>& surface) = 0;
+
+ virtual void ProduceFrontBuffer(const Mailbox& mailbox) = 0;
+
+ // Resize an offscreen frame buffer.
+ virtual bool ResizeOffscreenFrameBuffer(const gfx::Size& size) = 0;
+
+ // Make this decoder's GL context current.
+ virtual bool MakeCurrent() = 0;
+
+ // Gets the GLES2 Util which holds info.
+ virtual GLES2Util* GetGLES2Util() = 0;
+
+ // Gets the associated GLContext.
+ virtual gfx::GLContext* GetGLContext() = 0;
+
+ // Gets the associated ContextGroup
+ virtual ContextGroup* GetContextGroup() = 0;
+
+ virtual Capabilities GetCapabilities() = 0;
+
+ // Restores all of the decoder GL state.
+ virtual void RestoreState(const ContextState* prev_state) = 0;
+
+ // Restore States.
+ virtual void RestoreActiveTexture() const = 0;
+ virtual void RestoreAllTextureUnitBindings(
+ const ContextState* prev_state) const = 0;
+ virtual void RestoreActiveTextureUnitBinding(unsigned int target) const = 0;
+ virtual void RestoreBufferBindings() const = 0;
+ virtual void RestoreFramebufferBindings() const = 0;
+ virtual void RestoreRenderbufferBindings() = 0;
+ virtual void RestoreGlobalState() const = 0;
+ virtual void RestoreProgramBindings() const = 0;
+ virtual void RestoreTextureState(unsigned service_id) const = 0;
+ virtual void RestoreTextureUnitBindings(unsigned unit) const = 0;
+
+ virtual void ClearAllAttributes() const = 0;
+ virtual void RestoreAllAttributes() const = 0;
+
+ virtual void SetIgnoreCachedStateForTest(bool ignore) = 0;
+
+ // Gets the QueryManager for this context.
+ virtual QueryManager* GetQueryManager() = 0;
+
+ // Gets the VertexArrayManager for this context.
+ virtual VertexArrayManager* GetVertexArrayManager() = 0;
+
+ // Gets the ImageManager for this context.
+ virtual ImageManager* GetImageManager() = 0;
+
+ // Process any pending queries. Returns false if there are no pending queries.
+ virtual bool ProcessPendingQueries() = 0;
+
+  // Returns false if there is no idle work to be done.
+ virtual bool HasMoreIdleWork() = 0;
+
+ virtual void PerformIdleWork() = 0;
+
+ // Sets a callback which is called when a glResizeCHROMIUM command
+ // is processed.
+ virtual void SetResizeCallback(
+ const base::Callback<void(gfx::Size, float)>& callback) = 0;
+
+ // Interface to performing async pixel transfers.
+ virtual AsyncPixelTransferManager* GetAsyncPixelTransferManager() = 0;
+ virtual void ResetAsyncPixelTransferManagerForTest() = 0;
+ virtual void SetAsyncPixelTransferManagerForTest(
+ AsyncPixelTransferManager* manager) = 0;
+
+ // Get the service texture ID corresponding to a client texture ID.
+ // If no such record is found then return false.
+ virtual bool GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id);
+
+ // Provides detail about a lost context if one occurred.
+ virtual error::ContextLostReason GetContextLostReason() = 0;
+
+  // Clears a level of a texture.
+ // Returns false if a GL error should be generated.
+ virtual bool ClearLevel(
+ unsigned service_id,
+ unsigned bind_target,
+ unsigned target,
+ int level,
+ unsigned internal_format,
+ unsigned format,
+ unsigned type,
+ int width,
+ int height,
+ bool is_texture_immutable) = 0;
+
+ virtual ErrorState* GetErrorState() = 0;
+
+  // Sets a callback used by the decoder to report shaders for caching.
+ virtual void SetShaderCacheCallback(const ShaderCacheCallback& callback) = 0;
+
+ // Sets the callback for waiting on a sync point. The callback returns the
+ // scheduling status (i.e. true if the channel is still scheduled).
+ virtual void SetWaitSyncPointCallback(
+ const WaitSyncPointCallback& callback) = 0;
+
+ virtual void WaitForReadPixels(base::Closure callback) = 0;
+ virtual uint32 GetTextureUploadCount() = 0;
+ virtual base::TimeDelta GetTotalTextureUploadTime() = 0;
+ virtual base::TimeDelta GetTotalProcessingCommandsTime() = 0;
+ virtual void AddProcessingCommandsTime(base::TimeDelta) = 0;
+
+ // Returns true if the context was lost either by GL_ARB_robustness, forced
+ // context loss or command buffer parse error.
+ virtual bool WasContextLost() = 0;
+
+ // Returns true if the context was lost specifically by GL_ARB_robustness.
+ virtual bool WasContextLostByRobustnessExtension() = 0;
+
+ // Lose this context.
+ virtual void LoseContext(uint32 reset_status) = 0;
+
+ virtual Logger* GetLogger() = 0;
+
+ virtual void BeginDecoding();
+ virtual void EndDecoding();
+
+ virtual const ContextState* GetContextState() = 0;
+
+ protected:
+ GLES2Decoder();
+
+ private:
+ bool initialized_;
+ bool debug_;
+ bool log_commands_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLES2Decoder);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
new file mode 100644
index 0000000..dade363
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -0,0 +1,3382 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_AUTOGEN_H_
+
+error::Error GLES2DecoderImpl::HandleActiveTexture(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ActiveTexture& c =
+ *static_cast<const gles2::cmds::ActiveTexture*>(cmd_data);
+ (void)c;
+ GLenum texture = static_cast<GLenum>(c.texture);
+ DoActiveTexture(texture);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleAttachShader(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::AttachShader& c =
+ *static_cast<const gles2::cmds::AttachShader*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ GLuint shader = c.shader;
+ DoAttachShader(program, shader);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindBuffer(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindBuffer& c =
+ *static_cast<const gles2::cmds::BindBuffer*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint buffer = c.buffer;
+ if (!validators_->buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindBuffer", target, "target");
+ return error::kNoError;
+ }
+ DoBindBuffer(target, buffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindFramebuffer(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindFramebuffer& c =
+ *static_cast<const gles2::cmds::BindFramebuffer*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint framebuffer = c.framebuffer;
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindFramebuffer", target, "target");
+ return error::kNoError;
+ }
+ DoBindFramebuffer(target, framebuffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindRenderbuffer(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindRenderbuffer& c =
+ *static_cast<const gles2::cmds::BindRenderbuffer*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint renderbuffer = c.renderbuffer;
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindRenderbuffer", target, "target");
+ return error::kNoError;
+ }
+ DoBindRenderbuffer(target, renderbuffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindTexture(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindTexture& c =
+ *static_cast<const gles2::cmds::BindTexture*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint texture = c.texture;
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindTexture", target, "target");
+ return error::kNoError;
+ }
+ DoBindTexture(target, texture);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendColor(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlendColor& c =
+ *static_cast<const gles2::cmds::BlendColor*>(cmd_data);
+ (void)c;
+ GLclampf red = static_cast<GLclampf>(c.red);
+ GLclampf green = static_cast<GLclampf>(c.green);
+ GLclampf blue = static_cast<GLclampf>(c.blue);
+ GLclampf alpha = static_cast<GLclampf>(c.alpha);
+ if (state_.blend_color_red != red || state_.blend_color_green != green ||
+ state_.blend_color_blue != blue || state_.blend_color_alpha != alpha) {
+ state_.blend_color_red = red;
+ state_.blend_color_green = green;
+ state_.blend_color_blue = blue;
+ state_.blend_color_alpha = alpha;
+ glBlendColor(red, green, blue, alpha);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendEquation(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlendEquation& c =
+ *static_cast<const gles2::cmds::BlendEquation*>(cmd_data);
+ (void)c;
+ GLenum mode = static_cast<GLenum>(c.mode);
+ if (!validators_->equation.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBlendEquation", mode, "mode");
+ return error::kNoError;
+ }
+ if (state_.blend_equation_rgb != mode ||
+ state_.blend_equation_alpha != mode) {
+ state_.blend_equation_rgb = mode;
+ state_.blend_equation_alpha = mode;
+ glBlendEquation(mode);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendEquationSeparate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlendEquationSeparate& c =
+ *static_cast<const gles2::cmds::BlendEquationSeparate*>(cmd_data);
+ (void)c;
+ GLenum modeRGB = static_cast<GLenum>(c.modeRGB);
+ GLenum modeAlpha = static_cast<GLenum>(c.modeAlpha);
+ if (!validators_->equation.IsValid(modeRGB)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBlendEquationSeparate", modeRGB, "modeRGB");
+ return error::kNoError;
+ }
+ if (!validators_->equation.IsValid(modeAlpha)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBlendEquationSeparate", modeAlpha, "modeAlpha");
+ return error::kNoError;
+ }
+ if (state_.blend_equation_rgb != modeRGB ||
+ state_.blend_equation_alpha != modeAlpha) {
+ state_.blend_equation_rgb = modeRGB;
+ state_.blend_equation_alpha = modeAlpha;
+ glBlendEquationSeparate(modeRGB, modeAlpha);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendFunc(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlendFunc& c =
+ *static_cast<const gles2::cmds::BlendFunc*>(cmd_data);
+ (void)c;
+ GLenum sfactor = static_cast<GLenum>(c.sfactor);
+ GLenum dfactor = static_cast<GLenum>(c.dfactor);
+ if (!validators_->src_blend_factor.IsValid(sfactor)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBlendFunc", sfactor, "sfactor");
+ return error::kNoError;
+ }
+ if (!validators_->dst_blend_factor.IsValid(dfactor)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBlendFunc", dfactor, "dfactor");
+ return error::kNoError;
+ }
+ if (state_.blend_source_rgb != sfactor || state_.blend_dest_rgb != dfactor ||
+ state_.blend_source_alpha != sfactor ||
+ state_.blend_dest_alpha != dfactor) {
+ state_.blend_source_rgb = sfactor;
+ state_.blend_dest_rgb = dfactor;
+ state_.blend_source_alpha = sfactor;
+ state_.blend_dest_alpha = dfactor;
+ glBlendFunc(sfactor, dfactor);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendFuncSeparate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlendFuncSeparate& c =
+ *static_cast<const gles2::cmds::BlendFuncSeparate*>(cmd_data);
+ (void)c;
+ GLenum srcRGB = static_cast<GLenum>(c.srcRGB);
+ GLenum dstRGB = static_cast<GLenum>(c.dstRGB);
+ GLenum srcAlpha = static_cast<GLenum>(c.srcAlpha);
+ GLenum dstAlpha = static_cast<GLenum>(c.dstAlpha);
+ if (!validators_->src_blend_factor.IsValid(srcRGB)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBlendFuncSeparate", srcRGB, "srcRGB");
+ return error::kNoError;
+ }
+ if (!validators_->dst_blend_factor.IsValid(dstRGB)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBlendFuncSeparate", dstRGB, "dstRGB");
+ return error::kNoError;
+ }
+ if (!validators_->src_blend_factor.IsValid(srcAlpha)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBlendFuncSeparate", srcAlpha, "srcAlpha");
+ return error::kNoError;
+ }
+ if (!validators_->dst_blend_factor.IsValid(dstAlpha)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBlendFuncSeparate", dstAlpha, "dstAlpha");
+ return error::kNoError;
+ }
+ if (state_.blend_source_rgb != srcRGB || state_.blend_dest_rgb != dstRGB ||
+ state_.blend_source_alpha != srcAlpha ||
+ state_.blend_dest_alpha != dstAlpha) {
+ state_.blend_source_rgb = srcRGB;
+ state_.blend_dest_rgb = dstRGB;
+ state_.blend_source_alpha = srcAlpha;
+ state_.blend_dest_alpha = dstAlpha;
+ glBlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBufferSubData(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BufferSubData& c =
+ *static_cast<const gles2::cmds::BufferSubData*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLintptr offset = static_cast<GLintptr>(c.offset);
+ GLsizeiptr size = static_cast<GLsizeiptr>(c.size);
+ uint32_t data_size = size;
+ const void* data = GetSharedMemoryAs<const void*>(
+ c.data_shm_id, c.data_shm_offset, data_size);
+ if (!validators_->buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBufferSubData", target, "target");
+ return error::kNoError;
+ }
+ if (size < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glBufferSubData", "size < 0");
+ return error::kNoError;
+ }
+ if (data == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoBufferSubData(target, offset, size, data);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCheckFramebufferStatus(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CheckFramebufferStatus& c =
+ *static_cast<const gles2::cmds::CheckFramebufferStatus*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ typedef cmds::CheckFramebufferStatus::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCheckFramebufferStatus", target, "target");
+ return error::kNoError;
+ }
+ *result_dst = DoCheckFramebufferStatus(target);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleClear(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Clear& c =
+ *static_cast<const gles2::cmds::Clear*>(cmd_data);
+ (void)c;
+ error::Error error;
+ error = WillAccessBoundFramebufferForDraw();
+ if (error != error::kNoError)
+ return error;
+ GLbitfield mask = static_cast<GLbitfield>(c.mask);
+ DoClear(mask);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleClearColor(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ClearColor& c =
+ *static_cast<const gles2::cmds::ClearColor*>(cmd_data);
+ (void)c;
+ GLclampf red = static_cast<GLclampf>(c.red);
+ GLclampf green = static_cast<GLclampf>(c.green);
+ GLclampf blue = static_cast<GLclampf>(c.blue);
+ GLclampf alpha = static_cast<GLclampf>(c.alpha);
+ if (state_.color_clear_red != red || state_.color_clear_green != green ||
+ state_.color_clear_blue != blue || state_.color_clear_alpha != alpha) {
+ state_.color_clear_red = red;
+ state_.color_clear_green = green;
+ state_.color_clear_blue = blue;
+ state_.color_clear_alpha = alpha;
+ glClearColor(red, green, blue, alpha);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleClearDepthf(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ClearDepthf& c =
+ *static_cast<const gles2::cmds::ClearDepthf*>(cmd_data);
+ (void)c;
+ GLclampf depth = static_cast<GLclampf>(c.depth);
+ if (state_.depth_clear != depth) {
+ state_.depth_clear = depth;
+ glClearDepth(depth);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleClearStencil(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ClearStencil& c =
+ *static_cast<const gles2::cmds::ClearStencil*>(cmd_data);
+ (void)c;
+ GLint s = static_cast<GLint>(c.s);
+ if (state_.stencil_clear != s) {
+ state_.stencil_clear = s;
+ glClearStencil(s);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleColorMask(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ColorMask& c =
+ *static_cast<const gles2::cmds::ColorMask*>(cmd_data);
+ (void)c;
+ GLboolean red = static_cast<GLboolean>(c.red);
+ GLboolean green = static_cast<GLboolean>(c.green);
+ GLboolean blue = static_cast<GLboolean>(c.blue);
+ GLboolean alpha = static_cast<GLboolean>(c.alpha);
+ if (state_.color_mask_red != red || state_.color_mask_green != green ||
+ state_.color_mask_blue != blue || state_.color_mask_alpha != alpha) {
+ state_.color_mask_red = red;
+ state_.color_mask_green = green;
+ state_.color_mask_blue = blue;
+ state_.color_mask_alpha = alpha;
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCompileShader(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CompileShader& c =
+ *static_cast<const gles2::cmds::CompileShader*>(cmd_data);
+ (void)c;
+ GLuint shader = c.shader;
+ DoCompileShader(shader);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCompressedTexSubImage2D(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CompressedTexSubImage2D& c =
+ *static_cast<const gles2::cmds::CompressedTexSubImage2D*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLenum format = static_cast<GLenum>(c.format);
+ GLsizei imageSize = static_cast<GLsizei>(c.imageSize);
+ uint32_t data_size = imageSize;
+ const void* data = GetSharedMemoryAs<const void*>(
+ c.data_shm_id, c.data_shm_offset, data_size);
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCompressedTexSubImage2D", target, "target");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "height < 0");
+ return error::kNoError;
+ }
+ if (!validators_->compressed_texture_format.IsValid(format)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCompressedTexSubImage2D", format, "format");
+ return error::kNoError;
+ }
+ if (imageSize < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glCompressedTexSubImage2D", "imageSize < 0");
+ return error::kNoError;
+ }
+ if (data == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoCompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, imageSize, data);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCopyTexImage2D(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CopyTexImage2D& c =
+ *static_cast<const gles2::cmds::CopyTexImage2D*>(cmd_data);
+ (void)c;
+ error::Error error;
+ error = WillAccessBoundFramebufferForRead();
+ if (error != error::kNoError)
+ return error;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLint border = static_cast<GLint>(c.border);
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCopyTexImage2D", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_internal_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCopyTexImage2D", internalformat, "internalformat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopyTexImage2D", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopyTexImage2D", "height < 0");
+ return error::kNoError;
+ }
+ DoCopyTexImage2D(target, level, internalformat, x, y, width, height, border);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCopyTexSubImage2D(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CopyTexSubImage2D& c =
+ *static_cast<const gles2::cmds::CopyTexSubImage2D*>(cmd_data);
+ (void)c;
+ error::Error error;
+ error = WillAccessBoundFramebufferForRead();
+ if (error != error::kNoError)
+ return error;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint level = static_cast<GLint>(c.level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCopyTexSubImage2D", target, "target");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopyTexSubImage2D", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopyTexSubImage2D", "height < 0");
+ return error::kNoError;
+ }
+ DoCopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCreateProgram(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CreateProgram& c =
+ *static_cast<const gles2::cmds::CreateProgram*>(cmd_data);
+ (void)c;
+ uint32_t client_id = c.client_id;
+ if (!CreateProgramHelper(client_id)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCreateShader(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CreateShader& c =
+ *static_cast<const gles2::cmds::CreateShader*>(cmd_data);
+ (void)c;
+ GLenum type = static_cast<GLenum>(c.type);
+ if (!validators_->shader_type.IsValid(type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCreateShader", type, "type");
+ return error::kNoError;
+ }
+ uint32_t client_id = c.client_id;
+ if (!CreateShaderHelper(type, client_id)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCullFace(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CullFace& c =
+ *static_cast<const gles2::cmds::CullFace*>(cmd_data);
+ (void)c;
+ GLenum mode = static_cast<GLenum>(c.mode);
+ if (!validators_->face_type.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCullFace", mode, "mode");
+ return error::kNoError;
+ }
+ if (state_.cull_mode != mode) {
+ state_.cull_mode = mode;
+ glCullFace(mode);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteBuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteBuffersImmediate& c =
+ *static_cast<const gles2::cmds::DeleteBuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* buffers =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (buffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteBuffersHelper(n, buffers);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteFramebuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteFramebuffersImmediate& c =
+ *static_cast<const gles2::cmds::DeleteFramebuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* framebuffers =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (framebuffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteFramebuffersHelper(n, framebuffers);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteRenderbuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteRenderbuffersImmediate& c =
+ *static_cast<const gles2::cmds::DeleteRenderbuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* renderbuffers =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (renderbuffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteRenderbuffersHelper(n, renderbuffers);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteTexturesImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteTexturesImmediate& c =
+ *static_cast<const gles2::cmds::DeleteTexturesImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* textures =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (textures == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteTexturesHelper(n, textures);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDepthFunc(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DepthFunc& c =
+ *static_cast<const gles2::cmds::DepthFunc*>(cmd_data);
+ (void)c;
+ GLenum func = static_cast<GLenum>(c.func);
+ if (!validators_->cmp_function.IsValid(func)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glDepthFunc", func, "func");
+ return error::kNoError;
+ }
+ if (state_.depth_func != func) {
+ state_.depth_func = func;
+ glDepthFunc(func);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDepthMask(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DepthMask& c =
+ *static_cast<const gles2::cmds::DepthMask*>(cmd_data);
+ (void)c;
+ GLboolean flag = static_cast<GLboolean>(c.flag);
+ if (state_.depth_mask != flag) {
+ state_.depth_mask = flag;
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDepthRangef(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DepthRangef& c =
+ *static_cast<const gles2::cmds::DepthRangef*>(cmd_data);
+ (void)c;
+ GLclampf zNear = static_cast<GLclampf>(c.zNear);
+ GLclampf zFar = static_cast<GLclampf>(c.zFar);
+ DoDepthRangef(zNear, zFar);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDetachShader(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DetachShader& c =
+ *static_cast<const gles2::cmds::DetachShader*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ GLuint shader = c.shader;
+ DoDetachShader(program, shader);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDisable(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Disable& c =
+ *static_cast<const gles2::cmds::Disable*>(cmd_data);
+ (void)c;
+ GLenum cap = static_cast<GLenum>(c.cap);
+ if (!validators_->capability.IsValid(cap)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glDisable", cap, "cap");
+ return error::kNoError;
+ }
+ DoDisable(cap);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDisableVertexAttribArray(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DisableVertexAttribArray& c =
+ *static_cast<const gles2::cmds::DisableVertexAttribArray*>(cmd_data);
+ (void)c;
+ GLuint index = static_cast<GLuint>(c.index);
+ DoDisableVertexAttribArray(index);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleEnable(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Enable& c =
+ *static_cast<const gles2::cmds::Enable*>(cmd_data);
+ (void)c;
+ GLenum cap = static_cast<GLenum>(c.cap);
+ if (!validators_->capability.IsValid(cap)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glEnable", cap, "cap");
+ return error::kNoError;
+ }
+ DoEnable(cap);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleEnableVertexAttribArray(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::EnableVertexAttribArray& c =
+ *static_cast<const gles2::cmds::EnableVertexAttribArray*>(cmd_data);
+ (void)c;
+ GLuint index = static_cast<GLuint>(c.index);
+ DoEnableVertexAttribArray(index);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFinish(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Finish& c =
+ *static_cast<const gles2::cmds::Finish*>(cmd_data);
+ (void)c;
+ error::Error error;
+ error = WillAccessBoundFramebufferForRead();
+ if (error != error::kNoError)
+ return error;
+ DoFinish();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFlush(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Flush& c =
+ *static_cast<const gles2::cmds::Flush*>(cmd_data);
+ (void)c;
+ DoFlush();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFramebufferRenderbuffer(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::FramebufferRenderbuffer& c =
+ *static_cast<const gles2::cmds::FramebufferRenderbuffer*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum attachment = static_cast<GLenum>(c.attachment);
+ GLenum renderbuffertarget = static_cast<GLenum>(c.renderbuffertarget);
+ GLuint renderbuffer = c.renderbuffer;
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferRenderbuffer", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->attachment.IsValid(attachment)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferRenderbuffer", attachment, "attachment");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_target.IsValid(renderbuffertarget)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferRenderbuffer", renderbuffertarget, "renderbuffertarget");
+ return error::kNoError;
+ }
+ DoFramebufferRenderbuffer(
+ target, attachment, renderbuffertarget, renderbuffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFramebufferTexture2D(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::FramebufferTexture2D& c =
+ *static_cast<const gles2::cmds::FramebufferTexture2D*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum attachment = static_cast<GLenum>(c.attachment);
+ GLenum textarget = static_cast<GLenum>(c.textarget);
+ GLuint texture = c.texture;
+ GLint level = static_cast<GLint>(c.level);
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glFramebufferTexture2D", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->attachment.IsValid(attachment)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferTexture2D", attachment, "attachment");
+ return error::kNoError;
+ }
+ if (!validators_->texture_target.IsValid(textarget)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferTexture2D", textarget, "textarget");
+ return error::kNoError;
+ }
+ DoFramebufferTexture2D(target, attachment, textarget, texture, level);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFrontFace(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::FrontFace& c =
+ *static_cast<const gles2::cmds::FrontFace*>(cmd_data);
+ (void)c;
+ GLenum mode = static_cast<GLenum>(c.mode);
+ if (!validators_->face_mode.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glFrontFace", mode, "mode");
+ return error::kNoError;
+ }
+ if (state_.front_face != mode) {
+ state_.front_face = mode;
+ glFrontFace(mode);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenBuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenBuffersImmediate& c =
+ *static_cast<const gles2::cmds::GenBuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* buffers =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (buffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenBuffersHelper(n, buffers)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenerateMipmap(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenerateMipmap& c =
+ *static_cast<const gles2::cmds::GenerateMipmap*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGenerateMipmap", target, "target");
+ return error::kNoError;
+ }
+ DoGenerateMipmap(target);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenFramebuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenFramebuffersImmediate& c =
+ *static_cast<const gles2::cmds::GenFramebuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* framebuffers =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (framebuffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenFramebuffersHelper(n, framebuffers)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenRenderbuffersImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenRenderbuffersImmediate& c =
+ *static_cast<const gles2::cmds::GenRenderbuffersImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* renderbuffers =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (renderbuffers == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenRenderbuffersHelper(n, renderbuffers)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenTexturesImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenTexturesImmediate& c =
+ *static_cast<const gles2::cmds::GenTexturesImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* textures =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (textures == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenTexturesHelper(n, textures)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetBooleanv(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetBooleanv& c =
+ *static_cast<const gles2::cmds::GetBooleanv*>(cmd_data);
+ (void)c;
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetBooleanv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLboolean* params = result ? result->GetData() : NULL;
+ if (!validators_->g_l_state.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetBooleanv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetBooleanv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetBooleanv(pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetBooleanv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetBufferParameteriv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetBufferParameteriv& c =
+ *static_cast<const gles2::cmds::GetBufferParameteriv*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetBufferParameteriv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetBufferParameteriv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->buffer_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetBufferParameteriv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetBufferParameteriv(target, pname, params);
+ result->SetNumResults(num_values);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetError(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetError& c =
+ *static_cast<const gles2::cmds::GetError*>(cmd_data);
+ (void)c;
+ typedef cmds::GetError::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = GetErrorState()->GetGLError();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetFloatv(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetFloatv& c =
+ *static_cast<const gles2::cmds::GetFloatv*>(cmd_data);
+ (void)c;
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetFloatv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLfloat* params = result ? result->GetData() : NULL;
+ if (!validators_->g_l_state.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetFloatv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetFloatv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetFloatv(pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetFloatv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetFramebufferAttachmentParameteriv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetFramebufferAttachmentParameteriv& c =
+ *static_cast<const gles2::cmds::GetFramebufferAttachmentParameteriv*>(
+ cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum attachment = static_cast<GLenum>(c.attachment);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetFramebufferAttachmentParameteriv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetFramebufferAttachmentParameteriv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->attachment.IsValid(attachment)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetFramebufferAttachmentParameteriv", attachment, "attachment");
+ return error::kNoError;
+ }
+ if (!validators_->frame_buffer_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetFramebufferAttachmentParameteriv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetFramebufferAttachmentParameteriv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetFramebufferAttachmentParameteriv(target, attachment, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetFramebufferAttachmentParameteriv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetIntegerv(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetIntegerv& c =
+ *static_cast<const gles2::cmds::GetIntegerv*>(cmd_data);
+ (void)c;
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetIntegerv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->g_l_state.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetIntegerv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetIntegerv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetIntegerv(pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetIntegerv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramiv(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetProgramiv& c =
+ *static_cast<const gles2::cmds::GetProgramiv*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetProgramiv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->program_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetProgramiv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetProgramiv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetProgramiv(program, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetProgramiv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetRenderbufferParameteriv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetRenderbufferParameteriv& c =
+ *static_cast<const gles2::cmds::GetRenderbufferParameteriv*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetRenderbufferParameteriv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetRenderbufferParameteriv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetRenderbufferParameteriv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetRenderbufferParameteriv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetRenderbufferParameteriv(target, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetRenderbufferParameteriv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetShaderiv(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetShaderiv& c =
+ *static_cast<const gles2::cmds::GetShaderiv*>(cmd_data);
+ (void)c;
+ GLuint shader = c.shader;
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetShaderiv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->shader_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetShaderiv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetShaderiv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetShaderiv(shader, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetShaderiv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetTexParameterfv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetTexParameterfv& c =
+ *static_cast<const gles2::cmds::GetTexParameterfv*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetTexParameterfv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLfloat* params = result ? result->GetData() : NULL;
+ if (!validators_->get_tex_param_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetTexParameterfv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetTexParameterfv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetTexParameterfv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetTexParameterfv(target, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetTexParameterfv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetTexParameteriv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetTexParameteriv& c =
+ *static_cast<const gles2::cmds::GetTexParameteriv*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetTexParameteriv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->get_tex_param_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetTexParameteriv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetTexParameteriv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetTexParameteriv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetTexParameteriv(target, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetTexParameteriv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetVertexAttribfv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetVertexAttribfv& c =
+ *static_cast<const gles2::cmds::GetVertexAttribfv*>(cmd_data);
+ (void)c;
+ GLuint index = static_cast<GLuint>(c.index);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetVertexAttribfv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLfloat* params = result ? result->GetData() : NULL;
+ if (!validators_->vertex_attribute.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetVertexAttribfv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetVertexAttribfv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetVertexAttribfv(index, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetVertexAttribfv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetVertexAttribiv(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetVertexAttribiv& c =
+ *static_cast<const gles2::cmds::GetVertexAttribiv*>(cmd_data);
+ (void)c;
+ GLuint index = static_cast<GLuint>(c.index);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetVertexAttribiv::Result Result;
+ GLsizei num_values = 0;
+ GetNumValuesReturnedForGLGet(pname, &num_values);
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->vertex_attribute.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetVertexAttribiv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetVertexAttribiv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetVertexAttribiv(index, pname, params);
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ } else {
+ LOCAL_SET_GL_ERROR(error, "GetVertexAttribiv", "");
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleHint(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Hint& c = *static_cast<const gles2::cmds::Hint*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum mode = static_cast<GLenum>(c.mode);
+ if (!validators_->hint_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glHint", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->hint_mode.IsValid(mode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glHint", mode, "mode");
+ return error::kNoError;
+ }
+ switch (target) {
+ case GL_GENERATE_MIPMAP_HINT:
+ if (state_.hint_generate_mipmap != mode) {
+ state_.hint_generate_mipmap = mode;
+ glHint(target, mode);
+ }
+ break;
+ case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES:
+ if (state_.hint_fragment_shader_derivative != mode) {
+ state_.hint_fragment_shader_derivative = mode;
+ glHint(target, mode);
+ }
+ break;
+ default:
+ NOTREACHED();
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsBuffer(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsBuffer& c =
+ *static_cast<const gles2::cmds::IsBuffer*>(cmd_data);
+ (void)c;
+ GLuint buffer = c.buffer;
+ typedef cmds::IsBuffer::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsBuffer(buffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsEnabled(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsEnabled& c =
+ *static_cast<const gles2::cmds::IsEnabled*>(cmd_data);
+ (void)c;
+ GLenum cap = static_cast<GLenum>(c.cap);
+ typedef cmds::IsEnabled::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ if (!validators_->capability.IsValid(cap)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glIsEnabled", cap, "cap");
+ return error::kNoError;
+ }
+ *result_dst = DoIsEnabled(cap);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsFramebuffer(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsFramebuffer& c =
+ *static_cast<const gles2::cmds::IsFramebuffer*>(cmd_data);
+ (void)c;
+ GLuint framebuffer = c.framebuffer;
+ typedef cmds::IsFramebuffer::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsFramebuffer(framebuffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsProgram(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsProgram& c =
+ *static_cast<const gles2::cmds::IsProgram*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ typedef cmds::IsProgram::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsProgram(program);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsRenderbuffer(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsRenderbuffer& c =
+ *static_cast<const gles2::cmds::IsRenderbuffer*>(cmd_data);
+ (void)c;
+ GLuint renderbuffer = c.renderbuffer;
+ typedef cmds::IsRenderbuffer::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsRenderbuffer(renderbuffer);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsShader(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsShader& c =
+ *static_cast<const gles2::cmds::IsShader*>(cmd_data);
+ (void)c;
+ GLuint shader = c.shader;
+ typedef cmds::IsShader::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsShader(shader);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsTexture(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsTexture& c =
+ *static_cast<const gles2::cmds::IsTexture*>(cmd_data);
+ (void)c;
+ GLuint texture = c.texture;
+ typedef cmds::IsTexture::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsTexture(texture);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleLineWidth(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::LineWidth& c =
+ *static_cast<const gles2::cmds::LineWidth*>(cmd_data);
+ (void)c;
+ GLfloat width = static_cast<GLfloat>(c.width);
+ if (width <= 0.0f || base::IsNaN(width)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "LineWidth", "width out of range");
+ return error::kNoError;
+ }
+ if (state_.line_width != width) {
+ state_.line_width = width;
+ glLineWidth(width);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleLinkProgram(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::LinkProgram& c =
+ *static_cast<const gles2::cmds::LinkProgram*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ DoLinkProgram(program);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandlePolygonOffset(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::PolygonOffset& c =
+ *static_cast<const gles2::cmds::PolygonOffset*>(cmd_data);
+ (void)c;
+ GLfloat factor = static_cast<GLfloat>(c.factor);
+ GLfloat units = static_cast<GLfloat>(c.units);
+ if (state_.polygon_offset_factor != factor ||
+ state_.polygon_offset_units != units) {
+ state_.polygon_offset_factor = factor;
+ state_.polygon_offset_units = units;
+ glPolygonOffset(factor, units);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleReleaseShaderCompiler(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ReleaseShaderCompiler& c =
+ *static_cast<const gles2::cmds::ReleaseShaderCompiler*>(cmd_data);
+ (void)c;
+ DoReleaseShaderCompiler();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleRenderbufferStorage(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::RenderbufferStorage& c =
+ *static_cast<const gles2::cmds::RenderbufferStorage*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glRenderbufferStorage", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glRenderbufferStorage", internalformat, "internalformat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRenderbufferStorage", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRenderbufferStorage", "height < 0");
+ return error::kNoError;
+ }
+ DoRenderbufferStorage(target, internalformat, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleSampleCoverage(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::SampleCoverage& c =
+ *static_cast<const gles2::cmds::SampleCoverage*>(cmd_data);
+ (void)c;
+ GLclampf value = static_cast<GLclampf>(c.value);
+ GLboolean invert = static_cast<GLboolean>(c.invert);
+ DoSampleCoverage(value, invert);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleScissor(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Scissor& c =
+ *static_cast<const gles2::cmds::Scissor*>(cmd_data);
+ (void)c;
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScissor", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScissor", "height < 0");
+ return error::kNoError;
+ }
+ if (state_.scissor_x != x || state_.scissor_y != y ||
+ state_.scissor_width != width || state_.scissor_height != height) {
+ state_.scissor_x = x;
+ state_.scissor_y = y;
+ state_.scissor_width = width;
+ state_.scissor_height = height;
+ glScissor(x, y, width, height);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilFunc(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilFunc& c =
+ *static_cast<const gles2::cmds::StencilFunc*>(cmd_data);
+ (void)c;
+ GLenum func = static_cast<GLenum>(c.func);
+ GLint ref = static_cast<GLint>(c.ref);
+ GLuint mask = static_cast<GLuint>(c.mask);
+ if (!validators_->cmp_function.IsValid(func)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilFunc", func, "func");
+ return error::kNoError;
+ }
+ if (state_.stencil_front_func != func || state_.stencil_front_ref != ref ||
+ state_.stencil_front_mask != mask || state_.stencil_back_func != func ||
+ state_.stencil_back_ref != ref || state_.stencil_back_mask != mask) {
+ state_.stencil_front_func = func;
+ state_.stencil_front_ref = ref;
+ state_.stencil_front_mask = mask;
+ state_.stencil_back_func = func;
+ state_.stencil_back_ref = ref;
+ state_.stencil_back_mask = mask;
+ glStencilFunc(func, ref, mask);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilFuncSeparate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilFuncSeparate& c =
+ *static_cast<const gles2::cmds::StencilFuncSeparate*>(cmd_data);
+ (void)c;
+ GLenum face = static_cast<GLenum>(c.face);
+ GLenum func = static_cast<GLenum>(c.func);
+ GLint ref = static_cast<GLint>(c.ref);
+ GLuint mask = static_cast<GLuint>(c.mask);
+ if (!validators_->face_type.IsValid(face)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilFuncSeparate", face, "face");
+ return error::kNoError;
+ }
+ if (!validators_->cmp_function.IsValid(func)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilFuncSeparate", func, "func");
+ return error::kNoError;
+ }
+ bool changed = false;
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_front_func != func ||
+ state_.stencil_front_ref != ref ||
+ state_.stencil_front_mask != mask;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_back_func != func ||
+ state_.stencil_back_ref != ref ||
+ state_.stencil_back_mask != mask;
+ }
+ if (changed) {
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ state_.stencil_front_func = func;
+ state_.stencil_front_ref = ref;
+ state_.stencil_front_mask = mask;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ state_.stencil_back_func = func;
+ state_.stencil_back_ref = ref;
+ state_.stencil_back_mask = mask;
+ }
+ glStencilFuncSeparate(face, func, ref, mask);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilMask(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilMask& c =
+ *static_cast<const gles2::cmds::StencilMask*>(cmd_data);
+ (void)c;
+ GLuint mask = static_cast<GLuint>(c.mask);
+ if (state_.stencil_front_writemask != mask ||
+ state_.stencil_back_writemask != mask) {
+ state_.stencil_front_writemask = mask;
+ state_.stencil_back_writemask = mask;
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilMaskSeparate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilMaskSeparate& c =
+ *static_cast<const gles2::cmds::StencilMaskSeparate*>(cmd_data);
+ (void)c;
+ GLenum face = static_cast<GLenum>(c.face);
+ GLuint mask = static_cast<GLuint>(c.mask);
+ if (!validators_->face_type.IsValid(face)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilMaskSeparate", face, "face");
+ return error::kNoError;
+ }
+ bool changed = false;
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_front_writemask != mask;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_back_writemask != mask;
+ }
+ if (changed) {
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ state_.stencil_front_writemask = mask;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ state_.stencil_back_writemask = mask;
+ }
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilOp(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilOp& c =
+ *static_cast<const gles2::cmds::StencilOp*>(cmd_data);
+ (void)c;
+ GLenum fail = static_cast<GLenum>(c.fail);
+ GLenum zfail = static_cast<GLenum>(c.zfail);
+ GLenum zpass = static_cast<GLenum>(c.zpass);
+ if (!validators_->stencil_op.IsValid(fail)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOp", fail, "fail");
+ return error::kNoError;
+ }
+ if (!validators_->stencil_op.IsValid(zfail)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOp", zfail, "zfail");
+ return error::kNoError;
+ }
+ if (!validators_->stencil_op.IsValid(zpass)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOp", zpass, "zpass");
+ return error::kNoError;
+ }
+ if (state_.stencil_front_fail_op != fail ||
+ state_.stencil_front_z_fail_op != zfail ||
+ state_.stencil_front_z_pass_op != zpass ||
+ state_.stencil_back_fail_op != fail ||
+ state_.stencil_back_z_fail_op != zfail ||
+ state_.stencil_back_z_pass_op != zpass) {
+ state_.stencil_front_fail_op = fail;
+ state_.stencil_front_z_fail_op = zfail;
+ state_.stencil_front_z_pass_op = zpass;
+ state_.stencil_back_fail_op = fail;
+ state_.stencil_back_z_fail_op = zfail;
+ state_.stencil_back_z_pass_op = zpass;
+ glStencilOp(fail, zfail, zpass);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleStencilOpSeparate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::StencilOpSeparate& c =
+ *static_cast<const gles2::cmds::StencilOpSeparate*>(cmd_data);
+ (void)c;
+ GLenum face = static_cast<GLenum>(c.face);
+ GLenum fail = static_cast<GLenum>(c.fail);
+ GLenum zfail = static_cast<GLenum>(c.zfail);
+ GLenum zpass = static_cast<GLenum>(c.zpass);
+ if (!validators_->face_type.IsValid(face)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOpSeparate", face, "face");
+ return error::kNoError;
+ }
+ if (!validators_->stencil_op.IsValid(fail)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOpSeparate", fail, "fail");
+ return error::kNoError;
+ }
+ if (!validators_->stencil_op.IsValid(zfail)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOpSeparate", zfail, "zfail");
+ return error::kNoError;
+ }
+ if (!validators_->stencil_op.IsValid(zpass)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glStencilOpSeparate", zpass, "zpass");
+ return error::kNoError;
+ }
+ bool changed = false;
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_front_fail_op != fail ||
+ state_.stencil_front_z_fail_op != zfail ||
+ state_.stencil_front_z_pass_op != zpass;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ changed |= state_.stencil_back_fail_op != fail ||
+ state_.stencil_back_z_fail_op != zfail ||
+ state_.stencil_back_z_pass_op != zpass;
+ }
+ if (changed) {
+ if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ state_.stencil_front_fail_op = fail;
+ state_.stencil_front_z_fail_op = zfail;
+ state_.stencil_front_z_pass_op = zpass;
+ }
+ if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ state_.stencil_back_fail_op = fail;
+ state_.stencil_back_z_fail_op = zfail;
+ state_.stencil_back_z_pass_op = zpass;
+ }
+ glStencilOpSeparate(face, fail, zfail, zpass);
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexParameterf(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexParameterf& c =
+ *static_cast<const gles2::cmds::TexParameterf*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ GLfloat param = static_cast<GLfloat>(c.param);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameterf", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameterf", pname, "pname");
+ return error::kNoError;
+ }
+ DoTexParameterf(target, pname, param);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexParameterfvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexParameterfvImmediate& c =
+ *static_cast<const gles2::cmds::TexParameterfvImmediate*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* params =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameterfv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameterfv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoTexParameterfv(target, pname, params);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexParameteri(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexParameteri& c =
+ *static_cast<const gles2::cmds::TexParameteri*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ GLint param = static_cast<GLint>(c.param);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteri", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteri", pname, "pname");
+ return error::kNoError;
+ }
+ DoTexParameteri(target, pname, param);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexParameterivImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexParameterivImmediate& c =
+ *static_cast<const gles2::cmds::TexParameterivImmediate*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLint), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLint* params =
+ GetImmediateDataAs<const GLint*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteriv", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteriv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoTexParameteriv(target, pname, params);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform1f(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform1f& c =
+ *static_cast<const gles2::cmds::Uniform1f*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat temp[1] = {
+ x,
+ };
+ DoUniform1fv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
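+// The *Immediate uniform handlers below read their array payload from the
+// command buffer immediately after the command header rather than from
+// shared memory. The size check always has the same shape:
+// ComputeDataSize(count, element_size, components, &data_size) presumably
+// computes count * components * element_size with overflow checking, and the
+// result must fit inside immediate_data_size before the pointer returned by
+// GetImmediateDataAs is dereferenced. As a hypothetical example, a client
+// call glUniform2fv(loc, 3, v) arrives with count == 3 and a payload of
+// 3 * 2 * sizeof(GLfloat) == 24 bytes.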
+error::Error GLES2DecoderImpl::HandleUniform1fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform1fvImmediate& c =
+ *static_cast<const gles2::cmds::Uniform1fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* v =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform1fv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform1i(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform1i& c =
+ *static_cast<const gles2::cmds::Uniform1i*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLint x = static_cast<GLint>(c.x);
+ DoUniform1i(location, x);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform1ivImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform1ivImmediate& c =
+ *static_cast<const gles2::cmds::Uniform1ivImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLint), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLint* v =
+ GetImmediateDataAs<const GLint*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform1iv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform2f(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform2f& c =
+ *static_cast<const gles2::cmds::Uniform2f*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ GLfloat temp[2] = {
+ x, y,
+ };
+ DoUniform2fv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform2fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform2fvImmediate& c =
+ *static_cast<const gles2::cmds::Uniform2fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 2, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* v =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform2fv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform2i(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform2i& c =
+ *static_cast<const gles2::cmds::Uniform2i*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLint temp[2] = {
+ x, y,
+ };
+ DoUniform2iv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform2ivImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform2ivImmediate& c =
+ *static_cast<const gles2::cmds::Uniform2ivImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLint), 2, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLint* v =
+ GetImmediateDataAs<const GLint*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform2iv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform3f(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform3f& c =
+ *static_cast<const gles2::cmds::Uniform3f*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ GLfloat z = static_cast<GLfloat>(c.z);
+ GLfloat temp[3] = {
+ x, y, z,
+ };
+ DoUniform3fv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform3fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform3fvImmediate& c =
+ *static_cast<const gles2::cmds::Uniform3fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 3, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* v =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform3fv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform3i(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform3i& c =
+ *static_cast<const gles2::cmds::Uniform3i*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLint z = static_cast<GLint>(c.z);
+ GLint temp[3] = {
+ x, y, z,
+ };
+ DoUniform3iv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform3ivImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform3ivImmediate& c =
+ *static_cast<const gles2::cmds::Uniform3ivImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLint), 3, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLint* v =
+ GetImmediateDataAs<const GLint*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform3iv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform4f(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform4f& c =
+ *static_cast<const gles2::cmds::Uniform4f*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ GLfloat z = static_cast<GLfloat>(c.z);
+ GLfloat w = static_cast<GLfloat>(c.w);
+ GLfloat temp[4] = {
+ x, y, z, w,
+ };
+ DoUniform4fv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform4fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform4fvImmediate& c =
+ *static_cast<const gles2::cmds::Uniform4fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 4, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* v =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform4fv(location, count, v);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform4i(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform4i& c =
+ *static_cast<const gles2::cmds::Uniform4i*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLint z = static_cast<GLint>(c.z);
+ GLint w = static_cast<GLint>(c.w);
+ GLint temp[4] = {
+ x, y, z, w,
+ };
+ DoUniform4iv(location, 1, &temp[0]);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniform4ivImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Uniform4ivImmediate& c =
+ *static_cast<const gles2::cmds::Uniform4ivImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLint), 4, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLint* v =
+ GetImmediateDataAs<const GLint*>(c, data_size, immediate_data_size);
+ if (v == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniform4iv(location, count, v);
+ return error::kNoError;
+}
+
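+// For the matrix uniforms the component count passed to ComputeDataSize is
+// the full matrix size: 4 floats per mat2, 9 per mat3 and 16 per mat4, so
+// count matrices of N*N floats each must fit in the immediate payload.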
+error::Error GLES2DecoderImpl::HandleUniformMatrix2fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::UniformMatrix2fvImmediate& c =
+ *static_cast<const gles2::cmds::UniformMatrix2fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ GLboolean transpose = static_cast<GLboolean>(c.transpose);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 4, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* value =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (value == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniformMatrix2fv(location, count, transpose, value);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniformMatrix3fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::UniformMatrix3fvImmediate& c =
+ *static_cast<const gles2::cmds::UniformMatrix3fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ GLboolean transpose = static_cast<GLboolean>(c.transpose);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 9, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* value =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (value == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniformMatrix3fv(location, count, transpose, value);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUniformMatrix4fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::UniformMatrix4fvImmediate& c =
+ *static_cast<const gles2::cmds::UniformMatrix4fvImmediate*>(cmd_data);
+ (void)c;
+ GLint location = static_cast<GLint>(c.location);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ GLboolean transpose = static_cast<GLboolean>(c.transpose);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLfloat), 16, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* value =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (value == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoUniformMatrix4fv(location, count, transpose, value);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleUseProgram(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::UseProgram& c =
+ *static_cast<const gles2::cmds::UseProgram*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ DoUseProgram(program);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleValidateProgram(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ValidateProgram& c =
+ *static_cast<const gles2::cmds::ValidateProgram*>(cmd_data);
+ (void)c;
+ GLuint program = c.program;
+ DoValidateProgram(program);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib1f(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib1f& c =
+ *static_cast<const gles2::cmds::VertexAttrib1f*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ DoVertexAttrib1f(indx, x);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib1fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib1fvImmediate& c =
+ *static_cast<const gles2::cmds::VertexAttrib1fvImmediate*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* values =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (values == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoVertexAttrib1fv(indx, values);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib2f(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib2f& c =
+ *static_cast<const gles2::cmds::VertexAttrib2f*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ DoVertexAttrib2f(indx, x, y);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib2fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib2fvImmediate& c =
+ *static_cast<const gles2::cmds::VertexAttrib2fvImmediate*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 2, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* values =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (values == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoVertexAttrib2fv(indx, values);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib3f(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib3f& c =
+ *static_cast<const gles2::cmds::VertexAttrib3f*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ GLfloat z = static_cast<GLfloat>(c.z);
+ DoVertexAttrib3f(indx, x, y, z);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib3fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib3fvImmediate& c =
+ *static_cast<const gles2::cmds::VertexAttrib3fvImmediate*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 3, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* values =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (values == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoVertexAttrib3fv(indx, values);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib4f(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib4f& c =
+ *static_cast<const gles2::cmds::VertexAttrib4f*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ GLfloat x = static_cast<GLfloat>(c.x);
+ GLfloat y = static_cast<GLfloat>(c.y);
+ GLfloat z = static_cast<GLfloat>(c.z);
+ GLfloat w = static_cast<GLfloat>(c.w);
+ DoVertexAttrib4f(indx, x, y, z, w);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleVertexAttrib4fvImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::VertexAttrib4fvImmediate& c =
+ *static_cast<const gles2::cmds::VertexAttrib4fvImmediate*>(cmd_data);
+ (void)c;
+ GLuint indx = static_cast<GLuint>(c.indx);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 4, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* values =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (values == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoVertexAttrib4fv(indx, values);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleViewport(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::Viewport& c =
+ *static_cast<const gles2::cmds::Viewport*>(cmd_data);
+ (void)c;
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glViewport", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glViewport", "height < 0");
+ return error::kNoError;
+ }
+ DoViewport(x, y, width, height);
+ return error::kNoError;
+}
+
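+// The CHROMIUM/EXT extension handlers below first consult the feature flags
+// negotiated for this context; if the extension is not exposed, the command
+// raises GL_INVALID_OPERATION and is otherwise ignored rather than being
+// treated as a decode error.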
+error::Error GLES2DecoderImpl::HandleBlitFramebufferCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BlitFramebufferCHROMIUM& c =
+ *static_cast<const gles2::cmds::BlitFramebufferCHROMIUM*>(cmd_data);
+ (void)c;
+ if (!features().chromium_framebuffer_multisample) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glBlitFramebufferCHROMIUM",
+ "function not available");
+ return error::kNoError;
+ }
+
+ error::Error error;
+ error = WillAccessBoundFramebufferForDraw();
+ if (error != error::kNoError)
+ return error;
+ error = WillAccessBoundFramebufferForRead();
+ if (error != error::kNoError)
+ return error;
+ GLint srcX0 = static_cast<GLint>(c.srcX0);
+ GLint srcY0 = static_cast<GLint>(c.srcY0);
+ GLint srcX1 = static_cast<GLint>(c.srcX1);
+ GLint srcY1 = static_cast<GLint>(c.srcY1);
+ GLint dstX0 = static_cast<GLint>(c.dstX0);
+ GLint dstY0 = static_cast<GLint>(c.dstY0);
+ GLint dstX1 = static_cast<GLint>(c.dstX1);
+ GLint dstY1 = static_cast<GLint>(c.dstY1);
+ GLbitfield mask = static_cast<GLbitfield>(c.mask);
+ GLenum filter = static_cast<GLenum>(c.filter);
+ if (!validators_->blit_filter.IsValid(filter)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBlitFramebufferCHROMIUM", filter, "filter");
+ return error::kNoError;
+ }
+ DoBlitFramebufferCHROMIUM(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleRenderbufferStorageMultisampleCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::RenderbufferStorageMultisampleCHROMIUM& c =
+ *static_cast<const gles2::cmds::RenderbufferStorageMultisampleCHROMIUM*>(
+ cmd_data);
+ (void)c;
+ if (!features().chromium_framebuffer_multisample) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei samples = static_cast<GLsizei>(c.samples);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glRenderbufferStorageMultisampleCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (samples < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "samples < 0");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glRenderbufferStorageMultisampleCHROMIUM",
+ internalformat,
+ "internalformat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "height < 0");
+ return error::kNoError;
+ }
+ DoRenderbufferStorageMultisampleCHROMIUM(
+ target, samples, internalformat, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleRenderbufferStorageMultisampleEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::RenderbufferStorageMultisampleEXT& c =
+ *static_cast<const gles2::cmds::RenderbufferStorageMultisampleEXT*>(
+ cmd_data);
+ (void)c;
+ if (!features().multisampled_render_to_texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glRenderbufferStorageMultisampleEXT",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei samples = static_cast<GLsizei>(c.samples);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glRenderbufferStorageMultisampleEXT", target, "target");
+ return error::kNoError;
+ }
+ if (samples < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "samples < 0");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glRenderbufferStorageMultisampleEXT",
+ internalformat,
+ "internalformat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "height < 0");
+ return error::kNoError;
+ }
+ DoRenderbufferStorageMultisampleEXT(
+ target, samples, internalformat, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleFramebufferTexture2DMultisampleEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::FramebufferTexture2DMultisampleEXT& c =
+ *static_cast<const gles2::cmds::FramebufferTexture2DMultisampleEXT*>(
+ cmd_data);
+ (void)c;
+ if (!features().multisampled_render_to_texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glFramebufferTexture2DMultisampleEXT",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum attachment = static_cast<GLenum>(c.attachment);
+ GLenum textarget = static_cast<GLenum>(c.textarget);
+ GLuint texture = c.texture;
+ GLint level = static_cast<GLint>(c.level);
+ GLsizei samples = static_cast<GLsizei>(c.samples);
+ if (!validators_->frame_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferTexture2DMultisampleEXT", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->attachment.IsValid(attachment)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferTexture2DMultisampleEXT", attachment, "attachment");
+ return error::kNoError;
+ }
+ if (!validators_->texture_target.IsValid(textarget)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glFramebufferTexture2DMultisampleEXT", textarget, "textarget");
+ return error::kNoError;
+ }
+ if (samples < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glFramebufferTexture2DMultisampleEXT",
+ "samples < 0");
+ return error::kNoError;
+ }
+ DoFramebufferTexture2DMultisample(
+ target, attachment, textarget, texture, level, samples);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexStorage2DEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexStorage2DEXT& c =
+ *static_cast<const gles2::cmds::TexStorage2DEXT*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei levels = static_cast<GLsizei>(c.levels);
+ GLenum internalFormat = static_cast<GLenum>(c.internalFormat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->texture_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexStorage2DEXT", target, "target");
+ return error::kNoError;
+ }
+ if (levels < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DEXT", "levels < 0");
+ return error::kNoError;
+ }
+ if (!validators_->texture_internal_format_storage.IsValid(internalFormat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glTexStorage2DEXT", internalFormat, "internalFormat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DEXT", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DEXT", "height < 0");
+ return error::kNoError;
+ }
+ DoTexStorage2DEXT(target, levels, internalFormat, width, height);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenQueriesEXTImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenQueriesEXTImmediate& c =
+ *static_cast<const gles2::cmds::GenQueriesEXTImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* queries =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (queries == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenQueriesEXTHelper(n, queries)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteQueriesEXTImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteQueriesEXTImmediate& c =
+ *static_cast<const gles2::cmds::DeleteQueriesEXTImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* queries =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (queries == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteQueriesEXTHelper(n, queries);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleInsertEventMarkerEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::InsertEventMarkerEXT& c =
+ *static_cast<const gles2::cmds::InsertEventMarkerEXT*>(cmd_data);
+ (void)c;
+
+ GLuint bucket_id = static_cast<GLuint>(c.bucket_id);
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string str;
+ if (!bucket->GetAsString(&str)) {
+ return error::kInvalidArguments;
+ }
+ DoInsertEventMarkerEXT(0, str.c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandlePushGroupMarkerEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::PushGroupMarkerEXT& c =
+ *static_cast<const gles2::cmds::PushGroupMarkerEXT*>(cmd_data);
+ (void)c;
+
+ GLuint bucket_id = static_cast<GLuint>(c.bucket_id);
+ Bucket* bucket = GetBucket(bucket_id);
+ if (!bucket || bucket->size() == 0) {
+ return error::kInvalidArguments;
+ }
+ std::string str;
+ if (!bucket->GetAsString(&str)) {
+ return error::kInvalidArguments;
+ }
+ DoPushGroupMarkerEXT(0, str.c_str());
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandlePopGroupMarkerEXT(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::PopGroupMarkerEXT& c =
+ *static_cast<const gles2::cmds::PopGroupMarkerEXT*>(cmd_data);
+ (void)c;
+ DoPopGroupMarkerEXT();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGenVertexArraysOESImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GenVertexArraysOESImmediate& c =
+ *static_cast<const gles2::cmds::GenVertexArraysOESImmediate*>(cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ GLuint* arrays =
+ GetImmediateDataAs<GLuint*>(c, data_size, immediate_data_size);
+ if (arrays == NULL) {
+ return error::kOutOfBounds;
+ }
+ if (!GenVertexArraysOESHelper(n, arrays)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDeleteVertexArraysOESImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DeleteVertexArraysOESImmediate& c =
+ *static_cast<const gles2::cmds::DeleteVertexArraysOESImmediate*>(
+ cmd_data);
+ (void)c;
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLuint* arrays =
+ GetImmediateDataAs<const GLuint*>(c, data_size, immediate_data_size);
+ if (arrays == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteVertexArraysOESHelper(n, arrays);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsVertexArrayOES(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::IsVertexArrayOES& c =
+ *static_cast<const gles2::cmds::IsVertexArrayOES*>(cmd_data);
+ (void)c;
+ GLuint array = c.array;
+ typedef cmds::IsVertexArrayOES::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsVertexArrayOES(array);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindVertexArrayOES(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindVertexArrayOES& c =
+ *static_cast<const gles2::cmds::BindVertexArrayOES*>(cmd_data);
+ (void)c;
+ GLuint array = c.array;
+ DoBindVertexArrayOES(array);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleSwapBuffers(uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::SwapBuffers& c =
+ *static_cast<const gles2::cmds::SwapBuffers*>(cmd_data);
+ (void)c;
+ DoSwapBuffers();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleGetMaxValueInBufferCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::GetMaxValueInBufferCHROMIUM& c =
+ *static_cast<const gles2::cmds::GetMaxValueInBufferCHROMIUM*>(cmd_data);
+ (void)c;
+ GLuint buffer_id = c.buffer_id;
+ GLsizei count = static_cast<GLsizei>(c.count);
+ GLenum type = static_cast<GLenum>(c.type);
+ GLuint offset = static_cast<GLuint>(c.offset);
+ typedef cmds::GetMaxValueInBufferCHROMIUM::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glGetMaxValueInBufferCHROMIUM", "count < 0");
+ return error::kNoError;
+ }
+ if (!validators_->get_max_index_type.IsValid(type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glGetMaxValueInBufferCHROMIUM", type, "type");
+ return error::kNoError;
+ }
+ *result_dst = DoGetMaxValueInBufferCHROMIUM(buffer_id, count, type, offset);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTexImageIOSurface2DCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TexImageIOSurface2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::TexImageIOSurface2DCHROMIUM*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLuint ioSurfaceId = static_cast<GLuint>(c.ioSurfaceId);
+ GLuint plane = static_cast<GLuint>(c.plane);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glTexImageIOSurface2DCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glTexImageIOSurface2DCHROMIUM", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glTexImageIOSurface2DCHROMIUM", "height < 0");
+ return error::kNoError;
+ }
+ DoTexImageIOSurface2DCHROMIUM(target, width, height, ioSurfaceId, plane);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleCopyTextureCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::CopyTextureCHROMIUM& c =
+ *static_cast<const gles2::cmds::CopyTextureCHROMIUM*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum source_id = static_cast<GLenum>(c.source_id);
+ GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLint level = static_cast<GLint>(c.level);
+ GLint internalformat = static_cast<GLint>(c.internalformat);
+ GLenum dest_type = static_cast<GLenum>(c.dest_type);
+ if (!validators_->texture_internal_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glCopyTextureCHROMIUM",
+ "internalformat GL_INVALID_VALUE");
+ return error::kNoError;
+ }
+ if (!validators_->pixel_type.IsValid(dest_type)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glCopyTextureCHROMIUM", dest_type, "dest_type");
+ return error::kNoError;
+ }
+ DoCopyTextureCHROMIUM(
+ target, source_id, dest_id, level, internalformat, dest_type);
+ return error::kNoError;
+}
+
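+// The mailbox commands below carry their name inline as a fixed 64-byte
+// blob (ComputeDataSize(1, sizeof(GLbyte), 64, ...)), presumably matching
+// the size of a CHROMIUM mailbox name; the target enum is validated before
+// the mailbox pointer itself is checked.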
+error::Error GLES2DecoderImpl::HandleProduceTextureCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ProduceTextureCHROMIUMImmediate& c =
+ *static_cast<const gles2::cmds::ProduceTextureCHROMIUMImmediate*>(
+ cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLbyte), 64, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLbyte* mailbox =
+ GetImmediateDataAs<const GLbyte*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glProduceTextureCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (mailbox == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoProduceTextureCHROMIUM(target, mailbox);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleProduceTextureDirectCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ProduceTextureDirectCHROMIUMImmediate& c =
+ *static_cast<const gles2::cmds::ProduceTextureDirectCHROMIUMImmediate*>(
+ cmd_data);
+ (void)c;
+ GLuint texture = c.texture;
+ GLenum target = static_cast<GLenum>(c.target);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLbyte), 64, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLbyte* mailbox =
+ GetImmediateDataAs<const GLbyte*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glProduceTextureDirectCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (mailbox == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoProduceTextureDirectCHROMIUM(texture, target, mailbox);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleConsumeTextureCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ConsumeTextureCHROMIUMImmediate& c =
+ *static_cast<const gles2::cmds::ConsumeTextureCHROMIUMImmediate*>(
+ cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLbyte), 64, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLbyte* mailbox =
+ GetImmediateDataAs<const GLbyte*>(c, data_size, immediate_data_size);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glConsumeTextureCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ if (mailbox == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoConsumeTextureCHROMIUM(target, mailbox);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBindTexImage2DCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::BindTexImage2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::BindTexImage2DCHROMIUM*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint imageId = static_cast<GLint>(c.imageId);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBindTexImage2DCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ DoBindTexImage2DCHROMIUM(target, imageId);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleReleaseTexImage2DCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::ReleaseTexImage2DCHROMIUM& c =
+ *static_cast<const gles2::cmds::ReleaseTexImage2DCHROMIUM*>(cmd_data);
+ (void)c;
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint imageId = static_cast<GLint>(c.imageId);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glReleaseTexImage2DCHROMIUM", target, "target");
+ return error::kNoError;
+ }
+ DoReleaseTexImage2DCHROMIUM(target, imageId);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleTraceEndCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::TraceEndCHROMIUM& c =
+ *static_cast<const gles2::cmds::TraceEndCHROMIUM*>(cmd_data);
+ (void)c;
+ DoTraceEndCHROMIUM();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDiscardFramebufferEXTImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DiscardFramebufferEXTImmediate& c =
+ *static_cast<const gles2::cmds::DiscardFramebufferEXTImmediate*>(
+ cmd_data);
+ (void)c;
+ if (!features().ext_discard_framebuffer) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glDiscardFramebufferEXT",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLenum), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLenum* attachments =
+ GetImmediateDataAs<const GLenum*>(c, data_size, immediate_data_size);
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, "glDiscardFramebufferEXT", "count < 0");
+ return error::kNoError;
+ }
+ if (attachments == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoDiscardFramebufferEXT(target, count, attachments);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleLoseContextCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::LoseContextCHROMIUM& c =
+ *static_cast<const gles2::cmds::LoseContextCHROMIUM*>(cmd_data);
+ (void)c;
+ GLenum current = static_cast<GLenum>(c.current);
+ GLenum other = static_cast<GLenum>(c.other);
+ if (!validators_->reset_status.IsValid(current)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glLoseContextCHROMIUM", current, "current");
+ return error::kNoError;
+ }
+ if (!validators_->reset_status.IsValid(other)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glLoseContextCHROMIUM", other, "other");
+ return error::kNoError;
+ }
+ DoLoseContextCHROMIUM(current, other);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDrawBuffersEXTImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::DrawBuffersEXTImmediate& c =
+ *static_cast<const gles2::cmds::DrawBuffersEXTImmediate*>(cmd_data);
+ (void)c;
+ GLsizei count = static_cast<GLsizei>(c.count);
+ uint32_t data_size;
+ if (!ComputeDataSize(count, sizeof(GLenum), 1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLenum* bufs =
+ GetImmediateDataAs<const GLenum*>(c, data_size, immediate_data_size);
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glDrawBuffersEXT", "count < 0");
+ return error::kNoError;
+ }
+ if (bufs == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoDrawBuffersEXT(count, bufs);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleMatrixLoadfCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::MatrixLoadfCHROMIUMImmediate& c =
+ *static_cast<const gles2::cmds::MatrixLoadfCHROMIUMImmediate*>(cmd_data);
+ (void)c;
+ if (!features().chromium_path_rendering) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glMatrixLoadfCHROMIUM",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum matrixMode = static_cast<GLenum>(c.matrixMode);
+ uint32_t data_size;
+ if (!ComputeDataSize(1, sizeof(GLfloat), 16, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ const GLfloat* m =
+ GetImmediateDataAs<const GLfloat*>(c, data_size, immediate_data_size);
+ if (!validators_->matrix_mode.IsValid(matrixMode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glMatrixLoadfCHROMIUM", matrixMode, "matrixMode");
+ return error::kNoError;
+ }
+ if (m == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoMatrixLoadfCHROMIUM(matrixMode, m);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleMatrixLoadIdentityCHROMIUM(
+ uint32_t immediate_data_size,
+ const void* cmd_data) {
+ const gles2::cmds::MatrixLoadIdentityCHROMIUM& c =
+ *static_cast<const gles2::cmds::MatrixLoadIdentityCHROMIUM*>(cmd_data);
+ (void)c;
+ if (!features().chromium_path_rendering) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glMatrixLoadIdentityCHROMIUM",
+ "function not available");
+ return error::kNoError;
+ }
+
+ GLenum matrixMode = static_cast<GLenum>(c.matrixMode);
+ if (!validators_->matrix_mode.IsValid(matrixMode)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glMatrixLoadIdentityCHROMIUM", matrixMode, "matrixMode");
+ return error::kNoError;
+ }
+ DoMatrixLoadIdentityCHROMIUM(matrixMode);
+ return error::kNoError;
+}
+
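+// SetCapabilityState records both the requested enable flag and a cached
+// copy of what was last sent to GL, and returns true only when the caller
+// still needs to issue the actual glEnable/glDisable. GL_DEPTH_TEST and
+// GL_STENCIL_TEST always return false and instead mark clear_state_dirty,
+// presumably because their effective state is re-applied together with the
+// framebuffer clear bookkeeping elsewhere in the decoder.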
+bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
+ switch (cap) {
+ case GL_BLEND:
+ state_.enable_flags.blend = enabled;
+ if (state_.enable_flags.cached_blend != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_blend = enabled;
+ return true;
+ }
+ return false;
+ case GL_CULL_FACE:
+ state_.enable_flags.cull_face = enabled;
+ if (state_.enable_flags.cached_cull_face != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_cull_face = enabled;
+ return true;
+ }
+ return false;
+ case GL_DEPTH_TEST:
+ state_.enable_flags.depth_test = enabled;
+ if (state_.enable_flags.cached_depth_test != enabled ||
+ state_.ignore_cached_state) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return false;
+ case GL_DITHER:
+ state_.enable_flags.dither = enabled;
+ if (state_.enable_flags.cached_dither != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_dither = enabled;
+ return true;
+ }
+ return false;
+ case GL_POLYGON_OFFSET_FILL:
+ state_.enable_flags.polygon_offset_fill = enabled;
+ if (state_.enable_flags.cached_polygon_offset_fill != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_polygon_offset_fill = enabled;
+ return true;
+ }
+ return false;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ state_.enable_flags.sample_alpha_to_coverage = enabled;
+ if (state_.enable_flags.cached_sample_alpha_to_coverage != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_sample_alpha_to_coverage = enabled;
+ return true;
+ }
+ return false;
+ case GL_SAMPLE_COVERAGE:
+ state_.enable_flags.sample_coverage = enabled;
+ if (state_.enable_flags.cached_sample_coverage != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_sample_coverage = enabled;
+ return true;
+ }
+ return false;
+ case GL_SCISSOR_TEST:
+ state_.enable_flags.scissor_test = enabled;
+ if (state_.enable_flags.cached_scissor_test != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_scissor_test = enabled;
+ return true;
+ }
+ return false;
+ case GL_STENCIL_TEST:
+ state_.enable_flags.stencil_test = enabled;
+ if (state_.enable_flags.cached_stencil_test != enabled ||
+ state_.ignore_cached_state) {
+ framebuffer_state_.clear_state_dirty = true;
+ }
+ return false;
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc b/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc
new file mode 100644
index 0000000..ff93ba1
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+
+namespace gpu {
+namespace gles2 {
+
+MockGLES2Decoder::MockGLES2Decoder()
+ : GLES2Decoder() {
+ ON_CALL(*this, GetCommandName(testing::_))
+ .WillByDefault(testing::Return(""));
+ ON_CALL(*this, MakeCurrent())
+ .WillByDefault(testing::Return(true));
+}
+
+MockGLES2Decoder::~MockGLES2Decoder() {}
+
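+// FakeDoCommands forwards to the non-mocked base implementation so a test
+// can run the real command-batching loop on top of a mocked decoder. A
+// typical (hypothetical) setup would look like:
+//   MockGLES2Decoder decoder;
+//   EXPECT_CALL(decoder, DoCommands(testing::_, testing::_, testing::_,
+//                                   testing::_))
+//       .WillRepeatedly(testing::Invoke(&decoder,
+//                                       &MockGLES2Decoder::FakeDoCommands));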
+error::Error MockGLES2Decoder::FakeDoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ return AsyncAPIInterface::DoCommands(
+ num_commands, buffer, num_entries, entries_processed);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_mock.h b/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
new file mode 100644
index 0000000..7346d8e
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
@@ -0,0 +1,137 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the mock GLES2Decoder class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_MOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_MOCK_H_
+
+#include <vector>
+
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "base/callback_forward.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "ui/gfx/size.h"
+
+namespace gfx {
+class GLContext;
+class GLSurface;
+}
+
+namespace gpu {
+namespace gles2 {
+
+class ContextGroup;
+class ErrorState;
+class QueryManager;
+struct ContextState;
+
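+// MockGLES2Decoder stubs the GLES2Decoder virtual interface with gmock so
+// unit tests can assert on decoder interactions without a real GL context.
+// By default (see the constructor in gles2_cmd_decoder_mock.cc) MakeCurrent()
+// returns true and GetCommandName() returns "".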
+class MockGLES2Decoder : public GLES2Decoder {
+ public:
+ MockGLES2Decoder();
+ virtual ~MockGLES2Decoder();
+
+ error::Error FakeDoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed);
+
+ MOCK_METHOD6(Initialize,
+ bool(const scoped_refptr<gfx::GLSurface>& surface,
+ const scoped_refptr<gfx::GLContext>& context,
+ bool offscreen,
+ const gfx::Size& size,
+ const DisallowedFeatures& disallowed_features,
+ const std::vector<int32>& attribs));
+ MOCK_METHOD1(Destroy, void(bool have_context));
+ MOCK_METHOD1(SetSurface, void(const scoped_refptr<gfx::GLSurface>& surface));
+ MOCK_METHOD1(ProduceFrontBuffer, void(const Mailbox& mailbox));
+ MOCK_METHOD1(ResizeOffscreenFrameBuffer, bool(const gfx::Size& size));
+ MOCK_METHOD0(MakeCurrent, bool());
+ MOCK_METHOD1(GetServiceIdForTesting, uint32(uint32 client_id));
+ MOCK_METHOD0(GetGLES2Util, GLES2Util*());
+ MOCK_METHOD0(GetGLSurface, gfx::GLSurface*());
+ MOCK_METHOD0(GetGLContext, gfx::GLContext*());
+ MOCK_METHOD0(GetContextGroup, ContextGroup*());
+ MOCK_METHOD0(GetContextState, const ContextState*());
+ MOCK_METHOD0(GetCapabilities, Capabilities());
+ MOCK_METHOD0(ProcessPendingQueries, bool());
+ MOCK_METHOD0(HasMoreIdleWork, bool());
+ MOCK_METHOD0(PerformIdleWork, void());
+ MOCK_METHOD1(RestoreState, void(const ContextState* prev_state));
+ MOCK_CONST_METHOD0(RestoreActiveTexture, void());
+ MOCK_CONST_METHOD1(
+ RestoreAllTextureUnitBindings, void(const ContextState* state));
+ MOCK_CONST_METHOD1(
+ RestoreActiveTextureUnitBinding, void(unsigned int target));
+ MOCK_CONST_METHOD0(RestoreBufferBindings, void());
+ MOCK_CONST_METHOD0(RestoreFramebufferBindings, void());
+ MOCK_CONST_METHOD0(RestoreGlobalState, void());
+ MOCK_CONST_METHOD0(RestoreProgramBindings, void());
+ MOCK_METHOD0(RestoreRenderbufferBindings, void());
+ MOCK_CONST_METHOD1(RestoreTextureState, void(unsigned service_id));
+ MOCK_CONST_METHOD1(RestoreTextureUnitBindings, void(unsigned unit));
+ MOCK_CONST_METHOD0(ClearAllAttributes, void());
+ MOCK_CONST_METHOD0(RestoreAllAttributes, void());
+ MOCK_METHOD0(GetQueryManager, gpu::gles2::QueryManager*());
+ MOCK_METHOD0(GetVertexArrayManager, gpu::gles2::VertexArrayManager*());
+ MOCK_METHOD0(GetImageManager, gpu::gles2::ImageManager*());
+ MOCK_METHOD1(
+ SetResizeCallback, void(const base::Callback<void(gfx::Size, float)>&));
+ MOCK_METHOD0(GetAsyncPixelTransferDelegate,
+ AsyncPixelTransferDelegate*());
+ MOCK_METHOD0(GetAsyncPixelTransferManager,
+ AsyncPixelTransferManager*());
+ MOCK_METHOD0(ResetAsyncPixelTransferManagerForTest, void());
+ MOCK_METHOD1(SetAsyncPixelTransferManagerForTest,
+ void(AsyncPixelTransferManager*));
+ MOCK_METHOD1(SetIgnoreCachedStateForTest, void(bool ignore));
+ MOCK_METHOD3(DoCommand, error::Error(unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data));
+ MOCK_METHOD4(DoCommands,
+ error::Error(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed));
+ MOCK_METHOD2(GetServiceTextureId, bool(uint32 client_texture_id,
+ uint32* service_texture_id));
+ MOCK_METHOD0(GetContextLostReason, error::ContextLostReason());
+ MOCK_CONST_METHOD1(GetCommandName, const char*(unsigned int command_id));
+ MOCK_METHOD10(ClearLevel, bool(
+ unsigned service_id,
+ unsigned bind_target,
+ unsigned target,
+ int level,
+ unsigned internal_format,
+ unsigned format,
+ unsigned type,
+ int width,
+ int height,
+ bool is_texture_immutable));
+ MOCK_METHOD0(GetErrorState, ErrorState *());
+
+ MOCK_METHOD0(GetLogger, Logger*());
+ MOCK_METHOD1(SetShaderCacheCallback,
+ void(const ShaderCacheCallback& callback));
+ MOCK_METHOD1(SetWaitSyncPointCallback,
+ void(const WaitSyncPointCallback& callback));
+ MOCK_METHOD1(WaitForReadPixels,
+ void(base::Closure callback));
+ MOCK_METHOD0(GetTextureUploadCount, uint32());
+ MOCK_METHOD0(GetTotalTextureUploadTime, base::TimeDelta());
+ MOCK_METHOD0(GetTotalProcessingCommandsTime, base::TimeDelta());
+ MOCK_METHOD1(AddProcessingCommandsTime, void(base::TimeDelta));
+ MOCK_METHOD0(WasContextLost, bool());
+ MOCK_METHOD0(WasContextLostByRobustnessExtension, bool());
+ MOCK_METHOD1(LoseContext, void(uint32 reset_status));
+
+ DISALLOW_COPY_AND_ASSIGN(MockGLES2Decoder);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_MOCK_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
new file mode 100644
index 0000000..e97b4c4
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -0,0 +1,1317 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+void GLES2DecoderRGBBackbufferTest::SetUp() {
+ // Test codepath with workaround clear_alpha_in_readpixels because
+ // ReadPixelsEmulator emulates the incorrect driver behavior.
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::CLEAR_ALPHA_IN_READPIXELS));
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoderWithCommandLine(init, &command_line);
+ SetupDefaultProgram();
+}
+
+// Override default setup so nothing gets set up.
+void GLES2DecoderManualInitTest::SetUp() {
+}
+
+void GLES2DecoderManualInitTest::EnableDisableTest(GLenum cap,
+ bool enable,
+ bool expect_set) {
+ if (expect_set) {
+ SetupExpectationsForEnableDisable(cap, enable);
+ }
+ if (enable) {
+ Enable cmd;
+ cmd.Init(cap);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ } else {
+ Disable cmd;
+ cmd.Init(cap);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+}
+
+TEST_P(GLES2DecoderTest, GetIntegervCached) {
+ struct TestInfo {
+ GLenum pname;
+ GLint expected;
+ };
+ TestInfo tests[] = {
+ {
+ GL_MAX_TEXTURE_SIZE, TestHelper::kMaxTextureSize,
+ },
+ {
+ GL_MAX_CUBE_MAP_TEXTURE_SIZE, TestHelper::kMaxCubeMapTextureSize,
+ },
+ {
+ GL_MAX_RENDERBUFFER_SIZE, TestHelper::kMaxRenderbufferSize,
+ },
+ };
+ typedef GetIntegerv::Result Result;
+ for (size_t ii = 0; ii < sizeof(tests) / sizeof(tests[0]); ++ii) {
+ const TestInfo& test = tests[ii];
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetIntegerv(test.pname, _)).Times(0);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(test.pname, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(test.pname),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(test.expected, result->GetData()[0]);
+ }
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetMaxValueInBufferCHROMIUM) {
+ SetupIndexBuffer();
+ GetMaxValueInBufferCHROMIUM::Result* result =
+ static_cast<GetMaxValueInBufferCHROMIUM::Result*>(shared_memory_address_);
+ *result = 0;
+
+ GetMaxValueInBufferCHROMIUM cmd;
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(7u, *result);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(100u, *result);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(kInvalidClientId,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(client_element_buffer_id_,
+ kOutOfRangeIndexRangeEnd,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kOutOfRangeIndexRangeEnd * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_element_buffer_id_,
+ kValidIndexRangeCount + 1,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, IsBuffer) {
+ EXPECT_FALSE(DoIsBuffer(client_buffer_id_));
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ EXPECT_TRUE(DoIsBuffer(client_buffer_id_));
+ DoDeleteBuffer(client_buffer_id_, kServiceBufferId);
+ EXPECT_FALSE(DoIsBuffer(client_buffer_id_));
+}
+
+TEST_P(GLES2DecoderTest, IsFramebuffer) {
+ EXPECT_FALSE(DoIsFramebuffer(client_framebuffer_id_));
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_TRUE(DoIsFramebuffer(client_framebuffer_id_));
+ DoDeleteFramebuffer(client_framebuffer_id_,
+ kServiceFramebufferId,
+ true,
+ GL_FRAMEBUFFER,
+ 0,
+ true,
+ GL_FRAMEBUFFER,
+ 0);
+ EXPECT_FALSE(DoIsFramebuffer(client_framebuffer_id_));
+}
+
+TEST_P(GLES2DecoderTest, IsProgram) {
+ // IsProgram is true as soon as the program is created.
+ EXPECT_TRUE(DoIsProgram(client_program_id_));
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ DoDeleteProgram(client_program_id_, kServiceProgramId);
+ EXPECT_FALSE(DoIsProgram(client_program_id_));
+}
+
+TEST_P(GLES2DecoderTest, IsRenderbuffer) {
+ EXPECT_FALSE(DoIsRenderbuffer(client_renderbuffer_id_));
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EXPECT_TRUE(DoIsRenderbuffer(client_renderbuffer_id_));
+ DoDeleteRenderbuffer(client_renderbuffer_id_, kServiceRenderbufferId);
+ EXPECT_FALSE(DoIsRenderbuffer(client_renderbuffer_id_));
+}
+
+TEST_P(GLES2DecoderTest, IsShader) {
+  // IsShader is true as soon as the shader is created.
+ EXPECT_TRUE(DoIsShader(client_shader_id_));
+ DoDeleteShader(client_shader_id_, kServiceShaderId);
+ EXPECT_FALSE(DoIsShader(client_shader_id_));
+}
+
+TEST_P(GLES2DecoderTest, IsTexture) {
+ EXPECT_FALSE(DoIsTexture(client_texture_id_));
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ EXPECT_TRUE(DoIsTexture(client_texture_id_));
+ DoDeleteTexture(client_texture_id_, kServiceTextureId);
+ EXPECT_FALSE(DoIsTexture(client_texture_id_));
+}
+
+TEST_P(GLES2DecoderTest, GetMultipleIntegervCHROMIUMValidArgs) {
+ const GLsizei kCount = 3;
+ GLenum* pnames = GetSharedMemoryAs<GLenum*>();
+ pnames[0] = GL_DEPTH_WRITEMASK;
+ pnames[1] = GL_COLOR_WRITEMASK;
+ pnames[2] = GL_STENCIL_WRITEMASK;
+ GLint* results =
+ GetSharedMemoryAsWithOffset<GLint*>(sizeof(*pnames) * kCount);
+
+ GLsizei num_results = 0;
+ for (GLsizei ii = 0; ii < kCount; ++ii) {
+ num_results += decoder_->GetGLES2Util()->GLGetNumValuesReturned(pnames[ii]);
+ }
+ const GLsizei result_size = num_results * sizeof(*results);
+ memset(results, 0, result_size);
+
+ const GLint kSentinel = 0x12345678;
+ results[num_results] = kSentinel;
+
+ GetMultipleIntegervCHROMIUM cmd;
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + sizeof(*pnames) * kCount,
+ result_size);
+
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, results[0]); // Depth writemask
+ EXPECT_EQ(1, results[1]); // color writemask red
+ EXPECT_EQ(1, results[2]); // color writemask green
+ EXPECT_EQ(1, results[3]); // color writemask blue
+ EXPECT_EQ(1, results[4]); // color writemask alpha
+  EXPECT_EQ(-1, results[5]);  // stencil writemask
+ EXPECT_EQ(kSentinel, results[num_results]); // End of results
+}
+
+TEST_P(GLES2DecoderTest, GetMultipleIntegervCHROMIUMInvalidArgs) {
+ const GLsizei kCount = 3;
+ // Offset the pnames because GLGetError will use the first uint32.
+ const uint32 kPnameOffset = sizeof(uint32);
+ const uint32 kResultsOffset = kPnameOffset + sizeof(GLint) * kCount;
+ GLenum* pnames = GetSharedMemoryAsWithOffset<GLenum*>(kPnameOffset);
+ pnames[0] = GL_DEPTH_WRITEMASK;
+ pnames[1] = GL_COLOR_WRITEMASK;
+ pnames[2] = GL_STENCIL_WRITEMASK;
+ GLint* results = GetSharedMemoryAsWithOffset<GLint*>(kResultsOffset);
+
+ GLsizei num_results = 0;
+ for (GLsizei ii = 0; ii < kCount; ++ii) {
+ num_results += decoder_->GetGLES2Util()->GLGetNumValuesReturned(pnames[ii]);
+ }
+ const GLsizei result_size = num_results * sizeof(*results);
+ memset(results, 0, result_size);
+
+ const GLint kSentinel = 0x12345678;
+ results[num_results] = kSentinel;
+
+ GetMultipleIntegervCHROMIUM cmd;
+ // Check bad pnames pointer.
+ cmd.Init(kInvalidSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+  // Check bad pnames offset.
+ cmd.Init(kSharedMemoryId,
+ kInvalidSharedMemoryOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check bad count.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ static_cast<GLuint>(-1),
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check bad results pointer.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+  // Check bad results offset.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset,
+ result_size);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check bad size.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // Check bad size.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size - 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // Check bad enum.
+ cmd.Init(kSharedMemoryId,
+ kSharedMemoryOffset + kPnameOffset,
+ kCount,
+ kSharedMemoryId,
+ kSharedMemoryOffset + kResultsOffset,
+ result_size);
+ GLenum temp = pnames[2];
+ pnames[2] = GL_TRUE;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ pnames[2] = temp;
+  // Check the command fails if the client did not clear the results area.
+ results[1] = 1;
+ EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
+ // Check buffer is what we expect
+ EXPECT_EQ(0, results[0]);
+ EXPECT_EQ(1, results[1]);
+ EXPECT_EQ(0, results[2]);
+ EXPECT_EQ(0, results[3]);
+ EXPECT_EQ(0, results[4]);
+ EXPECT_EQ(0, results[5]);
+ EXPECT_EQ(kSentinel, results[num_results]); // End of results
+}
+
+TEST_P(GLES2DecoderManualInitTest, BindGeneratesResourceFalse) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ BindBuffer cmd2;
+ cmd2.Init(GL_ARRAY_BUFFER, kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ BindFramebuffer cmd3;
+ cmd3.Init(GL_FRAMEBUFFER, kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd3));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ BindRenderbuffer cmd4;
+ cmd4.Init(GL_RENDERBUFFER, kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd4));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, EnableFeatureCHROMIUMBadBucket) {
+ const uint32 kBadBucketId = 123;
+ EnableFeatureCHROMIUM cmd;
+ cmd.Init(kBadBucketId, shared_memory_id_, shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, RequestExtensionCHROMIUMBadBucket) {
+ const uint32 kBadBucketId = 123;
+ RequestExtensionCHROMIUM cmd;
+ cmd.Init(kBadBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, BeginQueryEXTDisabled) {
+  // TODO: Test that BeginQueryEXT fails when queries are not enabled.
+}
+
+TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXT) {
+ InitState init;
+ init.extensions = "GL_EXT_occlusion_query_boolean";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ // Test end fails if no begin.
+ EndQueryEXT end_cmd;
+ end_cmd.Init(GL_ANY_SAMPLES_PASSED_EXT, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ BeginQueryEXT begin_cmd;
+
+ // Test id = 0 fails.
+ begin_cmd.Init(
+ GL_ANY_SAMPLES_PASSED_EXT, 0, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ GenHelper<GenQueriesEXTImmediate>(kNewClientId);
+
+ // Test valid parameters work.
+ EXPECT_CALL(*gl_, GenQueriesARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BeginQueryARB(GL_ANY_SAMPLES_PASSED_EXT, kNewServiceId))
+ .Times(1)
+ .RetiresOnSaturation();
+
+  // The query object should not be created until BeginQueryEXT.
+ QueryManager* query_manager = decoder_->GetQueryManager();
+ ASSERT_TRUE(query_manager != NULL);
+ QueryManager::Query* query = query_manager->GetQuery(kNewClientId);
+ EXPECT_TRUE(query == NULL);
+
+ // BeginQueryEXT should fail if id is not generated from GenQueriesEXT.
+ begin_cmd.Init(GL_ANY_SAMPLES_PASSED_EXT,
+ kInvalidClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ begin_cmd.Init(GL_ANY_SAMPLES_PASSED_EXT,
+ kNewClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+  // After BeginQueryEXT the id should have an associated query object.
+ query = query_manager->GetQuery(kNewClientId);
+ ASSERT_TRUE(query != NULL);
+ EXPECT_FALSE(query->pending());
+
+  // Test that beginning the query again fails.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test end fails with different target
+ end_cmd.Init(GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test end succeeds
+ EXPECT_CALL(*gl_, EndQueryARB(GL_ANY_SAMPLES_PASSED_EXT))
+ .Times(1)
+ .RetiresOnSaturation();
+ end_cmd.Init(GL_ANY_SAMPLES_PASSED_EXT, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(query->pending());
+
+ EXPECT_CALL(*gl_, DeleteQueriesARB(1, _)).Times(1).RetiresOnSaturation();
+}
+
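+// A query target for the bad-shared-memory tests below. |is_gl| is true when
+// the target is backed by a real GL query object, which determines the
+// GenQueriesARB/BeginQueryARB/EndQueryARB expectations.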
+struct QueryType {
+ GLenum type;
+ bool is_gl;
+};
+
+const QueryType kQueryTypes[] = {
+ {GL_COMMANDS_ISSUED_CHROMIUM, false},
+ {GL_LATENCY_QUERY_CHROMIUM, false},
+ {GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM, false},
+ {GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM, false},
+ {GL_GET_ERROR_QUERY_CHROMIUM, false},
+ {GL_COMMANDS_COMPLETED_CHROMIUM, false},
+ {GL_ANY_SAMPLES_PASSED_EXT, true},
+};
+
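+// Runs a full BeginQueryEXT/EndQueryEXT/ProcessPendingQueries() cycle for
+// |query_type| with the given shared memory id and offset, and expects at
+// least one of those steps to fail when the memory reference is invalid.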
+static void CheckBeginEndQueryBadMemoryFails(GLES2DecoderTestBase* test,
+ GLuint client_id,
+ GLuint service_id,
+ const QueryType& query_type,
+ int32 shm_id,
+ uint32 shm_offset) {
+ // We need to reset the decoder on each iteration, because we lose the
+ // context every time.
+ GLES2DecoderTestBase::InitState init;
+ init.extensions = "GL_EXT_occlusion_query_boolean GL_ARB_sync";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ test->InitDecoder(init);
+ ::testing::StrictMock< ::gfx::MockGLInterface>* gl = test->GetGLMock();
+
+ BeginQueryEXT begin_cmd;
+
+ test->GenHelper<GenQueriesEXTImmediate>(client_id);
+
+ if (query_type.is_gl) {
+ EXPECT_CALL(*gl, GenQueriesARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(service_id))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BeginQueryARB(query_type.type, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+
+ // Test bad shared memory fails
+ begin_cmd.Init(query_type.type, client_id, shm_id, shm_offset);
+ error::Error error1 = test->ExecuteCmd(begin_cmd);
+
+ if (query_type.is_gl) {
+ EXPECT_CALL(*gl, EndQueryARB(query_type.type))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (query_type.type == GL_GET_ERROR_QUERY_CHROMIUM) {
+ EXPECT_CALL(*gl, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+ GLsync kGlSync = reinterpret_cast<GLsync>(0xdeadbeef);
+ if (query_type.type == GL_COMMANDS_COMPLETED_CHROMIUM) {
+ EXPECT_CALL(*gl, Flush()).RetiresOnSaturation();
+ EXPECT_CALL(*gl, FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0))
+ .WillOnce(Return(kGlSync))
+ .RetiresOnSaturation();
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ }
+
+ EndQueryEXT end_cmd;
+ end_cmd.Init(query_type.type, 1);
+ error::Error error2 = test->ExecuteCmd(end_cmd);
+
+ if (query_type.is_gl) {
+ EXPECT_CALL(
+ *gl, GetQueryObjectuivARB(service_id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetQueryObjectuivARB(service_id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ }
+ if (query_type.type == GL_COMMANDS_COMPLETED_CHROMIUM) {
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ EXPECT_CALL(*gl, ClientWaitSync(kGlSync, _, _))
+ .WillOnce(Return(GL_ALREADY_SIGNALED))
+ .RetiresOnSaturation();
+ }
+
+ QueryManager* query_manager = test->GetDecoder()->GetQueryManager();
+ ASSERT_TRUE(query_manager != NULL);
+ bool process_success = query_manager->ProcessPendingQueries();
+
+ EXPECT_TRUE(error1 != error::kNoError || error2 != error::kNoError ||
+ !process_success);
+
+ if (query_type.is_gl) {
+ EXPECT_CALL(*gl, DeleteQueriesARB(1, _)).Times(1).RetiresOnSaturation();
+ }
+ if (query_type.type == GL_COMMANDS_COMPLETED_CHROMIUM) {
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ EXPECT_CALL(*gl, DeleteSync(kGlSync)).Times(1).RetiresOnSaturation();
+ }
+ test->ResetDecoder();
+}
+
+TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXTBadMemoryIdFails) {
+ for (size_t i = 0; i < arraysize(kQueryTypes); ++i) {
+ CheckBeginEndQueryBadMemoryFails(this,
+ kNewClientId,
+ kNewServiceId,
+ kQueryTypes[i],
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXTBadMemoryOffsetFails) {
+ for (size_t i = 0; i < arraysize(kQueryTypes); ++i) {
+ // Out-of-bounds.
+ CheckBeginEndQueryBadMemoryFails(this,
+ kNewClientId,
+ kNewServiceId,
+ kQueryTypes[i],
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ // Overflow.
+ CheckBeginEndQueryBadMemoryFails(this,
+ kNewClientId,
+ kNewServiceId,
+ kQueryTypes[i],
+ kSharedMemoryId,
+ 0xfffffffcu);
+ }
+}
+
+TEST_P(GLES2DecoderTest, BeginEndQueryEXTCommandsIssuedCHROMIUM) {
+ BeginQueryEXT begin_cmd;
+
+ GenHelper<GenQueriesEXTImmediate>(kNewClientId);
+
+ // Test valid parameters work.
+ begin_cmd.Init(GL_COMMANDS_ISSUED_CHROMIUM,
+ kNewClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ QueryManager* query_manager = decoder_->GetQueryManager();
+ ASSERT_TRUE(query_manager != NULL);
+ QueryManager::Query* query = query_manager->GetQuery(kNewClientId);
+ ASSERT_TRUE(query != NULL);
+ EXPECT_FALSE(query->pending());
+
+ // Test end succeeds
+ EndQueryEXT end_cmd;
+ end_cmd.Init(GL_COMMANDS_ISSUED_CHROMIUM, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_FALSE(query->pending());
+}
+
+TEST_P(GLES2DecoderTest, BeginEndQueryEXTGetErrorQueryCHROMIUM) {
+ BeginQueryEXT begin_cmd;
+
+ GenHelper<GenQueriesEXTImmediate>(kNewClientId);
+
+ // Test valid parameters work.
+ begin_cmd.Init(GL_GET_ERROR_QUERY_CHROMIUM,
+ kNewClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ QueryManager* query_manager = decoder_->GetQueryManager();
+ ASSERT_TRUE(query_manager != NULL);
+ QueryManager::Query* query = query_manager->GetQuery(kNewClientId);
+ ASSERT_TRUE(query != NULL);
+ EXPECT_FALSE(query->pending());
+
+ // Test end succeeds
+ QuerySync* sync = static_cast<QuerySync*>(shared_memory_address_);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_INVALID_VALUE))
+ .RetiresOnSaturation();
+
+ EndQueryEXT end_cmd;
+ end_cmd.Init(GL_GET_ERROR_QUERY_CHROMIUM, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_FALSE(query->pending());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE),
+ static_cast<GLenum>(sync->result));
+}
+
+TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXTCommandsCompletedCHROMIUM) {
+ InitState init;
+ init.extensions = "GL_EXT_occlusion_query_boolean GL_ARB_sync";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ GenHelper<GenQueriesEXTImmediate>(kNewClientId);
+
+ BeginQueryEXT begin_cmd;
+ begin_cmd.Init(GL_COMMANDS_COMPLETED_CHROMIUM,
+ kNewClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ QueryManager* query_manager = decoder_->GetQueryManager();
+ ASSERT_TRUE(query_manager != NULL);
+ QueryManager::Query* query = query_manager->GetQuery(kNewClientId);
+ ASSERT_TRUE(query != NULL);
+ EXPECT_FALSE(query->pending());
+
+ GLsync kGlSync = reinterpret_cast<GLsync>(0xdeadbeef);
+ EXPECT_CALL(*gl_, Flush()).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0))
+ .WillOnce(Return(kGlSync))
+ .RetiresOnSaturation();
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl_, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+
+ EndQueryEXT end_cmd;
+ end_cmd.Init(GL_COMMANDS_COMPLETED_CHROMIUM, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(query->pending());
+
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl_, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ EXPECT_CALL(*gl_, ClientWaitSync(kGlSync, _, _))
+ .WillOnce(Return(GL_TIMEOUT_EXPIRED))
+ .RetiresOnSaturation();
+ bool process_success = query_manager->ProcessPendingQueries();
+
+ EXPECT_TRUE(process_success);
+ EXPECT_TRUE(query->pending());
+
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl_, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ EXPECT_CALL(*gl_, ClientWaitSync(kGlSync, _, _))
+ .WillOnce(Return(GL_ALREADY_SIGNALED))
+ .RetiresOnSaturation();
+ process_success = query_manager->ProcessPendingQueries();
+
+ EXPECT_TRUE(process_success);
+ EXPECT_FALSE(query->pending());
+ QuerySync* sync = static_cast<QuerySync*>(shared_memory_address_);
+ EXPECT_EQ(static_cast<GLenum>(0), static_cast<GLenum>(sync->result));
+
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl_, IsSync(kGlSync))
+ .WillOnce(Return(GL_TRUE))
+ .RetiresOnSaturation();
+#endif
+ EXPECT_CALL(*gl_, DeleteSync(kGlSync)).Times(1).RetiresOnSaturation();
+ ResetDecoder();
+}
+
+TEST_P(GLES2DecoderTest, IsEnabledReturnsCachedValue) {
+ // NOTE: There are no expectations because no GL functions should be
+  // called for DEPTH_TEST or STENCIL_TEST.
+ static const GLenum kStates[] = {
+ GL_DEPTH_TEST, GL_STENCIL_TEST,
+ };
+ for (size_t ii = 0; ii < arraysize(kStates); ++ii) {
+ Enable enable_cmd;
+ GLenum state = kStates[ii];
+ enable_cmd.Init(state);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(enable_cmd));
+ IsEnabled::Result* result =
+ static_cast<IsEnabled::Result*>(shared_memory_address_);
+ IsEnabled is_enabled_cmd;
+ is_enabled_cmd.Init(state, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(is_enabled_cmd));
+ EXPECT_NE(0u, *result);
+ Disable disable_cmd;
+ disable_cmd.Init(state);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(disable_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(is_enabled_cmd));
+ EXPECT_EQ(0u, *result);
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, GpuMemoryManagerCHROMIUM) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ Texture* texture = GetTexture(client_texture_id_)->texture();
+ EXPECT_TRUE(texture != NULL);
+ EXPECT_TRUE(texture->pool() == GL_TEXTURE_POOL_UNMANAGED_CHROMIUM);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_POOL_CHROMIUM,
+ GL_TEXTURE_POOL_UNMANAGED_CHROMIUM);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_POOL_CHROMIUM,
+ GL_TEXTURE_POOL_MANAGED_CHROMIUM);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_TRUE(texture->pool() == GL_TEXTURE_POOL_MANAGED_CHROMIUM);
+
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_POOL_CHROMIUM, GL_NONE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+namespace {
+
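+// A minimal MemoryTracker that only records per-pool size deltas relative to
+// the initial default-texture allocation. EnsureGPUMemoryAvailable() is a
+// mock method, so each test decides whether allocations succeed.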
+class SizeOnlyMemoryTracker : public MemoryTracker {
+ public:
+ SizeOnlyMemoryTracker() {
+ // These are the default textures. 1 for TEXTURE_2D and 6 faces for
+ // TEXTURE_CUBE_MAP.
+ const size_t kInitialUnmanagedPoolSize = 7 * 4;
+ const size_t kInitialManagedPoolSize = 0;
+ pool_infos_[MemoryTracker::kUnmanaged].initial_size =
+ kInitialUnmanagedPoolSize;
+ pool_infos_[MemoryTracker::kManaged].initial_size = kInitialManagedPoolSize;
+ }
+
+ // Ensure a certain amount of GPU memory is free. Returns true on success.
+ MOCK_METHOD1(EnsureGPUMemoryAvailable, bool(size_t size_needed));
+
+ virtual void TrackMemoryAllocatedChange(size_t old_size,
+ size_t new_size,
+ Pool pool) {
+ PoolInfo& info = pool_infos_[pool];
+ info.size += new_size - old_size;
+ }
+
+ size_t GetPoolSize(Pool pool) {
+ const PoolInfo& info = pool_infos_[pool];
+ return info.size - info.initial_size;
+ }
+
+ private:
+ virtual ~SizeOnlyMemoryTracker() {}
+ struct PoolInfo {
+ PoolInfo() : initial_size(0), size(0) {}
+ size_t initial_size;
+ size_t size;
+ };
+ std::map<Pool, PoolInfo> pool_infos_;
+};
+
+} // anonymous namespace.
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerInitialSize) {
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+  // Expect that the size tracked beyond the initial allocation is 0.
+ EXPECT_EQ(0u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(0u, memory_tracker->GetPoolSize(MemoryTracker::kManaged));
+}
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerTexImage2D) {
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 4,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(64))
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(64u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check we get out of memory and no call to glTexImage2D if Ensure fails.
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(64))
+ .WillOnce(Return(false))
+ .RetiresOnSaturation();
+ TexImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_EQ(64u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+}
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerTexStorage2DEXT) {
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Check we get out of memory and no call to glTexStorage2DEXT
+ // if Ensure fails.
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(false))
+ .RetiresOnSaturation();
+ TexStorage2DEXT cmd;
+ cmd.Init(GL_TEXTURE_2D, 1, GL_RGBA8, 8, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerCopyTexImage2D) {
+ GLenum target = GL_TEXTURE_2D;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 4;
+ GLsizei height = 8;
+ GLint border = 0;
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ CopyTexImage2D(
+ target, level, internal_format, 0, 0, width, height, border))
+ .Times(1)
+ .RetiresOnSaturation();
+ CopyTexImage2D cmd;
+ cmd.Init(target, level, internal_format, 0, 0, width, height);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Check we get out of memory and no call to glCopyTexImage2D if Ensure fails.
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(false))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+}
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerRenderbufferStorage) {
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(GL_RENDERBUFFER, GL_RGBA, 8, 4))
+ .Times(1)
+ .RetiresOnSaturation();
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 8, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+ // Check we get out of memory and no call to glRenderbufferStorage if Ensure
+ // fails.
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(false))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kUnmanaged));
+}
+
+TEST_P(GLES2DecoderManualInitTest, MemoryTrackerBufferData) {
+ scoped_refptr<SizeOnlyMemoryTracker> memory_tracker =
+ new SizeOnlyMemoryTracker();
+ set_memory_tracker(memory_tracker.get());
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BufferData(GL_ARRAY_BUFFER, 128, _, GL_STREAM_DRAW))
+ .Times(1)
+ .RetiresOnSaturation();
+ BufferData cmd;
+ cmd.Init(GL_ARRAY_BUFFER, 128, 0, 0, GL_STREAM_DRAW);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kManaged));
+ // Check we get out of memory and no call to glBufferData if Ensure
+ // fails.
+ EXPECT_CALL(*memory_tracker.get(), EnsureGPUMemoryAvailable(128))
+ .WillOnce(Return(false))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_EQ(128u, memory_tracker->GetPoolSize(MemoryTracker::kManaged));
+}
+
+TEST_P(GLES2DecoderManualInitTest, ImmutableCopyTexImage2D) {
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLint kLevel = 0;
+ const GLenum kInternalFormat = GL_RGBA;
+ const GLenum kSizedInternalFormat = GL_RGBA8;
+ const GLsizei kWidth = 4;
+ const GLsizei kHeight = 8;
+ const GLint kBorder = 0;
+ InitState init;
+ init.extensions = "GL_EXT_texture_storage";
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+  // CopyTexImage2D may call GetError an arbitrary number of times.
+ EXPECT_CALL(*gl_, GetError())
+ .Times(AtLeast(1));
+
+ EXPECT_CALL(*gl_,
+ CopyTexImage2D(
+ kTarget, kLevel, kInternalFormat, 0, 0, kWidth, kHeight,
+ kBorder))
+ .Times(1);
+
+ EXPECT_CALL(*gl_,
+ TexStorage2DEXT(
+ kTarget, kLevel, kSizedInternalFormat, kWidth, kHeight))
+ .Times(1);
+ CopyTexImage2D copy_cmd;
+ copy_cmd.Init(kTarget, kLevel, kInternalFormat, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(copy_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexStorage2DEXT storage_cmd;
+ storage_cmd.Init(kTarget, kLevel, kSizedInternalFormat, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(storage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // This should not invoke CopyTexImage2D.
+ copy_cmd.Init(kTarget, kLevel, kInternalFormat, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(copy_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, LoseContextCHROMIUMValidArgs) {
+ EXPECT_CALL(*mock_decoder_, LoseContext(GL_GUILTY_CONTEXT_RESET_ARB))
+ .Times(1);
+ cmds::LoseContextCHROMIUM cmd;
+ cmd.Init(GL_GUILTY_CONTEXT_RESET_ARB, GL_GUILTY_CONTEXT_RESET_ARB);
+ EXPECT_EQ(error::kLostContext, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, LoseContextCHROMIUMInvalidArgs0_0) {
+ EXPECT_CALL(*mock_decoder_, LoseContext(_))
+ .Times(0);
+ cmds::LoseContextCHROMIUM cmd;
+ cmd.Init(GL_NONE, GL_GUILTY_CONTEXT_RESET_ARB);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, LoseContextCHROMIUMInvalidArgs1_0) {
+ EXPECT_CALL(*mock_decoder_, LoseContext(_))
+ .Times(0);
+ cmds::LoseContextCHROMIUM cmd;
+ cmd.Init(GL_GUILTY_CONTEXT_RESET_ARB, GL_NONE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
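+// Fixture for GLES2Decoder::DoCommands() batching tests. Each test feeds a
+// small buffer of Enable(GL_BLEND) commands and verifies the returned error
+// and the number of command buffer entries consumed.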
+class GLES2DecoderDoCommandsTest : public GLES2DecoderTest {
+ public:
+ GLES2DecoderDoCommandsTest() {
+ for (int i = 0; i < 3; i++) {
+ cmds_[i].Init(GL_BLEND);
+ }
+ entries_per_cmd_ = ComputeNumEntries(cmds_[0].ComputeSize());
+ }
+
+ void SetExpectationsForNCommands(int num_commands) {
+ for (int i = 0; i < num_commands; i++)
+ SetupExpectationsForEnableDisable(GL_BLEND, true);
+ }
+
+ protected:
+ Enable cmds_[3];
+ int entries_per_cmd_;
+};
+
+// Test that processing with 0 entries does nothing.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsOneOfZero) {
+ int num_processed = -1;
+ SetExpectationsForNCommands(0);
+ EXPECT_EQ(
+ error::kNoError,
+ decoder_->DoCommands(1, &cmds_, entries_per_cmd_ * 0, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, num_processed);
+}
+
+// Test processing at granularity of single commands.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsOneOfOne) {
+ int num_processed = -1;
+ SetExpectationsForNCommands(1);
+ EXPECT_EQ(
+ error::kNoError,
+ decoder_->DoCommands(1, &cmds_, entries_per_cmd_ * 1, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_, num_processed);
+}
+
+// Test processing at granularity of multiple commands.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsThreeOfThree) {
+ int num_processed = -1;
+ SetExpectationsForNCommands(3);
+ EXPECT_EQ(
+ error::kNoError,
+ decoder_->DoCommands(3, &cmds_, entries_per_cmd_ * 3, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_ * 3, num_processed);
+}
+
+// Test processing a request smaller than available entries.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsTwoOfThree) {
+ int num_processed = -1;
+ SetExpectationsForNCommands(2);
+ EXPECT_EQ(
+ error::kNoError,
+ decoder_->DoCommands(2, &cmds_, entries_per_cmd_ * 3, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_ * 2, num_processed);
+}
+
+// Test that processing stops on a command with size 0.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsZeroCmdSize) {
+ cmds_[1].header.size = 0;
+ int num_processed = -1;
+ SetExpectationsForNCommands(1);
+ EXPECT_EQ(
+ error::kInvalidSize,
+ decoder_->DoCommands(2, &cmds_, entries_per_cmd_ * 2, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_, num_processed);
+}
+
+// Test that processing stops on a command with size greater than available.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsOutOfBounds) {
+ int num_processed = -1;
+ SetExpectationsForNCommands(1);
+ EXPECT_EQ(error::kOutOfBounds,
+ decoder_->DoCommands(
+ 2, &cmds_, entries_per_cmd_ * 2 - 1, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_, num_processed);
+}
+
+// Test that commands with bad argument size are skipped without processing.
+TEST_P(GLES2DecoderDoCommandsTest, DoCommandsBadArgSize) {
+ cmds_[1].header.size += 1;
+ int num_processed = -1;
+ SetExpectationsForNCommands(1);
+ EXPECT_EQ(error::kInvalidArguments,
+ decoder_->DoCommands(
+ 2, &cmds_, entries_per_cmd_ * 2 + 1, &num_processed));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(entries_per_cmd_ + cmds_[1].header.size, num_processed);
+}
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest, ::testing::Bool());
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderWithShaderTest, ::testing::Bool());
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderManualInitTest, ::testing::Bool());
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderRGBBackbufferTest,
+ ::testing::Bool());
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderDoCommandsTest, ::testing::Bool());
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h
new file mode 100644
index 0000000..fea20ab
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h
@@ -0,0 +1,80 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_H_
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_context_stub_with_extensions.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+namespace base {
+class CommandLine;
+}
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2DecoderTest : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderTest() {}
+
+ protected:
+ void CheckReadPixelsOutOfRange(GLint in_read_x,
+ GLint in_read_y,
+ GLsizei in_read_width,
+ GLsizei in_read_height,
+ bool init);
+};
+
+class GLES2DecoderWithShaderTest : public GLES2DecoderWithShaderTestBase {
+ public:
+ GLES2DecoderWithShaderTest() : GLES2DecoderWithShaderTestBase() {}
+
+ void CheckTextureChangesMarkFBOAsNotComplete(bool bound_fbo);
+ void CheckRenderbufferChangesMarkFBOAsNotComplete(bool bound_fbo);
+};
+
+class GLES2DecoderRGBBackbufferTest : public GLES2DecoderWithShaderTest {
+ public:
+ GLES2DecoderRGBBackbufferTest() {}
+
+ virtual void SetUp();
+};
+
+class GLES2DecoderManualInitTest : public GLES2DecoderWithShaderTest {
+ public:
+ GLES2DecoderManualInitTest() {}
+
+  // Override default setup so nothing gets set up.
+ virtual void SetUp();
+
+ void DirtyStateMaskTest(GLuint color_bits,
+ bool depth_mask,
+ GLuint front_stencil_mask,
+ GLuint back_stencil_mask);
+ void EnableDisableTest(GLenum cap, bool enable, bool expect_set);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h
new file mode 100644
index 0000000..0aca4df
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h
@@ -0,0 +1,105 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder_unittest_base.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_0_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_0_AUTOGEN_H_
+
+void GLES2DecoderTestBase::SetupInitCapabilitiesExpectations() {
+ ExpectEnableDisable(GL_BLEND, false);
+ ExpectEnableDisable(GL_CULL_FACE, false);
+ ExpectEnableDisable(GL_DEPTH_TEST, false);
+ ExpectEnableDisable(GL_DITHER, true);
+ ExpectEnableDisable(GL_POLYGON_OFFSET_FILL, false);
+ ExpectEnableDisable(GL_SAMPLE_ALPHA_TO_COVERAGE, false);
+ ExpectEnableDisable(GL_SAMPLE_COVERAGE, false);
+ ExpectEnableDisable(GL_SCISSOR_TEST, false);
+ ExpectEnableDisable(GL_STENCIL_TEST, false);
+}
+
+void GLES2DecoderTestBase::SetupInitStateExpectations() {
+ EXPECT_CALL(*gl_, BlendColor(0.0f, 0.0f, 0.0f, 0.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearColor(0.0f, 0.0f, 0.0f, 0.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearDepth(1.0f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearStencil(0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ColorMask(true, true, true, true))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, CullFace(GL_BACK)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DepthFunc(GL_LESS)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DepthMask(true)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DepthRange(0.0f, 1.0f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, FrontFace(GL_CCW)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, Hint(GL_GENERATE_MIPMAP_HINT, GL_DONT_CARE))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (group_->feature_info()->feature_flags().oes_standard_derivatives) {
+ EXPECT_CALL(*gl_,
+ Hint(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES, GL_DONT_CARE))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, LineWidth(1.0f)).Times(1).RetiresOnSaturation();
+ if (group_->feature_info()->feature_flags().chromium_path_rendering) {
+ EXPECT_CALL(*gl_, MatrixLoadfEXT(GL_PATH_MODELVIEW_CHROMIUM, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (group_->feature_info()->feature_flags().chromium_path_rendering) {
+ EXPECT_CALL(*gl_, MatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, PixelStorei(GL_PACK_ALIGNMENT, 4))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, PixelStorei(GL_UNPACK_ALIGNMENT, 4))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, PolygonOffset(0.0f, 0.0f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, SampleCoverage(1.0f, false)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ Scissor(kViewportX, kViewportY, kViewportWidth, kViewportHeight))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilFuncSeparate(GL_FRONT, GL_ALWAYS, 0, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilFuncSeparate(GL_BACK, GL_ALWAYS, 0, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilMaskSeparate(GL_FRONT, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilMaskSeparate(GL_BACK, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_KEEP))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_KEEP))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ Viewport(kViewportX, kViewportY, kViewportWidth, kViewportHeight))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_0_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
new file mode 100644
index 0000000..912e908
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
@@ -0,0 +1,306 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+void ShaderCacheCb(const std::string& key, const std::string& shader) {
+}
+} // namespace
+
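+// Fixture for the generated per-command tests. The SpecializedSetup<>
+// specializations below provide the extra GL state those tests require.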
+class GLES2DecoderTest1 : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderTest1() { }
+};
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest1, ::testing::Bool());
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GenerateMipmap, 0>(
+ bool valid) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 16, 16, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ kSharedMemoryId, kSharedMemoryOffset);
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::CheckFramebufferStatus, 0>(
+ bool /* valid */) {
+ // Give it a valid framebuffer.
+ DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_,
+ kServiceRenderbufferId);
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+ DoRenderbufferStorage(
+ GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
+ DoFramebufferRenderbuffer(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER,
+ client_renderbuffer_id_, kServiceRenderbufferId, GL_NO_ERROR);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Clear, 0>(bool valid) {
+ if (valid) {
+ SetupExpectationsForApplyingDefaultDirtyState();
+ }
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::ColorMask, 0>(
+ bool /* valid */) {
+  // We bind a framebuffer for the colormask test since the framebuffer
+  // will be considered RGB.
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::CopyTexImage2D, 0>(
+ bool valid) {
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::CopyTexSubImage2D, 0>(
+ bool valid) {
+ if (valid) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 2, GL_RGBA, 16, 16, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ kSharedMemoryId, kSharedMemoryOffset);
+ }
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::DetachShader, 0>(bool valid) {
+ if (valid) {
+ EXPECT_CALL(*gl_,
+ AttachShader(kServiceProgramId, kServiceShaderId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::AttachShader attach_cmd;
+ attach_cmd.Init(client_program_id_, client_shader_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(
+ bool valid) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::FramebufferTexture2D, 0>(
+ bool valid) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<
+ cmds::GetBufferParameteriv, 0>(bool /* valid */) {
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<
+ cmds::GetFramebufferAttachmentParameteriv, 0>(bool /* valid */) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<
+ cmds::GetRenderbufferParameteriv, 0>(
+ bool /* valid */) {
+ DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_,
+ kServiceRenderbufferId);
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramiv, 0>(
+ bool valid) {
+ if (valid) {
+ // GetProgramiv calls ClearGLError then GetError to make sure
+ // it actually got a value so it can report correctly to the client.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramInfoLog, 0>(
+ bool /* valid */) {
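+  // Create, attach, and link a vertex and a fragment shader so the program
+  // has an info log ("hello") for the test to read back.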
+ const GLuint kClientVertexShaderId = 5001;
+ const GLuint kServiceVertexShaderId = 6001;
+ const GLuint kClientFragmentShaderId = 5002;
+ const GLuint kServiceFragmentShaderId = 6002;
+ const char* log = "hello"; // Matches auto-generated unit test.
+ DoCreateShader(
+ GL_VERTEX_SHADER, kClientVertexShaderId, kServiceVertexShaderId);
+ DoCreateShader(
+ GL_FRAGMENT_SHADER, kClientFragmentShaderId, kServiceFragmentShaderId);
+
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(kClientVertexShaderId), true);
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(kClientFragmentShaderId), true);
+
+ InSequence dummy;
+ EXPECT_CALL(*gl_,
+ AttachShader(kServiceProgramId, kServiceVertexShaderId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ AttachShader(kServiceProgramId, kServiceFragmentShaderId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, LinkProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_LINK_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(1));
+ EXPECT_CALL(*gl_,
+ GetProgramiv(kServiceProgramId, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(strlen(log) + 1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetProgramInfoLog(kServiceProgramId, strlen(log) + 1, _, _))
+ .WillOnce(DoAll(
+ SetArgumentPointee<2>(strlen(log)),
+ SetArrayArgument<3>(log, log + strlen(log) + 1)))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_ACTIVE_ATTRIBUTES, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(
+ *gl_,
+ GetProgramiv(kServiceProgramId, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_ACTIVE_UNIFORMS, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(
+ *gl_,
+ GetProgramiv(kServiceProgramId, GL_ACTIVE_UNIFORM_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+
+ Program* program = GetProgram(client_program_id_);
+ ASSERT_TRUE(program != NULL);
+
+ cmds::AttachShader attach_cmd;
+ attach_cmd.Init(client_program_id_, kClientVertexShaderId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+
+ attach_cmd.Init(client_program_id_, kClientFragmentShaderId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribfv, 0>(
+ bool valid) {
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ DoVertexAttribPointer(1, 1, GL_FLOAT, 0, 0);
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribiv, 0>(
+ bool valid) {
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ DoVertexAttribPointer(1, 1, GL_FLOAT, 0, 0);
+ if (valid) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::RenderbufferStorage, 0>(
+ bool valid) {
+ DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_,
+ kServiceRenderbufferId);
+ if (valid) {
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ RenderbufferStorageEXT(GL_RENDERBUFFER, _, 3, 4))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
new file mode 100644
index 0000000..b60bd3e
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
@@ -0,0 +1,1963 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder_unittest_1.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_1_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_1_AUTOGEN_H_
+
+// TODO(gman): ActiveTexture
+
+TEST_P(GLES2DecoderTest1, AttachShaderValidArgs) {
+ EXPECT_CALL(*gl_, AttachShader(kServiceProgramId, kServiceShaderId));
+ SpecializedSetup<cmds::AttachShader, 0>(true);
+ cmds::AttachShader cmd;
+ cmd.Init(client_program_id_, client_shader_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): BindAttribLocationBucket
+
+TEST_P(GLES2DecoderTest1, BindBufferValidArgs) {
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, kServiceBufferId));
+ SpecializedSetup<cmds::BindBuffer, 0>(true);
+ cmds::BindBuffer cmd;
+ cmd.Init(GL_ARRAY_BUFFER, client_buffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindBufferValidArgsNewId) {
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, kNewServiceId));
+ EXPECT_CALL(*gl_, GenBuffersARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ SpecializedSetup<cmds::BindBuffer, 0>(true);
+ cmds::BindBuffer cmd;
+ cmd.Init(GL_ARRAY_BUFFER, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetBuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, BindBufferInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BindBuffer(_, _)).Times(0);
+ SpecializedSetup<cmds::BindBuffer, 0>(false);
+ cmds::BindBuffer cmd;
+ cmd.Init(GL_RENDERBUFFER, client_buffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindFramebufferValidArgs) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER, kServiceFramebufferId));
+ SpecializedSetup<cmds::BindFramebuffer, 0>(true);
+ cmds::BindFramebuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER, client_framebuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindFramebufferValidArgsNewId) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER, kNewServiceId));
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ SpecializedSetup<cmds::BindFramebuffer, 0>(true);
+ cmds::BindFramebuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetFramebuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, BindFramebufferInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(_, _)).Times(0);
+ SpecializedSetup<cmds::BindFramebuffer, 0>(false);
+ cmds::BindFramebuffer cmd;
+ cmd.Init(GL_DRAW_FRAMEBUFFER, client_framebuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindFramebufferInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(_, _)).Times(0);
+ SpecializedSetup<cmds::BindFramebuffer, 0>(false);
+ cmds::BindFramebuffer cmd;
+ cmd.Init(GL_READ_FRAMEBUFFER, client_framebuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindRenderbufferValidArgs) {
+ EXPECT_CALL(*gl_,
+ BindRenderbufferEXT(GL_RENDERBUFFER, kServiceRenderbufferId));
+ SpecializedSetup<cmds::BindRenderbuffer, 0>(true);
+ cmds::BindRenderbuffer cmd;
+ cmd.Init(GL_RENDERBUFFER, client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindRenderbufferValidArgsNewId) {
+ EXPECT_CALL(*gl_, BindRenderbufferEXT(GL_RENDERBUFFER, kNewServiceId));
+ EXPECT_CALL(*gl_, GenRenderbuffersEXT(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ SpecializedSetup<cmds::BindRenderbuffer, 0>(true);
+ cmds::BindRenderbuffer cmd;
+ cmd.Init(GL_RENDERBUFFER, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetRenderbuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, BindRenderbufferInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BindRenderbufferEXT(_, _)).Times(0);
+ SpecializedSetup<cmds::BindRenderbuffer, 0>(false);
+ cmds::BindRenderbuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER, client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindTextureValidArgs) {
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kServiceTextureId));
+ SpecializedSetup<cmds::BindTexture, 0>(true);
+ cmds::BindTexture cmd;
+ cmd.Init(GL_TEXTURE_2D, client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindTextureValidArgsNewId) {
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kNewServiceId));
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ SpecializedSetup<cmds::BindTexture, 0>(true);
+ cmds::BindTexture cmd;
+ cmd.Init(GL_TEXTURE_2D, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetTexture(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, BindTextureInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BindTexture(_, _)).Times(0);
+ SpecializedSetup<cmds::BindTexture, 0>(false);
+ cmds::BindTexture cmd;
+ cmd.Init(GL_TEXTURE_1D, client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BindTextureInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, BindTexture(_, _)).Times(0);
+ SpecializedSetup<cmds::BindTexture, 0>(false);
+ cmds::BindTexture cmd;
+ cmd.Init(GL_TEXTURE_3D, client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendColorValidArgs) {
+ EXPECT_CALL(*gl_, BlendColor(1, 2, 3, 4));
+ SpecializedSetup<cmds::BlendColor, 0>(true);
+ cmds::BlendColor cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationValidArgs) {
+ EXPECT_CALL(*gl_, BlendEquation(GL_FUNC_SUBTRACT));
+ SpecializedSetup<cmds::BlendEquation, 0>(true);
+ cmds::BlendEquation cmd;
+ cmd.Init(GL_FUNC_SUBTRACT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BlendEquation(_)).Times(0);
+ SpecializedSetup<cmds::BlendEquation, 0>(false);
+ cmds::BlendEquation cmd;
+ cmd.Init(GL_MIN);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, BlendEquation(_)).Times(0);
+ SpecializedSetup<cmds::BlendEquation, 0>(false);
+ cmds::BlendEquation cmd;
+ cmd.Init(GL_MAX);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationSeparateValidArgs) {
+ EXPECT_CALL(*gl_, BlendEquationSeparate(GL_FUNC_SUBTRACT, GL_FUNC_ADD));
+ SpecializedSetup<cmds::BlendEquationSeparate, 0>(true);
+ cmds::BlendEquationSeparate cmd;
+ cmd.Init(GL_FUNC_SUBTRACT, GL_FUNC_ADD);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationSeparateInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, BlendEquationSeparate(_, _)).Times(0);
+ SpecializedSetup<cmds::BlendEquationSeparate, 0>(false);
+ cmds::BlendEquationSeparate cmd;
+ cmd.Init(GL_MIN, GL_FUNC_ADD);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationSeparateInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, BlendEquationSeparate(_, _)).Times(0);
+ SpecializedSetup<cmds::BlendEquationSeparate, 0>(false);
+ cmds::BlendEquationSeparate cmd;
+ cmd.Init(GL_MAX, GL_FUNC_ADD);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationSeparateInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, BlendEquationSeparate(_, _)).Times(0);
+ SpecializedSetup<cmds::BlendEquationSeparate, 0>(false);
+ cmds::BlendEquationSeparate cmd;
+ cmd.Init(GL_FUNC_SUBTRACT, GL_MIN);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendEquationSeparateInvalidArgs1_1) {
+ EXPECT_CALL(*gl_, BlendEquationSeparate(_, _)).Times(0);
+ SpecializedSetup<cmds::BlendEquationSeparate, 0>(false);
+ cmds::BlendEquationSeparate cmd;
+ cmd.Init(GL_FUNC_SUBTRACT, GL_MAX);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendFuncValidArgs) {
+ EXPECT_CALL(*gl_, BlendFunc(GL_ZERO, GL_ZERO));
+ SpecializedSetup<cmds::BlendFunc, 0>(true);
+ cmds::BlendFunc cmd;
+ cmd.Init(GL_ZERO, GL_ZERO);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, BlendFuncSeparateValidArgs) {
+ EXPECT_CALL(*gl_, BlendFuncSeparate(GL_ZERO, GL_ZERO, GL_ZERO, GL_ZERO));
+ SpecializedSetup<cmds::BlendFuncSeparate, 0>(true);
+ cmds::BlendFuncSeparate cmd;
+ cmd.Init(GL_ZERO, GL_ZERO, GL_ZERO, GL_ZERO);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): BufferData
+
+// TODO(gman): BufferSubData
+
+TEST_P(GLES2DecoderTest1, CheckFramebufferStatusValidArgs) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER));
+ SpecializedSetup<cmds::CheckFramebufferStatus, 0>(true);
+ cmds::CheckFramebufferStatus cmd;
+ cmd.Init(GL_FRAMEBUFFER, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CheckFramebufferStatusInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(_)).Times(0);
+ SpecializedSetup<cmds::CheckFramebufferStatus, 0>(false);
+ cmds::CheckFramebufferStatus cmd;
+ cmd.Init(GL_DRAW_FRAMEBUFFER, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CheckFramebufferStatusInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(_)).Times(0);
+ SpecializedSetup<cmds::CheckFramebufferStatus, 0>(false);
+ cmds::CheckFramebufferStatus cmd;
+ cmd.Init(GL_READ_FRAMEBUFFER, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CheckFramebufferStatusInvalidArgsBadSharedMemoryId) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER)).Times(0);
+ SpecializedSetup<cmds::CheckFramebufferStatus, 0>(false);
+ cmds::CheckFramebufferStatus cmd;
+ cmd.Init(GL_FRAMEBUFFER, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(GL_FRAMEBUFFER, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, ClearValidArgs) {
+ EXPECT_CALL(*gl_, Clear(1));
+ SpecializedSetup<cmds::Clear, 0>(true);
+ cmds::Clear cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, ClearColorValidArgs) {
+ EXPECT_CALL(*gl_, ClearColor(1, 2, 3, 4));
+ SpecializedSetup<cmds::ClearColor, 0>(true);
+ cmds::ClearColor cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, ClearDepthfValidArgs) {
+ EXPECT_CALL(*gl_, ClearDepth(0.5f));
+ SpecializedSetup<cmds::ClearDepthf, 0>(true);
+ cmds::ClearDepthf cmd;
+ cmd.Init(0.5f);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, ClearStencilValidArgs) {
+ EXPECT_CALL(*gl_, ClearStencil(1));
+ SpecializedSetup<cmds::ClearStencil, 0>(true);
+ cmds::ClearStencil cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, ColorMaskValidArgs) {
+ SpecializedSetup<cmds::ColorMask, 0>(true);
+ cmds::ColorMask cmd;
+ cmd.Init(true, true, true, true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): CompileShader
+// TODO(gman): CompressedTexImage2DBucket
+// TODO(gman): CompressedTexImage2D
+
+// TODO(gman): CompressedTexSubImage2DBucket
+// TODO(gman): CompressedTexSubImage2D
+
+// TODO(gman): CopyTexImage2D
+
+TEST_P(GLES2DecoderTest1, CopyTexSubImage2DValidArgs) {
+ EXPECT_CALL(*gl_, CopyTexSubImage2D(GL_TEXTURE_2D, 2, 3, 4, 5, 6, 7, 8));
+ SpecializedSetup<cmds::CopyTexSubImage2D, 0>(true);
+ cmds::CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 2, 3, 4, 5, 6, 7, 8);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CopyTexSubImage2DInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, CopyTexSubImage2D(_, _, _, _, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::CopyTexSubImage2D, 0>(false);
+ cmds::CopyTexSubImage2D cmd;
+ cmd.Init(GL_PROXY_TEXTURE_CUBE_MAP, 2, 3, 4, 5, 6, 7, 8);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CopyTexSubImage2DInvalidArgs6_0) {
+ EXPECT_CALL(*gl_, CopyTexSubImage2D(_, _, _, _, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::CopyTexSubImage2D, 0>(false);
+ cmds::CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 2, 3, 4, 5, 6, -1, 8);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CopyTexSubImage2DInvalidArgs7_0) {
+ EXPECT_CALL(*gl_, CopyTexSubImage2D(_, _, _, _, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::CopyTexSubImage2D, 0>(false);
+ cmds::CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 2, 3, 4, 5, 6, 7, -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CreateProgramValidArgs) {
+ EXPECT_CALL(*gl_, CreateProgram()).WillOnce(Return(kNewServiceId));
+ SpecializedSetup<cmds::CreateProgram, 0>(true);
+ cmds::CreateProgram cmd;
+ cmd.Init(kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetProgram(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, CreateShaderValidArgs) {
+ EXPECT_CALL(*gl_, CreateShader(GL_VERTEX_SHADER))
+ .WillOnce(Return(kNewServiceId));
+ SpecializedSetup<cmds::CreateShader, 0>(true);
+ cmds::CreateShader cmd;
+ cmd.Init(GL_VERTEX_SHADER, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetShader(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, CreateShaderInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, CreateShader(_)).Times(0);
+ SpecializedSetup<cmds::CreateShader, 0>(false);
+ cmds::CreateShader cmd;
+ cmd.Init(GL_GEOMETRY_SHADER, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, CullFaceValidArgs) {
+ EXPECT_CALL(*gl_, CullFace(GL_FRONT));
+ SpecializedSetup<cmds::CullFace, 0>(true);
+ cmds::CullFace cmd;
+ cmd.Init(GL_FRONT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DeleteBuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, Pointee(kServiceBufferId))).Times(1);
+ cmds::DeleteBuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteBuffersImmediate>();
+ SpecializedSetup<cmds::DeleteBuffersImmediate, 0>(true);
+ cmd.Init(1, &client_buffer_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_buffer_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetBuffer(client_buffer_id_) == NULL);
+}
+
+TEST_P(GLES2DecoderTest1, DeleteBuffersImmediateInvalidArgs) {
+ cmds::DeleteBuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteBuffersImmediate>();
+ SpecializedSetup<cmds::DeleteBuffersImmediate, 0>(false);
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(GLES2DecoderTest1, DeleteFramebuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, Pointee(kServiceFramebufferId)))
+ .Times(1);
+ cmds::DeleteFramebuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteFramebuffersImmediate>();
+ SpecializedSetup<cmds::DeleteFramebuffersImmediate, 0>(true);
+ cmd.Init(1, &client_framebuffer_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_framebuffer_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetFramebuffer(client_framebuffer_id_) == NULL);
+}
+
+TEST_P(GLES2DecoderTest1, DeleteFramebuffersImmediateInvalidArgs) {
+ cmds::DeleteFramebuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteFramebuffersImmediate>();
+ SpecializedSetup<cmds::DeleteFramebuffersImmediate, 0>(false);
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(GLES2DecoderTest1, DeleteProgramValidArgs) {
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId));
+ SpecializedSetup<cmds::DeleteProgram, 0>(true);
+ cmds::DeleteProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DeleteRenderbuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, Pointee(kServiceRenderbufferId)))
+ .Times(1);
+ cmds::DeleteRenderbuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteRenderbuffersImmediate>();
+ SpecializedSetup<cmds::DeleteRenderbuffersImmediate, 0>(true);
+ cmd.Init(1, &client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_renderbuffer_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetRenderbuffer(client_renderbuffer_id_) == NULL);
+}
+
+TEST_P(GLES2DecoderTest1, DeleteRenderbuffersImmediateInvalidArgs) {
+ cmds::DeleteRenderbuffersImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteRenderbuffersImmediate>();
+ SpecializedSetup<cmds::DeleteRenderbuffersImmediate, 0>(false);
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(GLES2DecoderTest1, DeleteShaderValidArgs) {
+ EXPECT_CALL(*gl_, DeleteShader(kServiceShaderId));
+ SpecializedSetup<cmds::DeleteShader, 0>(true);
+ cmds::DeleteShader cmd;
+ cmd.Init(client_shader_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DeleteTexturesImmediateValidArgs) {
+ EXPECT_CALL(*gl_, DeleteTextures(1, Pointee(kServiceTextureId))).Times(1);
+ cmds::DeleteTexturesImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteTexturesImmediate>();
+ SpecializedSetup<cmds::DeleteTexturesImmediate, 0>(true);
+ cmd.Init(1, &client_texture_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_texture_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetTexture(client_texture_id_) == NULL);
+}
+
+TEST_P(GLES2DecoderTest1, DeleteTexturesImmediateInvalidArgs) {
+ cmds::DeleteTexturesImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteTexturesImmediate>();
+ SpecializedSetup<cmds::DeleteTexturesImmediate, 0>(false);
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(GLES2DecoderTest1, DepthFuncValidArgs) {
+ EXPECT_CALL(*gl_, DepthFunc(GL_NEVER));
+ SpecializedSetup<cmds::DepthFunc, 0>(true);
+ cmds::DepthFunc cmd;
+ cmd.Init(GL_NEVER);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DepthMaskValidArgs) {
+ SpecializedSetup<cmds::DepthMask, 0>(true);
+ cmds::DepthMask cmd;
+ cmd.Init(true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DepthRangefValidArgs) {
+ EXPECT_CALL(*gl_, DepthRange(1, 2));
+ SpecializedSetup<cmds::DepthRangef, 0>(true);
+ cmds::DepthRangef cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DetachShaderValidArgs) {
+ EXPECT_CALL(*gl_, DetachShader(kServiceProgramId, kServiceShaderId));
+ SpecializedSetup<cmds::DetachShader, 0>(true);
+ cmds::DetachShader cmd;
+ cmd.Init(client_program_id_, client_shader_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DisableValidArgs) {
+ SetupExpectationsForEnableDisable(GL_BLEND, false);
+ SpecializedSetup<cmds::Disable, 0>(true);
+ cmds::Disable cmd;
+ cmd.Init(GL_BLEND);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DisableInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, Disable(_)).Times(0);
+ SpecializedSetup<cmds::Disable, 0>(false);
+ cmds::Disable cmd;
+ cmd.Init(GL_CLIP_PLANE0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DisableInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, Disable(_)).Times(0);
+ SpecializedSetup<cmds::Disable, 0>(false);
+ cmds::Disable cmd;
+ cmd.Init(GL_POINT_SPRITE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, DisableVertexAttribArrayValidArgs) {
+ EXPECT_CALL(*gl_, DisableVertexAttribArray(1));
+ SpecializedSetup<cmds::DisableVertexAttribArray, 0>(true);
+ cmds::DisableVertexAttribArray cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): DrawArrays
+
+// TODO(gman): DrawElements
+
+TEST_P(GLES2DecoderTest1, EnableValidArgs) {
+ SetupExpectationsForEnableDisable(GL_BLEND, true);
+ SpecializedSetup<cmds::Enable, 0>(true);
+ cmds::Enable cmd;
+ cmd.Init(GL_BLEND);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, EnableInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, Enable(_)).Times(0);
+ SpecializedSetup<cmds::Enable, 0>(false);
+ cmds::Enable cmd;
+ cmd.Init(GL_CLIP_PLANE0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, EnableInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, Enable(_)).Times(0);
+ SpecializedSetup<cmds::Enable, 0>(false);
+ cmds::Enable cmd;
+ cmd.Init(GL_POINT_SPRITE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, EnableVertexAttribArrayValidArgs) {
+ EXPECT_CALL(*gl_, EnableVertexAttribArray(1));
+ SpecializedSetup<cmds::EnableVertexAttribArray, 0>(true);
+ cmds::EnableVertexAttribArray cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FinishValidArgs) {
+ EXPECT_CALL(*gl_, Finish());
+ SpecializedSetup<cmds::Finish, 0>(true);
+ cmds::Finish cmd;
+ cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FlushValidArgs) {
+ EXPECT_CALL(*gl_, Flush());
+ SpecializedSetup<cmds::Flush, 0>(true);
+ cmds::Flush cmd;
+ cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferRenderbufferValidArgs) {
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId));
+ SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(true);
+ cmds::FramebufferRenderbuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferRenderbufferInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(false);
+ cmds::FramebufferRenderbuffer cmd;
+ cmd.Init(GL_DRAW_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferRenderbufferInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(false);
+ cmds::FramebufferRenderbuffer cmd;
+ cmd.Init(GL_READ_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferRenderbufferInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(false);
+ cmds::FramebufferRenderbuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferTexture2DValidArgs) {
+ EXPECT_CALL(*gl_,
+ FramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kServiceTextureId,
+ 0));
+ SpecializedSetup<cmds::FramebufferTexture2D, 0>(true);
+ cmds::FramebufferTexture2D cmd;
+ cmd.Init(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferTexture2DInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, FramebufferTexture2DEXT(_, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferTexture2D, 0>(false);
+ cmds::FramebufferTexture2D cmd;
+ cmd.Init(GL_DRAW_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferTexture2DInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, FramebufferTexture2DEXT(_, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferTexture2D, 0>(false);
+ cmds::FramebufferTexture2D cmd;
+ cmd.Init(GL_READ_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FramebufferTexture2DInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, FramebufferTexture2DEXT(_, _, _, _, _)).Times(0);
+ SpecializedSetup<cmds::FramebufferTexture2D, 0>(false);
+ cmds::FramebufferTexture2D cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_PROXY_TEXTURE_CUBE_MAP,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, FrontFaceValidArgs) {
+ EXPECT_CALL(*gl_, FrontFace(GL_CW));
+ SpecializedSetup<cmds::FrontFace, 0>(true);
+ cmds::FrontFace cmd;
+ cmd.Init(GL_CW);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GenBuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, GenBuffersARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ cmds::GenBuffersImmediate* cmd = GetImmediateAs<cmds::GenBuffersImmediate>();
+ GLuint temp = kNewClientId;
+ SpecializedSetup<cmds::GenBuffersImmediate, 0>(true);
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetBuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, GenBuffersImmediateInvalidArgs) {
+ EXPECT_CALL(*gl_, GenBuffersARB(_, _)).Times(0);
+ cmds::GenBuffersImmediate* cmd = GetImmediateAs<cmds::GenBuffersImmediate>();
+ SpecializedSetup<cmds::GenBuffersImmediate, 0>(false);
+ cmd->Init(1, &client_buffer_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_buffer_id_)));
+}
+
+TEST_P(GLES2DecoderTest1, GenerateMipmapValidArgs) {
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(GL_TEXTURE_2D));
+ SpecializedSetup<cmds::GenerateMipmap, 0>(true);
+ cmds::GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GenerateMipmapInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
+ SpecializedSetup<cmds::GenerateMipmap, 0>(false);
+ cmds::GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_1D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GenerateMipmapInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
+ SpecializedSetup<cmds::GenerateMipmap, 0>(false);
+ cmds::GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_3D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GenFramebuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ cmds::GenFramebuffersImmediate* cmd =
+ GetImmediateAs<cmds::GenFramebuffersImmediate>();
+ GLuint temp = kNewClientId;
+ SpecializedSetup<cmds::GenFramebuffersImmediate, 0>(true);
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetFramebuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, GenFramebuffersImmediateInvalidArgs) {
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(_, _)).Times(0);
+ cmds::GenFramebuffersImmediate* cmd =
+ GetImmediateAs<cmds::GenFramebuffersImmediate>();
+ SpecializedSetup<cmds::GenFramebuffersImmediate, 0>(false);
+ cmd->Init(1, &client_framebuffer_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_framebuffer_id_)));
+}
+
+TEST_P(GLES2DecoderTest1, GenRenderbuffersImmediateValidArgs) {
+ EXPECT_CALL(*gl_, GenRenderbuffersEXT(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ cmds::GenRenderbuffersImmediate* cmd =
+ GetImmediateAs<cmds::GenRenderbuffersImmediate>();
+ GLuint temp = kNewClientId;
+ SpecializedSetup<cmds::GenRenderbuffersImmediate, 0>(true);
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetRenderbuffer(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, GenRenderbuffersImmediateInvalidArgs) {
+ EXPECT_CALL(*gl_, GenRenderbuffersEXT(_, _)).Times(0);
+ cmds::GenRenderbuffersImmediate* cmd =
+ GetImmediateAs<cmds::GenRenderbuffersImmediate>();
+ SpecializedSetup<cmds::GenRenderbuffersImmediate, 0>(false);
+ cmd->Init(1, &client_renderbuffer_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_renderbuffer_id_)));
+}
+
+TEST_P(GLES2DecoderTest1, GenTexturesImmediateValidArgs) {
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ cmds::GenTexturesImmediate* cmd =
+ GetImmediateAs<cmds::GenTexturesImmediate>();
+ GLuint temp = kNewClientId;
+ SpecializedSetup<cmds::GenTexturesImmediate, 0>(true);
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetTexture(kNewClientId) != NULL);
+}
+
+TEST_P(GLES2DecoderTest1, GenTexturesImmediateInvalidArgs) {
+ EXPECT_CALL(*gl_, GenTextures(_, _)).Times(0);
+ cmds::GenTexturesImmediate* cmd =
+ GetImmediateAs<cmds::GenTexturesImmediate>();
+ SpecializedSetup<cmds::GenTexturesImmediate, 0>(false);
+ cmd->Init(1, &client_texture_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_texture_id_)));
+}
+// TODO(gman): GetActiveAttrib
+
+// TODO(gman): GetActiveUniform
+
+// TODO(gman): GetAttachedShaders
+
+// TODO(gman): GetAttribLocation
+
+TEST_P(GLES2DecoderTest1, GetBooleanvValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetBooleanv, 0>(true);
+ typedef cmds::GetBooleanv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetBooleanv(GL_ACTIVE_TEXTURE, result->GetData()));
+ result->size = 0;
+ cmds::GetBooleanv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ACTIVE_TEXTURE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetBooleanvInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetBooleanv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetBooleanv, 0>(false);
+ cmds::GetBooleanv::Result* result =
+ static_cast<cmds::GetBooleanv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBooleanv cmd;
+ cmd.Init(GL_FOG_HINT, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetBooleanvInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetBooleanv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetBooleanv, 0>(false);
+ cmds::GetBooleanv::Result* result =
+ static_cast<cmds::GetBooleanv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBooleanv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetBooleanvInvalidArgs1_1) {
+ EXPECT_CALL(*gl_, GetBooleanv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetBooleanv, 0>(false);
+ cmds::GetBooleanv::Result* result =
+ static_cast<cmds::GetBooleanv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBooleanv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetBufferParameterivValidArgs) {
+ SpecializedSetup<cmds::GetBufferParameteriv, 0>(true);
+ typedef cmds::GetBufferParameteriv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBufferParameteriv cmd;
+ cmd.Init(GL_ARRAY_BUFFER,
+ GL_BUFFER_SIZE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_BUFFER_SIZE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetBufferParameterivInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetBufferParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetBufferParameteriv, 0>(false);
+ cmds::GetBufferParameteriv::Result* result =
+ static_cast<cmds::GetBufferParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBufferParameteriv cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ GL_BUFFER_SIZE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetBufferParameterivInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetBufferParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetBufferParameteriv, 0>(false);
+ cmds::GetBufferParameteriv::Result* result =
+ static_cast<cmds::GetBufferParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBufferParameteriv cmd;
+ cmd.Init(GL_ARRAY_BUFFER,
+ GL_PIXEL_PACK_BUFFER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetBufferParameterivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetBufferParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetBufferParameteriv, 0>(false);
+ cmds::GetBufferParameteriv::Result* result =
+ static_cast<cmds::GetBufferParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBufferParameteriv cmd;
+ cmd.Init(GL_ARRAY_BUFFER, GL_BUFFER_SIZE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetBufferParameterivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetBufferParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetBufferParameteriv, 0>(false);
+ cmds::GetBufferParameteriv::Result* result =
+ static_cast<cmds::GetBufferParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetBufferParameteriv cmd;
+ cmd.Init(GL_ARRAY_BUFFER,
+ GL_BUFFER_SIZE,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetErrorValidArgs) {
+ EXPECT_CALL(*gl_, GetError());
+ SpecializedSetup<cmds::GetError, 0>(true);
+ cmds::GetError cmd;
+ cmd.Init(shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetErrorInvalidArgsBadSharedMemoryId) {
+ EXPECT_CALL(*gl_, GetError()).Times(0);
+ SpecializedSetup<cmds::GetError, 0>(false);
+ cmds::GetError cmd;
+ cmd.Init(kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, GetFloatvValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetFloatv, 0>(true);
+ typedef cmds::GetFloatv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetFloatv(GL_ACTIVE_TEXTURE, result->GetData()));
+ result->size = 0;
+ cmds::GetFloatv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ACTIVE_TEXTURE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetFloatvInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetFloatv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetFloatv, 0>(false);
+ cmds::GetFloatv::Result* result =
+ static_cast<cmds::GetFloatv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetFloatv cmd;
+ cmd.Init(GL_FOG_HINT, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetFloatvInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetFloatv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetFloatv, 0>(false);
+ cmds::GetFloatv::Result* result =
+ static_cast<cmds::GetFloatv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetFloatv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetFloatvInvalidArgs1_1) {
+ EXPECT_CALL(*gl_, GetFloatv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetFloatv, 0>(false);
+ cmds::GetFloatv::Result* result =
+ static_cast<cmds::GetFloatv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetFloatv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetFramebufferAttachmentParameterivValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetFramebufferAttachmentParameteriv, 0>(true);
+ typedef cmds::GetFramebufferAttachmentParameteriv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetFramebufferAttachmentParameterivEXT(
+ GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ result->GetData()));
+ result->size = 0;
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetFramebufferAttachmentParameterivInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
+ .Times(0);
+ SpecializedSetup<cmds::GetFramebufferAttachmentParameteriv, 0>(false);
+ cmds::GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<cmds::GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_DRAW_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetFramebufferAttachmentParameterivInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
+ .Times(0);
+ SpecializedSetup<cmds::GetFramebufferAttachmentParameteriv, 0>(false);
+ cmds::GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<cmds::GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_READ_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetFramebufferAttachmentParameterivInvalidArgs3_0) {
+ EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
+ .Times(0);
+ SpecializedSetup<cmds::GetFramebufferAttachmentParameteriv, 0>(false);
+ cmds::GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<cmds::GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ kInvalidSharedMemoryId,
+ 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetFramebufferAttachmentParameterivInvalidArgs3_1) {
+ EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
+ .Times(0);
+ SpecializedSetup<cmds::GetFramebufferAttachmentParameteriv, 0>(false);
+ cmds::GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<cmds::GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetIntegervValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetIntegerv, 0>(true);
+ typedef cmds::GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_ACTIVE_TEXTURE, result->GetData()));
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ACTIVE_TEXTURE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetIntegervInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetIntegerv, 0>(false);
+ cmds::GetIntegerv::Result* result =
+ static_cast<cmds::GetIntegerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_FOG_HINT, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetIntegervInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetIntegerv, 0>(false);
+ cmds::GetIntegerv::Result* result =
+ static_cast<cmds::GetIntegerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetIntegervInvalidArgs1_1) {
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetIntegerv, 0>(false);
+ cmds::GetIntegerv::Result* result =
+ static_cast<cmds::GetIntegerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetProgramivValidArgs) {
+ SpecializedSetup<cmds::GetProgramiv, 0>(true);
+ typedef cmds::GetProgramiv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_,
+ GL_DELETE_STATUS,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DELETE_STATUS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetProgramiv, 0>(false);
+ cmds::GetProgramiv::Result* result =
+ static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_, GL_DELETE_STATUS, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetProgramiv, 0>(false);
+ cmds::GetProgramiv::Result* result =
+ static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_,
+ GL_DELETE_STATUS,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetProgramInfoLogValidArgs) {
+ const char* kInfo = "hello";
+ const uint32_t kBucketId = 123;
+ SpecializedSetup<cmds::GetProgramInfoLog, 0>(true);
+
+ cmds::GetProgramInfoLog cmd;
+ cmd.Init(client_program_id_, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(strlen(kInfo) + 1, bucket->size());
+ EXPECT_EQ(0,
+ memcmp(bucket->GetData(0, bucket->size()), kInfo, bucket->size()));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetProgramInfoLogInvalidArgs) {
+ const uint32_t kBucketId = 123;
+ cmds::GetProgramInfoLog cmd;
+ cmd.Init(kInvalidClientId, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetRenderbufferParameterivValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetRenderbufferParameteriv, 0>(true);
+ typedef cmds::GetRenderbufferParameteriv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(
+ *gl_,
+ GetRenderbufferParameterivEXT(
+ GL_RENDERBUFFER, GL_RENDERBUFFER_RED_SIZE, result->GetData()));
+ result->size = 0;
+ cmds::GetRenderbufferParameteriv cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ GL_RENDERBUFFER_RED_SIZE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_RENDERBUFFER_RED_SIZE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetRenderbufferParameterivInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetRenderbufferParameterivEXT(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetRenderbufferParameteriv, 0>(false);
+ cmds::GetRenderbufferParameteriv::Result* result =
+ static_cast<cmds::GetRenderbufferParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetRenderbufferParameteriv cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_RENDERBUFFER_RED_SIZE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetRenderbufferParameterivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetRenderbufferParameterivEXT(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetRenderbufferParameteriv, 0>(false);
+ cmds::GetRenderbufferParameteriv::Result* result =
+ static_cast<cmds::GetRenderbufferParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetRenderbufferParameteriv cmd;
+ cmd.Init(
+ GL_RENDERBUFFER, GL_RENDERBUFFER_RED_SIZE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetRenderbufferParameterivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetRenderbufferParameterivEXT(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetRenderbufferParameteriv, 0>(false);
+ cmds::GetRenderbufferParameteriv::Result* result =
+ static_cast<cmds::GetRenderbufferParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ cmds::GetRenderbufferParameteriv cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ GL_RENDERBUFFER_RED_SIZE,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetShaderivValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetShaderiv, 0>(true);
+ typedef cmds::GetShaderiv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetShaderiv(kServiceShaderId, GL_SHADER_TYPE, result->GetData()));
+ result->size = 0;
+ cmds::GetShaderiv cmd;
+ cmd.Init(client_shader_id_,
+ GL_SHADER_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_SHADER_TYPE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetShaderivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetShaderiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetShaderiv, 0>(false);
+ cmds::GetShaderiv::Result* result =
+ static_cast<cmds::GetShaderiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetShaderiv cmd;
+ cmd.Init(client_shader_id_, GL_SHADER_TYPE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetShaderivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetShaderiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetShaderiv, 0>(false);
+ cmds::GetShaderiv::Result* result =
+ static_cast<cmds::GetShaderiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetShaderiv cmd;
+ cmd.Init(client_shader_id_,
+ GL_SHADER_TYPE,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+// TODO(gman): GetShaderInfoLog
+// TODO(gman): GetShaderPrecisionFormat
+
+// TODO(gman): GetShaderSource
+// TODO(gman): GetString
+
+TEST_P(GLES2DecoderTest1, GetTexParameterfvValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetTexParameterfv, 0>(true);
+ typedef cmds::GetTexParameterfv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetTexParameterfv(
+ GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, result->GetData()));
+ result->size = 0;
+ cmds::GetTexParameterfv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(
+ decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_TEXTURE_MAG_FILTER),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterfvInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetTexParameterfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameterfv, 0>(false);
+ cmds::GetTexParameterfv::Result* result =
+ static_cast<cmds::GetTexParameterfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameterfv cmd;
+ cmd.Init(GL_PROXY_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterfvInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetTexParameterfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameterfv, 0>(false);
+ cmds::GetTexParameterfv::Result* result =
+ static_cast<cmds::GetTexParameterfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameterfv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_GENERATE_MIPMAP,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterfvInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetTexParameterfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameterfv, 0>(false);
+ cmds::GetTexParameterfv::Result* result =
+ static_cast<cmds::GetTexParameterfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameterfv cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterfvInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetTexParameterfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameterfv, 0>(false);
+ cmds::GetTexParameterfv::Result* result =
+ static_cast<cmds::GetTexParameterfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameterfv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterivValidArgs) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ SpecializedSetup<cmds::GetTexParameteriv, 0>(true);
+ typedef cmds::GetTexParameteriv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetTexParameteriv(
+ GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, result->GetData()));
+ result->size = 0;
+ cmds::GetTexParameteriv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(
+ decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_TEXTURE_MAG_FILTER),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterivInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetTexParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameteriv, 0>(false);
+ cmds::GetTexParameteriv::Result* result =
+ static_cast<cmds::GetTexParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameteriv cmd;
+ cmd.Init(GL_PROXY_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterivInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetTexParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameteriv, 0>(false);
+ cmds::GetTexParameteriv::Result* result =
+ static_cast<cmds::GetTexParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameteriv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_GENERATE_MIPMAP,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetTexParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameteriv, 0>(false);
+ cmds::GetTexParameteriv::Result* result =
+ static_cast<cmds::GetTexParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameteriv cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetTexParameterivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetTexParameteriv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetTexParameteriv, 0>(false);
+ cmds::GetTexParameteriv::Result* result =
+ static_cast<cmds::GetTexParameteriv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetTexParameteriv cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+// TODO(gman): GetUniformfv
+
+// TODO(gman): GetUniformiv
+
+// TODO(gman): GetUniformLocation
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribfvValidArgs) {
+ SpecializedSetup<cmds::GetVertexAttribfv, 0>(true);
+ typedef cmds::GetVertexAttribfv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribfv cmd;
+ cmd.Init(1,
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribfvInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetVertexAttribfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetVertexAttribfv, 0>(false);
+ cmds::GetVertexAttribfv::Result* result =
+ static_cast<cmds::GetVertexAttribfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribfv cmd;
+ cmd.Init(1, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribfvInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetVertexAttribfv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetVertexAttribfv, 0>(false);
+ cmds::GetVertexAttribfv::Result* result =
+ static_cast<cmds::GetVertexAttribfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribfv cmd;
+ cmd.Init(1,
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribivValidArgs) {
+ SpecializedSetup<cmds::GetVertexAttribiv, 0>(true);
+ typedef cmds::GetVertexAttribiv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribiv cmd;
+ cmd.Init(1,
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetVertexAttribiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetVertexAttribiv, 0>(false);
+ cmds::GetVertexAttribiv::Result* result =
+ static_cast<cmds::GetVertexAttribiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribiv cmd;
+ cmd.Init(1, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest1, GetVertexAttribivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetVertexAttribiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetVertexAttribiv, 0>(false);
+ cmds::GetVertexAttribiv::Result* result =
+ static_cast<cmds::GetVertexAttribiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetVertexAttribiv cmd;
+ cmd.Init(1,
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+// TODO(gman): GetVertexAttribPointerv
+
+TEST_P(GLES2DecoderTest1, HintValidArgs) {
+ EXPECT_CALL(*gl_, Hint(GL_GENERATE_MIPMAP_HINT, GL_FASTEST));
+ SpecializedSetup<cmds::Hint, 0>(true);
+ cmds::Hint cmd;
+ cmd.Init(GL_GENERATE_MIPMAP_HINT, GL_FASTEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, HintInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, Hint(_, _)).Times(0);
+ SpecializedSetup<cmds::Hint, 0>(false);
+ cmds::Hint cmd;
+ cmd.Init(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsBufferValidArgs) {
+ SpecializedSetup<cmds::IsBuffer, 0>(true);
+ cmds::IsBuffer cmd;
+ cmd.Init(client_buffer_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsBufferInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsBuffer, 0>(false);
+ cmds::IsBuffer cmd;
+ cmd.Init(client_buffer_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(client_buffer_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsEnabledValidArgs) {
+ SpecializedSetup<cmds::IsEnabled, 0>(true);
+ cmds::IsEnabled cmd;
+ cmd.Init(GL_BLEND, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsEnabledInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, IsEnabled(_)).Times(0);
+ SpecializedSetup<cmds::IsEnabled, 0>(false);
+ cmds::IsEnabled cmd;
+ cmd.Init(GL_CLIP_PLANE0, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsEnabledInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, IsEnabled(_)).Times(0);
+ SpecializedSetup<cmds::IsEnabled, 0>(false);
+ cmds::IsEnabled cmd;
+ cmd.Init(GL_POINT_SPRITE, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsEnabledInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsEnabled, 0>(false);
+ cmds::IsEnabled cmd;
+ cmd.Init(GL_BLEND, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(GL_BLEND, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsFramebufferValidArgs) {
+ SpecializedSetup<cmds::IsFramebuffer, 0>(true);
+ cmds::IsFramebuffer cmd;
+ cmd.Init(client_framebuffer_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsFramebufferInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsFramebuffer, 0>(false);
+ cmds::IsFramebuffer cmd;
+ cmd.Init(
+ client_framebuffer_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(
+ client_framebuffer_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsProgramValidArgs) {
+ SpecializedSetup<cmds::IsProgram, 0>(true);
+ cmds::IsProgram cmd;
+ cmd.Init(client_program_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsProgramInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsProgram, 0>(false);
+ cmds::IsProgram cmd;
+ cmd.Init(client_program_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsRenderbufferValidArgs) {
+ SpecializedSetup<cmds::IsRenderbuffer, 0>(true);
+ cmds::IsRenderbuffer cmd;
+ cmd.Init(client_renderbuffer_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsRenderbufferInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsRenderbuffer, 0>(false);
+ cmds::IsRenderbuffer cmd;
+ cmd.Init(
+ client_renderbuffer_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(
+ client_renderbuffer_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsShaderValidArgs) {
+ SpecializedSetup<cmds::IsShader, 0>(true);
+ cmds::IsShader cmd;
+ cmd.Init(client_shader_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsShaderInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsShader, 0>(false);
+ cmds::IsShader cmd;
+ cmd.Init(client_shader_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(client_shader_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, IsTextureValidArgs) {
+ SpecializedSetup<cmds::IsTexture, 0>(true);
+ cmds::IsTexture cmd;
+ cmd.Init(client_texture_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, IsTextureInvalidArgsBadSharedMemoryId) {
+ SpecializedSetup<cmds::IsTexture, 0>(false);
+ cmds::IsTexture cmd;
+ cmd.Init(client_texture_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(client_texture_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest1, LineWidthValidArgs) {
+ EXPECT_CALL(*gl_, LineWidth(0.5f));
+ SpecializedSetup<cmds::LineWidth, 0>(true);
+ cmds::LineWidth cmd;
+ cmd.Init(0.5f);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, LineWidthInvalidValue0_0) {
+ SpecializedSetup<cmds::LineWidth, 0>(false);
+ cmds::LineWidth cmd;
+ cmd.Init(0.0f);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, LineWidthNaNValue0) {
+ SpecializedSetup<cmds::LineWidth, 0>(false);
+ cmds::LineWidth cmd;
+ cmd.Init(nanf(""));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, LinkProgramValidArgs) {
+ EXPECT_CALL(*gl_, LinkProgram(kServiceProgramId));
+ SpecializedSetup<cmds::LinkProgram, 0>(true);
+ cmds::LinkProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): PixelStorei
+
+TEST_P(GLES2DecoderTest1, PolygonOffsetValidArgs) {
+ EXPECT_CALL(*gl_, PolygonOffset(1, 2));
+ SpecializedSetup<cmds::PolygonOffset, 0>(true);
+ cmds::PolygonOffset cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): ReadPixels
+
+// TODO(gman): ReleaseShaderCompiler
+
+TEST_P(GLES2DecoderTest1, RenderbufferStorageValidArgs) {
+ SpecializedSetup<cmds::RenderbufferStorage, 0>(true);
+ cmds::RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, RenderbufferStorageInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::RenderbufferStorage, 0>(false);
+ cmds::RenderbufferStorage cmd;
+ cmd.Init(GL_FRAMEBUFFER, GL_RGBA4, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, RenderbufferStorageInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::RenderbufferStorage, 0>(false);
+ cmds::RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, -1, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, RenderbufferStorageInvalidArgs3_0) {
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::RenderbufferStorage, 0>(false);
+ cmds::RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 3, -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest1, SampleCoverageValidArgs) {
+ EXPECT_CALL(*gl_, SampleCoverage(1, true));
+ SpecializedSetup<cmds::SampleCoverage, 0>(true);
+ cmds::SampleCoverage cmd;
+ cmd.Init(1, true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_1_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
new file mode 100644
index 0000000..1d8ac40
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
@@ -0,0 +1,581 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2DecoderTest2 : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderTest2() { }
+
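+  // Issues each glUniform{1,2,3,4}{i,f}[v] and glUniformMatrix{2,3,4}fv
+  // command against a program with a uniform of |uniform_type| and checks
+  // that only the call families in |accepts_apis| avoid GL_INVALID_OPERATION.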
+ void TestAcceptedUniform(GLenum uniform_type, uint32 accepts_apis) {
+ SetupShaderForUniform(uniform_type);
+ bool valid_uniform = false;
+
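+    // The mock tolerates any number of forwarded calls; each case below is
+    // judged by the GL error it produces rather than by call counts.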
+ EXPECT_CALL(*gl_, Uniform1i(1, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform1iv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform2iv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform3iv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform4iv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform1f(1, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform1fv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform2fv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform3fv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, Uniform4fv(1, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, UniformMatrix2fv(1, _, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, UniformMatrix3fv(1, _, _, _)).Times(AnyNumber());
+ EXPECT_CALL(*gl_, UniformMatrix4fv(1, _, _, _)).Times(AnyNumber());
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform1i;
+ cmds::Uniform1i cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform1i;
+ cmds::Uniform1ivImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform1ivImmediate>();
+ GLint data[2][1] = {{0}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform2i;
+ cmds::Uniform2i cmd;
+ cmd.Init(1, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform2i;
+ cmds::Uniform2ivImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform2ivImmediate>();
+ GLint data[2][2] = {{0}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform3i;
+ cmds::Uniform3i cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform3i;
+ cmds::Uniform3ivImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform3ivImmediate>();
+ GLint data[2][3] = {{0}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform4i;
+ cmds::Uniform4i cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform4i;
+ cmds::Uniform4ivImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform4ivImmediate>();
+ GLint data[2][4] = {{0}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ ////////////////////
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform1f;
+ cmds::Uniform1f cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform1f;
+ cmds::Uniform1fvImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform1fvImmediate>();
+ GLfloat data[2][1] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform2f;
+ cmds::Uniform2f cmd;
+ cmd.Init(1, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform2f;
+ cmds::Uniform2fvImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform2fvImmediate>();
+ GLfloat data[2][2] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform3f;
+ cmds::Uniform3f cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform3f;
+ cmds::Uniform3fvImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform3fvImmediate>();
+ GLfloat data[2][3] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform4f;
+ cmds::Uniform4f cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniform4f;
+ cmds::Uniform4fvImmediate& cmd =
+ *GetImmediateAs<cmds::Uniform4fvImmediate>();
+ GLfloat data[2][4] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniformMatrix2f;
+ cmds::UniformMatrix2fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix2fvImmediate>();
+ GLfloat data[2][2 * 2] = {{0.0f}};
+
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniformMatrix3f;
+ cmds::UniformMatrix3fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix3fvImmediate>();
+ GLfloat data[2][3 * 3] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+
+ {
+ valid_uniform = accepts_apis & Program::kUniformMatrix4f;
+ cmds::UniformMatrix4fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix4fvImmediate>();
+ GLfloat data[2][4 * 4] = {{0.0f}};
+ cmd.Init(1, 2, &data[0][0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(data)));
+ EXPECT_EQ(valid_uniform ? GL_NO_ERROR : GL_INVALID_OPERATION,
+ GetGLError());
+ }
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest2, ::testing::Bool());
+
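+// SpecializedSetup<> specializations establish the per-command preconditions
+// (shaders, programs, bound textures, etc.) that the auto-generated tests
+// included below rely on.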
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GenQueriesEXTImmediate, 0>(
+ bool valid) {
+ if (!valid) {
+    // Generate client_query_id_ up front so that generating the same id
+    // again, as the invalid-path test does, fails as expected.
+ cmds::GenQueriesEXTImmediate& cmd =
+ *GetImmediateAs<cmds::GenQueriesEXTImmediate>();
+ cmd.Init(1, &client_query_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_query_id_)));
+ }
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::DeleteQueriesEXTImmediate, 0>(
+ bool valid) {
+ if (valid) {
+    // Generate client_query_id_ so that deleting it will succeed.
+ cmds::GenQueriesEXTImmediate& cmd =
+ *GetImmediateAs<cmds::GenQueriesEXTImmediate>();
+ cmd.Init(1, &client_query_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_query_id_)));
+ }
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::LinkProgram, 0>(
+ bool /* valid */) {
+ const GLuint kClientVertexShaderId = 5001;
+ const GLuint kServiceVertexShaderId = 6001;
+ const GLuint kClientFragmentShaderId = 5002;
+ const GLuint kServiceFragmentShaderId = 6002;
+ DoCreateShader(
+ GL_VERTEX_SHADER, kClientVertexShaderId, kServiceVertexShaderId);
+ DoCreateShader(
+ GL_FRAGMENT_SHADER, kClientFragmentShaderId, kServiceFragmentShaderId);
+
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(kClientVertexShaderId), true);
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(kClientFragmentShaderId), true);
+
+ InSequence dummy;
+ EXPECT_CALL(*gl_,
+ AttachShader(kServiceProgramId, kServiceVertexShaderId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ AttachShader(kServiceProgramId, kServiceFragmentShaderId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_LINK_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(1));
+ EXPECT_CALL(*gl_,
+ GetProgramiv(kServiceProgramId, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_ACTIVE_ATTRIBUTES, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(
+ *gl_,
+ GetProgramiv(kServiceProgramId, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_ACTIVE_UNIFORMS, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(
+ *gl_,
+ GetProgramiv(kServiceProgramId, GL_ACTIVE_UNIFORM_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+
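+  // Attach both shaders through the decoder so that a subsequent
+  // LinkProgram on client_program_id_ can succeed.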
+ cmds::AttachShader attach_cmd;
+ attach_cmd.Init(client_program_id_, kClientVertexShaderId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+
+ attach_cmd.Init(client_program_id_, kClientFragmentShaderId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::UseProgram, 0>(
+ bool /* valid */) {
+ // Needs the same setup as LinkProgram.
+ SpecializedSetup<cmds::LinkProgram, 0>(false);
+
+ EXPECT_CALL(*gl_, LinkProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ cmds::LinkProgram link_cmd;
+ link_cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(link_cmd));
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::ValidateProgram, 0>(
+ bool /* valid */) {
+ // Needs the same setup as LinkProgram.
+ SpecializedSetup<cmds::LinkProgram, 0>(false);
+
+ EXPECT_CALL(*gl_, LinkProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ cmds::LinkProgram link_cmd;
+ link_cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(link_cmd));
+
+ EXPECT_CALL(*gl_,
+ GetProgramiv(kServiceProgramId, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+};
+
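+// The Uniform* tests need a current program that contains a uniform of the
+// matching GL type.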
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform1f, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform1fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform1ivImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2f, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC2);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2i, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC2);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC2);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2ivImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC2);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3f, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC3);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3i, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC3);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC3);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3ivImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC3);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4f, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC4);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4i, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC4);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC4);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4ivImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC4);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix2fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_MAT2);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix3fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_MAT3);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix4fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_MAT4);
+};
+
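+// The TexParameter* tests operate on whatever texture is bound to
+// GL_TEXTURE_2D, so bind one first.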
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameterf, 0>(
+ bool /* valid */) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameteri, 0>(
+ bool /* valid */) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameterfvImmediate, 0>(
+ bool /* valid */) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+};
+
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameterivImmediate, 0>(
+ bool /* valid */) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+};
+
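+// The auto-generated per-command tests are compiled directly into this
+// translation unit.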
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h"
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_INT) {
+ TestAcceptedUniform(GL_INT, Program::kUniform1i);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_INT_VEC2) {
+ TestAcceptedUniform(GL_INT_VEC2, Program::kUniform2i);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_INT_VEC3) {
+ TestAcceptedUniform(GL_INT_VEC3, Program::kUniform3i);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_INT_VEC4) {
+ TestAcceptedUniform(GL_INT_VEC4, Program::kUniform4i);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_BOOL) {
+ TestAcceptedUniform(GL_BOOL, Program::kUniform1i | Program::kUniform1f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_BOOL_VEC2) {
+ TestAcceptedUniform(GL_BOOL_VEC2, Program::kUniform2i | Program::kUniform2f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_BOOL_VEC3) {
+ TestAcceptedUniform(GL_BOOL_VEC3, Program::kUniform3i | Program::kUniform3f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_BOOL_VEC4) {
+ TestAcceptedUniform(GL_BOOL_VEC4, Program::kUniform4i | Program::kUniform4f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT) {
+ TestAcceptedUniform(GL_FLOAT, Program::kUniform1f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_VEC2) {
+ TestAcceptedUniform(GL_FLOAT_VEC2, Program::kUniform2f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_VEC3) {
+ TestAcceptedUniform(GL_FLOAT_VEC3, Program::kUniform3f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_VEC4) {
+ TestAcceptedUniform(GL_FLOAT_VEC4, Program::kUniform4f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_MAT2) {
+ TestAcceptedUniform(GL_FLOAT_MAT2, Program::kUniformMatrix2f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_MAT3) {
+ TestAcceptedUniform(GL_FLOAT_MAT3, Program::kUniformMatrix3f);
+}
+
+TEST_P(GLES2DecoderTest2, AcceptsUniform_GL_FLOAT_MAT4) {
+ TestAcceptedUniform(GL_FLOAT_MAT4, Program::kUniformMatrix4f);
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
new file mode 100644
index 0000000..95c2027
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
@@ -0,0 +1,717 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder_unittest_2.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
+
+TEST_P(GLES2DecoderTest2, ScissorValidArgs) {
+ EXPECT_CALL(*gl_, Scissor(1, 2, 3, 4));
+ SpecializedSetup<cmds::Scissor, 0>(true);
+ cmds::Scissor cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, ScissorInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, Scissor(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::Scissor, 0>(false);
+ cmds::Scissor cmd;
+ cmd.Init(1, 2, -1, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, ScissorInvalidArgs3_0) {
+ EXPECT_CALL(*gl_, Scissor(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::Scissor, 0>(false);
+ cmds::Scissor cmd;
+ cmd.Init(1, 2, 3, -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+// TODO(gman): ShaderBinary
+
+// TODO(gman): ShaderSourceBucket
+
+TEST_P(GLES2DecoderTest2, StencilFuncValidArgs) {
+ EXPECT_CALL(*gl_, StencilFunc(GL_NEVER, 2, 3));
+ SpecializedSetup<cmds::StencilFunc, 0>(true);
+ cmds::StencilFunc cmd;
+ cmd.Init(GL_NEVER, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, StencilFuncSeparateValidArgs) {
+ EXPECT_CALL(*gl_, StencilFuncSeparate(GL_FRONT, GL_NEVER, 3, 4));
+ SpecializedSetup<cmds::StencilFuncSeparate, 0>(true);
+ cmds::StencilFuncSeparate cmd;
+ cmd.Init(GL_FRONT, GL_NEVER, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, StencilMaskValidArgs) {
+ SpecializedSetup<cmds::StencilMask, 0>(true);
+ cmds::StencilMask cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, StencilMaskSeparateValidArgs) {
+ SpecializedSetup<cmds::StencilMaskSeparate, 0>(true);
+ cmds::StencilMaskSeparate cmd;
+ cmd.Init(GL_FRONT, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, StencilOpValidArgs) {
+ EXPECT_CALL(*gl_, StencilOp(GL_KEEP, GL_INCR, GL_KEEP));
+ SpecializedSetup<cmds::StencilOp, 0>(true);
+ cmds::StencilOp cmd;
+ cmd.Init(GL_KEEP, GL_INCR, GL_KEEP);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, StencilOpSeparateValidArgs) {
+ EXPECT_CALL(*gl_, StencilOpSeparate(GL_FRONT, GL_INCR, GL_KEEP, GL_KEEP));
+ SpecializedSetup<cmds::StencilOpSeparate, 0>(true);
+ cmds::StencilOpSeparate cmd;
+ cmd.Init(GL_FRONT, GL_INCR, GL_KEEP, GL_KEEP);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): TexImage2D
+
+TEST_P(GLES2DecoderTest2, TexParameterfValidArgs) {
+ EXPECT_CALL(*gl_,
+ TexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
+ SpecializedSetup<cmds::TexParameterf, 0>(true);
+ cmds::TexParameterf cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterf, 0>(false);
+ cmds::TexParameterf cmd;
+ cmd.Init(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterf, 0>(false);
+ cmds::TexParameterf cmd;
+ cmd.Init(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterf, 0>(false);
+ cmds::TexParameterf cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfvImmediateValidArgs) {
+ cmds::TexParameterfvImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterfvImmediate>();
+ SpecializedSetup<cmds::TexParameterfvImmediate, 0>(true);
+ GLfloat temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_CALL(
+ *gl_,
+ TexParameterf(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ *reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfvImmediateInvalidArgs0_0) {
+ cmds::TexParameterfvImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterfvImmediate>();
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterfvImmediate, 0>(false);
+ GLfloat temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfvImmediateInvalidArgs0_1) {
+ cmds::TexParameterfvImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterfvImmediate>();
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterfvImmediate, 0>(false);
+ GLfloat temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterfvImmediateInvalidArgs1_0) {
+ cmds::TexParameterfvImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterfvImmediate>();
+ EXPECT_CALL(*gl_, TexParameterf(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterfvImmediate, 0>(false);
+ GLfloat temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameteriValidArgs) {
+ EXPECT_CALL(*gl_,
+ TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
+ SpecializedSetup<cmds::TexParameteri, 0>(true);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameteriInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameteri, 0>(false);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameteriInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameteri, 0>(false);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameteriInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameteri, 0>(false);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterivImmediateValidArgs) {
+ cmds::TexParameterivImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterivImmediate>();
+ SpecializedSetup<cmds::TexParameterivImmediate, 0>(true);
+ GLint temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ *reinterpret_cast<GLint*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterivImmediateInvalidArgs0_0) {
+ cmds::TexParameterivImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterivImmediate>();
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterivImmediate, 0>(false);
+ GLint temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterivImmediateInvalidArgs0_1) {
+ cmds::TexParameterivImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterivImmediate>();
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterivImmediate, 0>(false);
+ GLint temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, TexParameterivImmediateInvalidArgs1_0) {
+ cmds::TexParameterivImmediate& cmd =
+ *GetImmediateAs<cmds::TexParameterivImmediate>();
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameterivImmediate, 0>(false);
+ GLint temp[1] = {
+ GL_NEAREST,
+ };
+ cmd.Init(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+// TODO(gman): TexSubImage2D
+
+TEST_P(GLES2DecoderTest2, Uniform1fValidArgs) {
+ EXPECT_CALL(*gl_, Uniform1fv(1, 1, _));
+ SpecializedSetup<cmds::Uniform1f, 0>(true);
+ cmds::Uniform1f cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform1fvImmediateValidArgs) {
+ cmds::Uniform1fvImmediate& cmd = *GetImmediateAs<cmds::Uniform1fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform1fv(1, 2, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform1fvImmediate, 0>(true);
+ GLfloat temp[1 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): Uniform1i
+// TODO(gman): Uniform1ivImmediate
+
+TEST_P(GLES2DecoderTest2, Uniform2fValidArgs) {
+ EXPECT_CALL(*gl_, Uniform2fv(1, 1, _));
+ SpecializedSetup<cmds::Uniform2f, 0>(true);
+ cmds::Uniform2f cmd;
+ cmd.Init(1, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform2fvImmediateValidArgs) {
+ cmds::Uniform2fvImmediate& cmd = *GetImmediateAs<cmds::Uniform2fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform2fv(1, 2, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform2fvImmediate, 0>(true);
+ GLfloat temp[2 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform2iValidArgs) {
+ EXPECT_CALL(*gl_, Uniform2iv(1, 1, _));
+ SpecializedSetup<cmds::Uniform2i, 0>(true);
+ cmds::Uniform2i cmd;
+ cmd.Init(1, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform2ivImmediateValidArgs) {
+ cmds::Uniform2ivImmediate& cmd = *GetImmediateAs<cmds::Uniform2ivImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform2iv(1, 2, reinterpret_cast<GLint*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform2ivImmediate, 0>(true);
+ GLint temp[2 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform3fValidArgs) {
+ EXPECT_CALL(*gl_, Uniform3fv(1, 1, _));
+ SpecializedSetup<cmds::Uniform3f, 0>(true);
+ cmds::Uniform3f cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform3fvImmediateValidArgs) {
+ cmds::Uniform3fvImmediate& cmd = *GetImmediateAs<cmds::Uniform3fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform3fv(1, 2, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform3fvImmediate, 0>(true);
+ GLfloat temp[3 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform3iValidArgs) {
+ EXPECT_CALL(*gl_, Uniform3iv(1, 1, _));
+ SpecializedSetup<cmds::Uniform3i, 0>(true);
+ cmds::Uniform3i cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform3ivImmediateValidArgs) {
+ cmds::Uniform3ivImmediate& cmd = *GetImmediateAs<cmds::Uniform3ivImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform3iv(1, 2, reinterpret_cast<GLint*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform3ivImmediate, 0>(true);
+ GLint temp[3 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform4fValidArgs) {
+ EXPECT_CALL(*gl_, Uniform4fv(1, 1, _));
+ SpecializedSetup<cmds::Uniform4f, 0>(true);
+ cmds::Uniform4f cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform4fvImmediateValidArgs) {
+ cmds::Uniform4fvImmediate& cmd = *GetImmediateAs<cmds::Uniform4fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform4fv(1, 2, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform4fvImmediate, 0>(true);
+ GLfloat temp[4 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform4iValidArgs) {
+ EXPECT_CALL(*gl_, Uniform4iv(1, 1, _));
+ SpecializedSetup<cmds::Uniform4i, 0>(true);
+ cmds::Uniform4i cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, Uniform4ivImmediateValidArgs) {
+ cmds::Uniform4ivImmediate& cmd = *GetImmediateAs<cmds::Uniform4ivImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ Uniform4iv(1, 2, reinterpret_cast<GLint*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::Uniform4ivImmediate, 0>(true);
+ GLint temp[4 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, UniformMatrix2fvImmediateValidArgs) {
+ cmds::UniformMatrix2fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix2fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ UniformMatrix2fv(
+ 1, 2, false, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::UniformMatrix2fvImmediate, 0>(true);
+ GLfloat temp[4 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, UniformMatrix3fvImmediateValidArgs) {
+ cmds::UniformMatrix3fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix3fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ UniformMatrix3fv(
+ 1, 2, false, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::UniformMatrix3fvImmediate, 0>(true);
+ GLfloat temp[9 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, UniformMatrix4fvImmediateValidArgs) {
+ cmds::UniformMatrix4fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix4fvImmediate>();
+ EXPECT_CALL(
+ *gl_,
+ UniformMatrix4fv(
+ 1, 2, false, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ SpecializedSetup<cmds::UniformMatrix4fvImmediate, 0>(true);
+ GLfloat temp[16 * 2] = {
+ 0,
+ };
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, UseProgramValidArgs) {
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId));
+ SpecializedSetup<cmds::UseProgram, 0>(true);
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, UseProgramInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, UseProgram(_)).Times(0);
+ SpecializedSetup<cmds::UseProgram, 0>(false);
+ cmds::UseProgram cmd;
+ cmd.Init(kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, ValidateProgramValidArgs) {
+ EXPECT_CALL(*gl_, ValidateProgram(kServiceProgramId));
+ SpecializedSetup<cmds::ValidateProgram, 0>(true);
+ cmds::ValidateProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib1fValidArgs) {
+ EXPECT_CALL(*gl_, VertexAttrib1f(1, 2));
+ SpecializedSetup<cmds::VertexAttrib1f, 0>(true);
+ cmds::VertexAttrib1f cmd;
+ cmd.Init(1, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib1fvImmediateValidArgs) {
+ cmds::VertexAttrib1fvImmediate& cmd =
+ *GetImmediateAs<cmds::VertexAttrib1fvImmediate>();
+ SpecializedSetup<cmds::VertexAttrib1fvImmediate, 0>(true);
+ GLfloat temp[1] = {
+ 0,
+ };
+ cmd.Init(1, &temp[0]);
+ EXPECT_CALL(*gl_,
+ VertexAttrib1fv(
+ 1, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib2fValidArgs) {
+ EXPECT_CALL(*gl_, VertexAttrib2f(1, 2, 3));
+ SpecializedSetup<cmds::VertexAttrib2f, 0>(true);
+ cmds::VertexAttrib2f cmd;
+ cmd.Init(1, 2, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib2fvImmediateValidArgs) {
+ cmds::VertexAttrib2fvImmediate& cmd =
+ *GetImmediateAs<cmds::VertexAttrib2fvImmediate>();
+ SpecializedSetup<cmds::VertexAttrib2fvImmediate, 0>(true);
+ GLfloat temp[2] = {
+ 0,
+ };
+ cmd.Init(1, &temp[0]);
+ EXPECT_CALL(*gl_,
+ VertexAttrib2fv(
+ 1, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib3fValidArgs) {
+ EXPECT_CALL(*gl_, VertexAttrib3f(1, 2, 3, 4));
+ SpecializedSetup<cmds::VertexAttrib3f, 0>(true);
+ cmds::VertexAttrib3f cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib3fvImmediateValidArgs) {
+ cmds::VertexAttrib3fvImmediate& cmd =
+ *GetImmediateAs<cmds::VertexAttrib3fvImmediate>();
+ SpecializedSetup<cmds::VertexAttrib3fvImmediate, 0>(true);
+ GLfloat temp[3] = {
+ 0,
+ };
+ cmd.Init(1, &temp[0]);
+ EXPECT_CALL(*gl_,
+ VertexAttrib3fv(
+ 1, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib4fValidArgs) {
+ EXPECT_CALL(*gl_, VertexAttrib4f(1, 2, 3, 4, 5));
+ SpecializedSetup<cmds::VertexAttrib4f, 0>(true);
+ cmds::VertexAttrib4f cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, VertexAttrib4fvImmediateValidArgs) {
+ cmds::VertexAttrib4fvImmediate& cmd =
+ *GetImmediateAs<cmds::VertexAttrib4fvImmediate>();
+ SpecializedSetup<cmds::VertexAttrib4fvImmediate, 0>(true);
+ GLfloat temp[4] = {
+ 0,
+ };
+ cmd.Init(1, &temp[0]);
+ EXPECT_CALL(*gl_,
+ VertexAttrib4fv(
+ 1, reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): VertexAttribPointer
+
+TEST_P(GLES2DecoderTest2, ViewportValidArgs) {
+ EXPECT_CALL(*gl_, Viewport(1, 2, 3, 4));
+ SpecializedSetup<cmds::Viewport, 0>(true);
+ cmds::Viewport cmd;
+ cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, ViewportInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, Viewport(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::Viewport, 0>(false);
+ cmds::Viewport cmd;
+ cmd.Init(1, 2, -1, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, ViewportInvalidArgs3_0) {
+ EXPECT_CALL(*gl_, Viewport(_, _, _, _)).Times(0);
+ SpecializedSetup<cmds::Viewport, 0>(false);
+ cmds::Viewport cmd;
+ cmd.Init(1, 2, 3, -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+// TODO(gman): TexStorage2DEXT
+// TODO(gman): GenQueriesEXTImmediate
+// TODO(gman): DeleteQueriesEXTImmediate
+// TODO(gman): BeginQueryEXT
+
+// TODO(gman): EndQueryEXT
+
+// TODO(gman): InsertEventMarkerEXT
+
+// TODO(gman): PushGroupMarkerEXT
+
+TEST_P(GLES2DecoderTest2, PopGroupMarkerEXTValidArgs) {
+ SpecializedSetup<cmds::PopGroupMarkerEXT, 0>(true);
+ cmds::PopGroupMarkerEXT cmd;
+ cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+// TODO(gman): GenVertexArraysOESImmediate
+// TODO(gman): DeleteVertexArraysOESImmediate
+// TODO(gman): IsVertexArrayOES
+// TODO(gman): BindVertexArrayOES
+// TODO(gman): SwapBuffers
+// TODO(gman): GetMaxValueInBufferCHROMIUM
+// TODO(gman): EnableFeatureCHROMIUM
+
+// TODO(gman): ResizeCHROMIUM
+// TODO(gman): GetRequestableExtensionsCHROMIUM
+
+// TODO(gman): RequestExtensionCHROMIUM
+
+// TODO(gman): GetMultipleIntegervCHROMIUM
+
+// TODO(gman): GetProgramInfoCHROMIUM
+
+// TODO(gman): GetTranslatedShaderSourceANGLE
+// TODO(gman): PostSubBufferCHROMIUM
+// TODO(gman): TexImageIOSurface2DCHROMIUM
+// TODO(gman): CopyTextureCHROMIUM
+// TODO(gman): DrawArraysInstancedANGLE
+// TODO(gman): DrawElementsInstancedANGLE
+// TODO(gman): VertexAttribDivisorANGLE
+// TODO(gman): GenMailboxCHROMIUM
+
+// TODO(gman): ProduceTextureCHROMIUMImmediate
+// TODO(gman): ProduceTextureDirectCHROMIUMImmediate
+// TODO(gman): ConsumeTextureCHROMIUMImmediate
+// TODO(gman): CreateAndConsumeTextureCHROMIUMImmediate
+// TODO(gman): BindUniformLocationCHROMIUMBucket
+// TODO(gman): BindTexImage2DCHROMIUM
+// TODO(gman): ReleaseTexImage2DCHROMIUM
+// TODO(gman): TraceBeginCHROMIUM
+
+// TODO(gman): TraceEndCHROMIUM
+// TODO(gman): AsyncTexSubImage2DCHROMIUM
+
+// TODO(gman): AsyncTexImage2DCHROMIUM
+
+// TODO(gman): WaitAsyncTexImage2DCHROMIUM
+
+// TODO(gman): WaitAllAsyncTexImage2DCHROMIUM
+
+// TODO(gman): LoseContextCHROMIUM
+// TODO(gman): InsertSyncPointCHROMIUM
+
+// TODO(gman): WaitSyncPointCHROMIUM
+
+// TODO(gman): DrawBuffersEXTImmediate
+// TODO(gman): DiscardBackbufferCHROMIUM
+
+// TODO(gman): ScheduleOverlayPlaneCHROMIUM
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
new file mode 100644
index 0000000..3fadaf0
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+class GLES2DecoderTest3 : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderTest3() { }
+};
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest3, ::testing::Bool());
+
+TEST_P(GLES2DecoderTest3, TraceBeginCHROMIUM) {
+ const uint32 kBucketId = 123;
+ const char kName[] = "test_command";
+ SetBucketAsCString(kBucketId, kName);
+
+ TraceBeginCHROMIUM begin_cmd;
+ begin_cmd.Init(kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+}
+
+TEST_P(GLES2DecoderTest3, TraceEndCHROMIUM) {
+ // Test end fails if no begin.
+ TraceEndCHROMIUM end_cmd;
+ end_cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ const uint32 kBucketId = 123;
+ const char kName[] = "test_command";
+ SetBucketAsCString(kBucketId, kName);
+
+ TraceBeginCHROMIUM begin_cmd;
+ begin_cmd.Init(kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+
+ end_cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
new file mode 100644
index 0000000..7e93f36
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder_unittest_3.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_async_pixel.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_async_pixel.cc
new file mode 100644
index 0000000..d32870d
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_async_pixel.cc
@@ -0,0 +1,388 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+TEST_P(GLES2DecoderManualInitTest, AsyncPixelTransfers) {
+ InitState init;
+ init.extensions = "GL_CHROMIUM_async_pixel_transfers";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ // Set up the texture.
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ Texture* texture = texture_ref->texture();
+
+ // Set a mock Async delegate
+ StrictMock<gpu::MockAsyncPixelTransferManager>* manager =
+ new StrictMock<gpu::MockAsyncPixelTransferManager>;
+ manager->Initialize(group().texture_manager());
+ decoder_->SetAsyncPixelTransferManagerForTest(manager);
+ StrictMock<gpu::MockAsyncPixelTransferDelegate>* delegate = NULL;
+
+ // Tex(Sub)Image2D upload commands.
+ AsyncTexImage2DCHROMIUM teximage_cmd;
+ teximage_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ 0,
+ 0,
+ 0);
+ AsyncTexSubImage2DCHROMIUM texsubimage_cmd;
+ texsubimage_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ 8,
+ 8,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ 0,
+ 0,
+ 0);
+ WaitAsyncTexImage2DCHROMIUM wait_cmd;
+ wait_cmd.Init(GL_TEXTURE_2D);
+ WaitAllAsyncTexImage2DCHROMIUM wait_all_cmd;
+ wait_all_cmd.Init();
+
+ // No transfer state exists initially.
+ EXPECT_FALSE(
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
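+  // The delegate's AsyncTexImage2D mock saves its completion callback here so
+  // the test can run it later to simulate the async upload finishing.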
+ base::Closure bind_callback;
+
+ // AsyncTexImage2D
+ {
+ // Create transfer state since it doesn't exist.
+ EXPECT_EQ(texture_ref->num_observers(), 0);
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexImage2D(_, _, _))
+ .WillOnce(SaveArg<2>(&bind_callback))
+ .RetiresOnSaturation();
+ // Command succeeds.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ EXPECT_TRUE(texture->IsImmutable());
+ // The texture is safe but the level has not been defined yet.
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ GLsizei width, height;
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(texture_ref->num_observers(), 1);
+ }
+ {
+ // Async redefinitions are not allowed!
+ // Command fails.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ EXPECT_TRUE(texture->IsImmutable());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ }
+
+ // Binding/defining of the async transfer
+ {
+ // TODO(epenner): We should check that the manager gets the
+ // BindCompletedAsyncTransfers() call, which is required to
+ // guarantee the delegate calls the bind callback.
+
+ // Simulate the bind callback from the delegate.
+ bind_callback.Run();
+
+ // After the bind callback is run, the texture is safe,
+ // and has the right size etc.
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ GLsizei width, height;
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(width, 8);
+ EXPECT_EQ(height, 8);
+ }
+
+ // AsyncTexSubImage2D
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ decoder_->GetAsyncPixelTransferManager()->ClearPixelTransferDelegateForTest(
+ texture_ref);
+ EXPECT_EQ(texture_ref->num_observers(), 0);
+ texture->SetImmutable(false);
+ {
+ // Create transfer state since it doesn't exist.
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexSubImage2D(_, _)).RetiresOnSaturation();
+ // Command succeeds.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(texsubimage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ EXPECT_TRUE(texture->IsImmutable());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ }
+ {
+ // No transfer is in progress.
+ EXPECT_CALL(*delegate, TransferIsInProgress())
+ .WillOnce(Return(false)) // texSubImage validation
+ .WillOnce(Return(false)) // async validation
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexSubImage2D(_, _)).RetiresOnSaturation();
+ // Command succeeds.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(texsubimage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ EXPECT_TRUE(texture->IsImmutable());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ }
+ {
+ // A transfer is still in progress!
+ EXPECT_CALL(*delegate, TransferIsInProgress())
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ // No async call, command fails.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(texsubimage_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ EXPECT_TRUE(texture->IsImmutable());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ }
+
+ // Delete delegate on DeleteTexture.
+ {
+ EXPECT_EQ(texture_ref->num_observers(), 1);
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ DoDeleteTexture(client_texture_id_, kServiceTextureId);
+ EXPECT_FALSE(
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ texture = NULL;
+ texture_ref = NULL;
+ delegate = NULL;
+ }
+
+ // WaitAsyncTexImage2D
+ {
+ // Get a fresh texture since the existing texture cannot be respecified
+ // asynchronously and AsyncTexSubImage2D does not involve binding.
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceTextureId));
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ texture_ref = GetTexture(client_texture_id_);
+ texture = texture_ref->texture();
+ texture->SetImmutable(false);
+ // Create transfer state since it doesn't exist.
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexImage2D(_, _, _)).RetiresOnSaturation();
+ // Start async transfer.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
+ EXPECT_TRUE(texture->IsImmutable());
+ // Wait for completion.
+ EXPECT_CALL(*delegate, WaitForTransferCompletion());
+ EXPECT_CALL(*manager, BindCompletedAsyncTransfers());
+ EXPECT_EQ(error::kNoError, ExecuteCmd(wait_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ // WaitAllAsyncTexImage2D
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ DoDeleteTexture(client_texture_id_, kServiceTextureId);
+ EXPECT_FALSE(
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ texture = NULL;
+ texture_ref = NULL;
+ delegate = NULL;
+ {
+ // Get a fresh texture since the existing texture cannot be respecified
+ // asynchronously and AsyncTexSubImage2D does not involve binding.
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceTextureId));
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ texture_ref = GetTexture(client_texture_id_);
+ texture = texture_ref->texture();
+ texture->SetImmutable(false);
+ // Create transfer state since it doesn't exist.
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexImage2D(_, _, _)).RetiresOnSaturation();
+ // Start async transfer.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
+ EXPECT_TRUE(texture->IsImmutable());
+ // Wait for completion of all uploads.
+ EXPECT_CALL(*manager, WaitAllAsyncTexImage2D()).RetiresOnSaturation();
+ EXPECT_CALL(*manager, BindCompletedAsyncTransfers());
+ EXPECT_EQ(error::kNoError, ExecuteCmd(wait_all_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ // Remove PixelTransferManager before the decoder destroys.
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ decoder_->ResetAsyncPixelTransferManagerForTest();
+ manager = NULL;
+}
+
+TEST_P(GLES2DecoderManualInitTest, AsyncPixelTransferManager) {
+ InitState init;
+ init.extensions = "GL_CHROMIUM_async_pixel_transfers";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ // Set up the texture.
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+
+ // Set a mock Async delegate.
+ StrictMock<gpu::MockAsyncPixelTransferManager>* manager =
+ new StrictMock<gpu::MockAsyncPixelTransferManager>;
+ manager->Initialize(group().texture_manager());
+ decoder_->SetAsyncPixelTransferManagerForTest(manager);
+ StrictMock<gpu::MockAsyncPixelTransferDelegate>* delegate = NULL;
+
+ AsyncTexImage2DCHROMIUM teximage_cmd;
+ teximage_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ 0,
+ 0,
+ 0);
+
+ // No transfer delegate exists initially.
+ EXPECT_FALSE(
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
+ // Create delegate on AsyncTexImage2D.
+ {
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexImage2D(_, _, _)).RetiresOnSaturation();
+
+ // Command succeeds.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ // Delegate is cached.
+ EXPECT_EQ(delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
+ // Delete delegate on manager teardown.
+ {
+ EXPECT_EQ(texture_ref->num_observers(), 1);
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ decoder_->ResetAsyncPixelTransferManagerForTest();
+ manager = NULL;
+
+ // Texture ref still valid.
+ EXPECT_EQ(texture_ref, GetTexture(client_texture_id_));
+ EXPECT_EQ(texture_ref->num_observers(), 0);
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
new file mode 100644
index 0000000..36b14e5
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
@@ -0,0 +1,484 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+TEST_P(GLES2DecoderWithShaderTest, GetVertexAttribPointervSucceeds) {
+ const float dummy = 0;
+ const GLuint kOffsetToTestFor = sizeof(dummy) * 4;
+ const GLuint kIndexToTest = 1;
+ GetVertexAttribPointerv::Result* result =
+ static_cast<GetVertexAttribPointerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ const GLuint* result_value = result->GetData();
+ // Test that initial value is 0.
+ GetVertexAttribPointerv cmd;
+ cmd.Init(kIndexToTest,
+ GL_VERTEX_ATTRIB_ARRAY_POINTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(sizeof(*result_value), result->size);
+ EXPECT_EQ(0u, *result_value);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Set the value and see that we get it.
+ SetupVertexBuffer();
+ DoVertexAttribPointer(kIndexToTest, 2, GL_FLOAT, 0, kOffsetToTestFor);
+ result->size = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(sizeof(*result_value), result->size);
+ EXPECT_EQ(kOffsetToTestFor, *result_value);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetVertexAttribPointervBadArgsFails) {
+ const GLuint kIndexToTest = 1;
+ GetVertexAttribPointerv::Result* result =
+ static_cast<GetVertexAttribPointerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ const GLuint* result_value = result->GetData();
+ // Test pname invalid fails.
+ GetVertexAttribPointerv cmd;
+ cmd.Init(kIndexToTest,
+ GL_VERTEX_ATTRIB_ARRAY_POINTER + 1,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(kInitialResult, *result_value);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ // Test index out of range fails.
+ result->size = 0;
+ cmd.Init(kNumVertexAttribs,
+ GL_VERTEX_ATTRIB_ARRAY_POINTER,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(kInitialResult, *result_value);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
+ // Test memory id bad fails.
+ cmd.Init(kIndexToTest,
+ GL_VERTEX_ATTRIB_ARRAY_POINTER,
+ kInvalidSharedMemoryId,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+
+ // Test memory offset bad fails.
+ cmd.Init(kIndexToTest,
+ GL_VERTEX_ATTRIB_ARRAY_POINTER,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, BindBufferToDifferentTargetFails) {
+ // Bind the buffer to GL_ARRAY_BUFFER
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ // Attempt to rebind to GL_ELEMENT_ARRAY_BUFFER
+  // NOTE: Real GLES2 does not have this restriction, but WebGL does and so
+  // do we. This restriction can be removed at runtime.
+ EXPECT_CALL(*gl_, BindBuffer(_, _)).Times(0);
+ BindBuffer cmd;
+ cmd.Init(GL_ELEMENT_ARRAY_BUFFER, client_buffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, VertexAttribPointer) {
+ SetupVertexBuffer();
+ static const GLenum types[] = {
+ GL_BYTE, GL_UNSIGNED_BYTE, GL_SHORT, GL_UNSIGNED_SHORT,
+ GL_FLOAT, GL_FIXED, GL_INT, GL_UNSIGNED_INT,
+ };
+ static const GLsizei sizes[] = {
+ 1, 1, 2, 2, 4, 4, 4, 4,
+ };
+ static const GLuint indices[] = {
+ 0, 1, kNumVertexAttribs - 1, kNumVertexAttribs,
+ };
+ static const GLsizei offset_mult[] = {
+ 0, 0, 1, 1, 2, 1000,
+ };
+ static const GLsizei offset_offset[] = {
+ 0, 1, 0, 1, 0, 0,
+ };
+ static const GLsizei stride_mult[] = {
+ -1, 0, 0, 1, 1, 2, 1000,
+ };
+ static const GLsizei stride_offset[] = {
+ 0, 0, 1, 0, 1, 0, 0,
+ };
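+  // Exhaustively combine type, index, size, offset, stride and normalize
+  // values, compute which combinations the decoder should accept, and verify
+  // that valid ones reach the GL while invalid ones produce the expected
+  // GL error without any GL call.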
+ for (size_t tt = 0; tt < arraysize(types); ++tt) {
+ GLenum type = types[tt];
+ GLsizei num_bytes = sizes[tt];
+ for (size_t ii = 0; ii < arraysize(indices); ++ii) {
+ GLuint index = indices[ii];
+ for (GLint size = 0; size < 5; ++size) {
+ for (size_t oo = 0; oo < arraysize(offset_mult); ++oo) {
+ GLuint offset = num_bytes * offset_mult[oo] + offset_offset[oo];
+ for (size_t ss = 0; ss < arraysize(stride_mult); ++ss) {
+ GLsizei stride = num_bytes * stride_mult[ss] + stride_offset[ss];
+ for (int normalize = 0; normalize < 2; ++normalize) {
+ bool index_good = index < static_cast<GLuint>(kNumVertexAttribs);
+ bool size_good = (size > 0 && size < 5);
+ bool offset_good = (offset % num_bytes == 0);
+ bool stride_good =
+ (stride % num_bytes == 0) && stride >= 0 && stride <= 255;
+ bool type_good = (type != GL_INT && type != GL_UNSIGNED_INT &&
+ type != GL_FIXED);
+ bool good = size_good && offset_good && stride_good &&
+ type_good && index_good;
+ bool call = good && (type != GL_FIXED);
+ if (call) {
+ EXPECT_CALL(*gl_,
+ VertexAttribPointer(index,
+ size,
+ type,
+ normalize,
+ stride,
+ BufferOffset(offset)));
+ }
+ VertexAttribPointer cmd;
+ cmd.Init(index, size, type, normalize, stride, offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ if (good) {
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ } else if (size_good && offset_good && stride_good && type_good &&
+ !index_good) {
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ } else if (size_good && offset_good && stride_good &&
+ !type_good && index_good) {
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ } else if (size_good && offset_good && !stride_good &&
+ type_good && index_good) {
+ if (stride < 0 || stride > 255) {
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ } else {
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ }
+ } else if (size_good && !offset_good && stride_good &&
+ type_good && index_good) {
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ } else if (!size_good && offset_good && stride_good &&
+ type_good && index_good) {
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ } else {
+ EXPECT_NE(GL_NO_ERROR, GetGLError());
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+class GLES2DecoderVertexArraysOESTest : public GLES2DecoderWithShaderTest {
+ public:
+ GLES2DecoderVertexArraysOESTest() {}
+
+ bool vertex_array_deleted_manually_;
+
+ virtual void SetUp() {
+ InitState init;
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ SetupDefaultProgram();
+
+ AddExpectationsForGenVertexArraysOES();
+ GenHelper<GenVertexArraysOESImmediate>(client_vertexarray_id_);
+
+ vertex_array_deleted_manually_ = false;
+ }
+
+ virtual void TearDown() {
+    // vertex_array_deleted_manually_ should only be set if the test handled
+    // deletion of the vertex array itself. The expectations are necessary
+    // because vertex array objects are not shareable, and thus not managed in
+    // the ContextGroup, meaning they will be destroyed during test tear down.
+ if (!vertex_array_deleted_manually_) {
+ AddExpectationsForDeleteVertexArraysOES();
+ }
+
+ GLES2DecoderWithShaderTest::TearDown();
+ }
+
+ void GenVertexArraysOESImmediateValidArgs() {
+ AddExpectationsForGenVertexArraysOES();
+ GenVertexArraysOESImmediate* cmd =
+ GetImmediateAs<GenVertexArraysOESImmediate>();
+ GLuint temp = kNewClientId;
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetVertexArrayInfo(kNewClientId) != NULL);
+ AddExpectationsForDeleteVertexArraysOES();
+ }
+
+ void GenVertexArraysOESImmediateInvalidArgs() {
+ EXPECT_CALL(*gl_, GenVertexArraysOES(_, _)).Times(0);
+ GenVertexArraysOESImmediate* cmd =
+ GetImmediateAs<GenVertexArraysOESImmediate>();
+ cmd->Init(1, &client_vertexarray_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_vertexarray_id_)));
+ }
+
+ void DeleteVertexArraysOESImmediateValidArgs() {
+ AddExpectationsForDeleteVertexArraysOES();
+ DeleteVertexArraysOESImmediate& cmd =
+ *GetImmediateAs<DeleteVertexArraysOESImmediate>();
+ cmd.Init(1, &client_vertexarray_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_vertexarray_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetVertexArrayInfo(client_vertexarray_id_) == NULL);
+ vertex_array_deleted_manually_ = true;
+ }
+
+ void DeleteVertexArraysOESImmediateInvalidArgs() {
+ DeleteVertexArraysOESImmediate& cmd =
+ *GetImmediateAs<DeleteVertexArraysOESImmediate>();
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ }
+
+ void DeleteBoundVertexArraysOESImmediateValidArgs() {
+ BindVertexArrayOESValidArgs();
+
+ AddExpectationsForDeleteBoundVertexArraysOES();
+ DeleteVertexArraysOESImmediate& cmd =
+ *GetImmediateAs<DeleteVertexArraysOESImmediate>();
+ cmd.Init(1, &client_vertexarray_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_vertexarray_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetVertexArrayInfo(client_vertexarray_id_) == NULL);
+ vertex_array_deleted_manually_ = true;
+ }
+
+ void IsVertexArrayOESValidArgs() {
+ IsVertexArrayOES cmd;
+ cmd.Init(client_vertexarray_id_, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ void IsVertexArrayOESInvalidArgsBadSharedMemoryId() {
+ IsVertexArrayOES cmd;
+ cmd.Init(
+ client_vertexarray_id_, kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(
+ client_vertexarray_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ }
+
+ void BindVertexArrayOESValidArgs() {
+ AddExpectationsForBindVertexArrayOES();
+ BindVertexArrayOES cmd;
+ cmd.Init(client_vertexarray_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ void BindVertexArrayOESValidArgsNewId() {
+ BindVertexArrayOES cmd;
+ cmd.Init(kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderVertexArraysOESTest,
+ ::testing::Bool());
+
+class GLES2DecoderEmulatedVertexArraysOESTest
+ : public GLES2DecoderVertexArraysOESTest {
+ public:
+ GLES2DecoderEmulatedVertexArraysOESTest() {}
+
+ virtual void SetUp() {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ init.use_native_vao = false;
+ InitDecoder(init);
+ SetupDefaultProgram();
+
+ AddExpectationsForGenVertexArraysOES();
+ GenHelper<GenVertexArraysOESImmediate>(client_vertexarray_id_);
+
+ vertex_array_deleted_manually_ = false;
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderEmulatedVertexArraysOESTest,
+ ::testing::Bool());
+
+// Test vertex array objects with native support
+TEST_P(GLES2DecoderVertexArraysOESTest, GenVertexArraysOESImmediateValidArgs) {
+ GenVertexArraysOESImmediateValidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ GenVertexArraysOESImmediateValidArgs) {
+ GenVertexArraysOESImmediateValidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest,
+ GenVertexArraysOESImmediateInvalidArgs) {
+ GenVertexArraysOESImmediateInvalidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ GenVertexArraysOESImmediateInvalidArgs) {
+ GenVertexArraysOESImmediateInvalidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest,
+ DeleteVertexArraysOESImmediateValidArgs) {
+ DeleteVertexArraysOESImmediateValidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ DeleteVertexArraysOESImmediateValidArgs) {
+ DeleteVertexArraysOESImmediateValidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest,
+ DeleteVertexArraysOESImmediateInvalidArgs) {
+ DeleteVertexArraysOESImmediateInvalidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ DeleteVertexArraysOESImmediateInvalidArgs) {
+ DeleteVertexArraysOESImmediateInvalidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest,
+ DeleteBoundVertexArraysOESImmediateValidArgs) {
+ DeleteBoundVertexArraysOESImmediateValidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ DeleteBoundVertexArraysOESImmediateValidArgs) {
+ DeleteBoundVertexArraysOESImmediateValidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest, IsVertexArrayOESValidArgs) {
+ IsVertexArrayOESValidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest, IsVertexArrayOESValidArgs) {
+ IsVertexArrayOESValidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest,
+ IsVertexArrayOESInvalidArgsBadSharedMemoryId) {
+ IsVertexArrayOESInvalidArgsBadSharedMemoryId();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ IsVertexArrayOESInvalidArgsBadSharedMemoryId) {
+ IsVertexArrayOESInvalidArgsBadSharedMemoryId();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest, BindVertexArrayOESValidArgs) {
+ BindVertexArrayOESValidArgs();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest, BindVertexArrayOESValidArgs) {
+ BindVertexArrayOESValidArgs();
+}
+
+TEST_P(GLES2DecoderVertexArraysOESTest, BindVertexArrayOESValidArgsNewId) {
+ BindVertexArrayOESValidArgsNewId();
+}
+TEST_P(GLES2DecoderEmulatedVertexArraysOESTest,
+ BindVertexArrayOESValidArgsNewId) {
+ BindVertexArrayOESValidArgsNewId();
+}
+
+TEST_P(GLES2DecoderTest, BufferDataGLError) {
+ GLenum target = GL_ARRAY_BUFFER;
+ GLsizeiptr size = 4;
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ BufferManager* manager = group().buffer_manager();
+ Buffer* buffer = manager->GetBuffer(client_buffer_id_);
+ ASSERT_TRUE(buffer != NULL);
+ EXPECT_EQ(0, buffer->size());
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BufferData(target, size, _, GL_STREAM_DRAW))
+ .Times(1)
+ .RetiresOnSaturation();
+ BufferData cmd;
+ cmd.Init(target, size, 0, 0, GL_STREAM_DRAW);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_EQ(0, buffer->size());
+}
+
+// TODO(gman): BufferData
+
+// TODO(gman): BufferDataImmediate
+
+// TODO(gman): BufferSubData
+
+// TODO(gman): BufferSubDataImmediate
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
new file mode 100644
index 0000000..136834d
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -0,0 +1,1679 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/logger.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface.h"
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::InvokeWithoutArgs;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgPointee;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+using ::testing::WithArg;
+
+namespace {
+
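+// Ensures the InitState's extension string matches its use_native_vao flag:
+// appends a platform-appropriate vertex array object extension when native
+// VAOs are requested, and checks that none is present when they are not.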
+void NormalizeInitState(gpu::gles2::GLES2DecoderTestBase::InitState* init) {
+ CHECK(init);
+ const char* kVAOExtensions[] = {
+ "GL_OES_vertex_array_object",
+ "GL_ARB_vertex_array_object",
+ "GL_APPLE_vertex_array_object"
+ };
+ bool contains_vao_extension = false;
+ for (size_t ii = 0; ii < arraysize(kVAOExtensions); ++ii) {
+ if (init->extensions.find(kVAOExtensions[ii]) != std::string::npos) {
+ contains_vao_extension = true;
+ break;
+ }
+ }
+ if (init->use_native_vao) {
+ if (contains_vao_extension)
+ return;
+ if (!init->extensions.empty())
+ init->extensions += " ";
+ if (StartsWithASCII(init->gl_version, "opengl es", false)) {
+ init->extensions += kVAOExtensions[0];
+ } else {
+#if !defined(OS_MACOSX)
+ init->extensions += kVAOExtensions[1];
+#else
+ init->extensions += kVAOExtensions[2];
+#endif // OS_MACOSX
+ }
+ } else {
+ // Make sure we don't set up an invalid InitState.
+ CHECK(!contains_vao_extension);
+ }
+}
+
+} // namespace Anonymous
+
+namespace gpu {
+namespace gles2 {
+
+GLES2DecoderTestBase::GLES2DecoderTestBase()
+ : surface_(NULL),
+ context_(NULL),
+ memory_tracker_(NULL),
+ client_buffer_id_(100),
+ client_framebuffer_id_(101),
+ client_program_id_(102),
+ client_renderbuffer_id_(103),
+ client_shader_id_(104),
+ client_texture_id_(106),
+ client_element_buffer_id_(107),
+ client_vertex_shader_id_(121),
+ client_fragment_shader_id_(122),
+ client_query_id_(123),
+ client_vertexarray_id_(124),
+ service_renderbuffer_id_(0),
+ service_renderbuffer_valid_(false),
+ ignore_cached_state_for_test_(GetParam()),
+ cached_color_mask_red_(true),
+ cached_color_mask_green_(true),
+ cached_color_mask_blue_(true),
+ cached_color_mask_alpha_(true),
+ cached_depth_mask_(true),
+ cached_stencil_front_mask_(static_cast<GLuint>(-1)),
+ cached_stencil_back_mask_(static_cast<GLuint>(-1)) {
+ memset(immediate_buffer_, 0xEE, sizeof(immediate_buffer_));
+}
+
+GLES2DecoderTestBase::~GLES2DecoderTestBase() {}
+
+void GLES2DecoderTestBase::SetUp() {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+}
+
+void GLES2DecoderTestBase::AddExpectationsForVertexAttribManager() {
+ for (GLint ii = 0; ii < kNumVertexAttribs; ++ii) {
+ EXPECT_CALL(*gl_, VertexAttrib4f(ii, 0.0f, 0.0f, 0.0f, 1.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+GLES2DecoderTestBase::InitState::InitState()
+ : has_alpha(false),
+ has_depth(false),
+ has_stencil(false),
+ request_alpha(false),
+ request_depth(false),
+ request_stencil(false),
+ bind_generates_resource(false),
+ lose_context_when_out_of_memory(false),
+ use_native_vao(true) {
+}
+
+void GLES2DecoderTestBase::InitDecoder(const InitState& init) {
+ InitDecoderWithCommandLine(init, NULL);
+}
+
+void GLES2DecoderTestBase::InitDecoderWithCommandLine(
+ const InitState& init,
+ const base::CommandLine* command_line) {
+ InitState normalized_init = init;
+ NormalizeInitState(&normalized_init);
+ Framebuffer::ClearFramebufferCompleteComboMap();
+
+ gfx::SetGLGetProcAddressProc(gfx::MockGLInterface::GetGLProcAddress);
+ gfx::GLSurface::InitializeOneOffWithMockBindingsForTests();
+
+ gl_.reset(new StrictMock<MockGLInterface>());
+ ::gfx::MockGLInterface::SetGLInterface(gl_.get());
+
+ SetupMockGLBehaviors();
+
+ // Only create stream texture manager if extension is requested.
+ std::vector<std::string> list;
+ base::SplitString(normalized_init.extensions, ' ', &list);
+ scoped_refptr<FeatureInfo> feature_info;
+ if (command_line)
+ feature_info = new FeatureInfo(*command_line);
+ group_ = scoped_refptr<ContextGroup>(
+ new ContextGroup(NULL,
+ memory_tracker_,
+ new ShaderTranslatorCache,
+ feature_info.get(),
+ normalized_init.bind_generates_resource));
+ bool use_default_textures = normalized_init.bind_generates_resource;
+
+ InSequence sequence;
+
+ surface_ = new gfx::GLSurfaceStub;
+ surface_->SetSize(gfx::Size(kBackBufferWidth, kBackBufferHeight));
+
+ // Context needs to be created before initializing ContextGroup, which will
+ // in turn initialize FeatureInfo, which needs a context to determine
+ // extension support.
+ context_ = new gfx::GLContextStubWithExtensions;
+ context_->AddExtensionsString(normalized_init.extensions.c_str());
+ context_->SetGLVersionString(normalized_init.gl_version.c_str());
+
+ context_->MakeCurrent(surface_.get());
+ gfx::GLSurface::InitializeDynamicMockBindingsForTests(context_.get());
+
+ TestHelper::SetupContextGroupInitExpectations(
+ gl_.get(),
+ DisallowedFeatures(),
+ normalized_init.extensions.c_str(),
+ normalized_init.gl_version.c_str(),
+ normalized_init.bind_generates_resource);
+
+ // We initialize the ContextGroup with a MockGLES2Decoder so that
+ // we can use the ContextGroup to figure out how the real GLES2Decoder
+ // will initialize itself.
+ mock_decoder_.reset(new MockGLES2Decoder());
+
+ // Install FakeDoCommands handler so we can use individual DoCommand()
+ // expectations.
+ EXPECT_CALL(*mock_decoder_, DoCommands(_, _, _, _)).WillRepeatedly(
+ Invoke(mock_decoder_.get(), &MockGLES2Decoder::FakeDoCommands));
+
+ EXPECT_TRUE(
+ group_->Initialize(mock_decoder_.get(), DisallowedFeatures()));
+
+ if (group_->feature_info()->feature_flags().native_vertex_array_object) {
+ EXPECT_CALL(*gl_, GenVertexArraysOES(1, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceVertexArrayId))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindVertexArrayOES(_)).Times(1).RetiresOnSaturation();
+ }
+
+ if (group_->feature_info()->workarounds().init_vertex_attributes)
+ AddExpectationsForVertexAttribManager();
+
+ AddExpectationsForBindVertexArrayOES();
+
+ EXPECT_CALL(*gl_, EnableVertexAttribArray(0))
+ .Times(1)
+ .RetiresOnSaturation();
+ static GLuint attrib_0_id[] = {
+ kServiceAttrib0BufferId,
+ };
+ static GLuint fixed_attrib_buffer_id[] = {
+ kServiceFixedAttribBufferId,
+ };
+ EXPECT_CALL(*gl_, GenBuffersARB(arraysize(attrib_0_id), _))
+ .WillOnce(SetArrayArgument<1>(attrib_0_id,
+ attrib_0_id + arraysize(attrib_0_id)))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, kServiceAttrib0BufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribPointer(0, 1, GL_FLOAT, GL_FALSE, 0, NULL))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GenBuffersARB(arraysize(fixed_attrib_buffer_id), _))
+ .WillOnce(SetArrayArgument<1>(
+ fixed_attrib_buffer_id,
+ fixed_attrib_buffer_id + arraysize(fixed_attrib_buffer_id)))
+ .RetiresOnSaturation();
+
+ for (GLint tt = 0; tt < TestHelper::kNumTextureUnits; ++tt) {
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0 + tt))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (group_->feature_info()->feature_flags().oes_egl_image_external) {
+ EXPECT_CALL(*gl_,
+ BindTexture(GL_TEXTURE_EXTERNAL_OES,
+ use_default_textures
+ ? TestHelper::kServiceDefaultExternalTextureId
+ : 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (group_->feature_info()->feature_flags().arb_texture_rectangle) {
+ EXPECT_CALL(
+ *gl_,
+ BindTexture(GL_TEXTURE_RECTANGLE_ARB,
+ use_default_textures
+ ? TestHelper::kServiceDefaultRectangleTextureId
+ : 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_,
+ BindTexture(GL_TEXTURE_CUBE_MAP,
+ use_default_textures
+ ? TestHelper::kServiceDefaultTextureCubemapId
+ : 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(
+ *gl_,
+ BindTexture(
+ GL_TEXTURE_2D,
+ use_default_textures ? TestHelper::kServiceDefaultTexture2dId : 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetIntegerv(GL_ALPHA_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(normalized_init.has_alpha ? 8 : 0))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(normalized_init.has_depth ? 24 : 0))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(normalized_init.has_stencil ? 8 : 0))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, Enable(GL_VERTEX_PROGRAM_POINT_SIZE))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, Enable(GL_POINT_SPRITE))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ static GLint max_viewport_dims[] = {
+ kMaxViewportWidth,
+ kMaxViewportHeight
+ };
+ EXPECT_CALL(*gl_, GetIntegerv(GL_MAX_VIEWPORT_DIMS, _))
+ .WillOnce(SetArrayArgument<1>(
+ max_viewport_dims, max_viewport_dims + arraysize(max_viewport_dims)))
+ .RetiresOnSaturation();
+
+ SetupInitCapabilitiesExpectations();
+ SetupInitStateExpectations();
+
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindRenderbufferEXT(GL_RENDERBUFFER, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ // TODO(boliu): Remove OS_ANDROID once crbug.com/259023 is fixed and the
+ // workaround has been reverted.
+#if !defined(OS_ANDROID)
+ EXPECT_CALL(*gl_, Clear(
+ GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT))
+ .Times(1)
+ .RetiresOnSaturation();
+#endif
+
+ engine_.reset(new StrictMock<MockCommandBufferEngine>());
+ scoped_refptr<gpu::Buffer> buffer =
+ engine_->GetSharedMemoryBuffer(kSharedMemoryId);
+ shared_memory_offset_ = kSharedMemoryOffset;
+ shared_memory_address_ =
+ reinterpret_cast<int8*>(buffer->memory()) + shared_memory_offset_;
+ shared_memory_id_ = kSharedMemoryId;
+ shared_memory_base_ = buffer->memory();
+
+ static const int32 kLoseContextWhenOutOfMemory = 0x10002;
+
+ int32 attributes[] = {
+ EGL_ALPHA_SIZE,
+ normalized_init.request_alpha ? 8 : 0,
+ EGL_DEPTH_SIZE,
+ normalized_init.request_depth ? 24 : 0,
+ EGL_STENCIL_SIZE,
+ normalized_init.request_stencil ? 8 : 0,
+ kLoseContextWhenOutOfMemory,
+ normalized_init.lose_context_when_out_of_memory ? 1 : 0, };
+ std::vector<int32> attribs(attributes, attributes + arraysize(attributes));
+
+ decoder_.reset(GLES2Decoder::Create(group_.get()));
+ decoder_->SetIgnoreCachedStateForTest(ignore_cached_state_for_test_);
+ decoder_->GetLogger()->set_log_synthesized_gl_errors(false);
+ decoder_->Initialize(surface_,
+ context_,
+ false,
+ surface_->GetSize(),
+ DisallowedFeatures(),
+ attribs);
+ decoder_->MakeCurrent();
+ decoder_->set_engine(engine_.get());
+ decoder_->BeginDecoding();
+
+ EXPECT_CALL(*gl_, GenBuffersARB(_, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceBufferId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenBuffersImmediate>(client_buffer_id_);
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(_, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceFramebufferId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenFramebuffersImmediate>(client_framebuffer_id_);
+ EXPECT_CALL(*gl_, GenRenderbuffersEXT(_, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceRenderbufferId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenRenderbuffersImmediate>(client_renderbuffer_id_);
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenTexturesImmediate>(client_texture_id_);
+ EXPECT_CALL(*gl_, GenBuffersARB(_, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceElementBufferId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenBuffersImmediate>(client_element_buffer_id_);
+
+ DoCreateProgram(client_program_id_, kServiceProgramId);
+ DoCreateShader(GL_VERTEX_SHADER, client_shader_id_, kServiceShaderId);
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+void GLES2DecoderTestBase::ResetDecoder() {
+ if (!decoder_.get())
+ return;
+  // All tests should have read all their GL errors before getting here.
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, _))
+ .Times(2)
+ .RetiresOnSaturation();
+ if (group_->feature_info()->feature_flags().native_vertex_array_object) {
+ EXPECT_CALL(*gl_, DeleteVertexArraysOES(1, Pointee(kServiceVertexArrayId)))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+
+ decoder_->EndDecoding();
+ decoder_->Destroy(true);
+ decoder_.reset();
+ group_->Destroy(mock_decoder_.get(), false);
+ engine_.reset();
+ ::gfx::MockGLInterface::SetGLInterface(NULL);
+ gl_.reset();
+ gfx::ClearGLBindings();
+}
+
+void GLES2DecoderTestBase::TearDown() {
+ ResetDecoder();
+}
+
+void GLES2DecoderTestBase::ExpectEnableDisable(GLenum cap, bool enable) {
+ if (enable) {
+ EXPECT_CALL(*gl_, Enable(cap))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl_, Disable(cap))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+
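+// Reads back the decoder's current error by issuing a GetError command whose
+// result is written to shared memory. The mock GL is primed to report
+// GL_NO_ERROR, so the returned value reflects only errors the decoder itself
+// has recorded.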
+GLint GLES2DecoderTestBase::GetGLError() {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::GetError cmd;
+ cmd.Init(shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ return static_cast<GLint>(*GetSharedMemoryAs<GLenum*>());
+}
+
+void GLES2DecoderTestBase::DoCreateShader(
+ GLenum shader_type, GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, CreateShader(shader_type))
+ .Times(1)
+ .WillOnce(Return(service_id))
+ .RetiresOnSaturation();
+ cmds::CreateShader cmd;
+ cmd.Init(shader_type, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+bool GLES2DecoderTestBase::DoIsShader(GLuint client_id) {
+ return IsObjectHelper<cmds::IsShader, cmds::IsShader::Result>(client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteShader(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, DeleteShader(service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::DeleteShader cmd;
+ cmd.Init(client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoCreateProgram(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, CreateProgram())
+ .Times(1)
+ .WillOnce(Return(service_id))
+ .RetiresOnSaturation();
+ cmds::CreateProgram cmd;
+ cmd.Init(client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+bool GLES2DecoderTestBase::DoIsProgram(GLuint client_id) {
+ return IsObjectHelper<cmds::IsProgram, cmds::IsProgram::Result>(client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteProgram(
+ GLuint client_id, GLuint /* service_id */) {
+ cmds::DeleteProgram cmd;
+ cmd.Init(client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
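+// Uploads |str| (including its terminating NUL) into the given bucket by
+// sizing the bucket with SetBucketSize and copying the bytes through shared
+// memory with SetBucketData.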
+void GLES2DecoderTestBase::SetBucketAsCString(
+ uint32 bucket_id, const char* str) {
+ uint32 size = str ? (strlen(str) + 1) : 0;
+ cmd::SetBucketSize cmd1;
+ cmd1.Init(bucket_id, size);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ if (str) {
+ memcpy(shared_memory_address_, str, size);
+ cmd::SetBucketData cmd2;
+ cmd2.Init(bucket_id, 0, size, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ ClearSharedMemory();
+ }
+}
+
+void GLES2DecoderTestBase::SetupClearTextureExpectations(
+ GLuint service_id,
+ GLuint old_service_id,
+ GLenum bind_target,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLenum format,
+ GLenum type,
+ GLsizei width,
+ GLsizei height) {
+ EXPECT_CALL(*gl_, BindTexture(bind_target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, TexImage2D(
+ target, level, internal_format, width, height, 0, format, type, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(bind_target, old_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForFramebufferClearing(
+ GLenum target,
+ GLuint clear_bits,
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test) {
+ SetupExpectationsForFramebufferClearingMulti(
+ 0,
+ 0,
+ target,
+ clear_bits,
+ restore_red,
+ restore_green,
+ restore_blue,
+ restore_alpha,
+ restore_stencil,
+ restore_depth,
+ restore_scissor_test);
+}
+
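+// A sketch of a typical call (hypothetical values, shown only to document the
+// parameter order): clear the color buffer, then restore a transparent-black
+// clear color, stencil 0 and depth 1.0, with the scissor test left disabled.
+//
+//   SetupExpectationsForFramebufferClearing(
+//       GL_FRAMEBUFFER,            // target
+//       GL_COLOR_BUFFER_BIT,       // clear bits
+//       0.0f, 0.0f, 0.0f, 0.0f,    // restore clear color (RGBA)
+//       0,                         // restore stencil
+//       1.0f,                      // restore depth
+//       false);                    // restore scissor test
+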
+void GLES2DecoderTestBase::SetupExpectationsForRestoreClearState(
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test) {
+ EXPECT_CALL(*gl_, ClearColor(
+ restore_red, restore_green, restore_blue, restore_alpha))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearStencil(restore_stencil))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearDepth(restore_depth))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (restore_scissor_test) {
+ EXPECT_CALL(*gl_, Enable(GL_SCISSOR_TEST))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForFramebufferClearingMulti(
+ GLuint read_framebuffer_service_id,
+ GLuint draw_framebuffer_service_id,
+ GLenum target,
+ GLuint clear_bits,
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test) {
+ // TODO(gman): Figure out why InSequence stopped working.
+ // InSequence sequence;
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(target))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ if (target == GL_READ_FRAMEBUFFER_EXT) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_READ_FRAMEBUFFER_EXT, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindFramebufferEXT(
+ GL_DRAW_FRAMEBUFFER_EXT, read_framebuffer_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if ((clear_bits & GL_COLOR_BUFFER_BIT) != 0) {
+ EXPECT_CALL(*gl_, ClearColor(0.0f, 0.0f, 0.0f, 0.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ SetupExpectationsForColorMask(true, true, true, true);
+ }
+ if ((clear_bits & GL_STENCIL_BUFFER_BIT) != 0) {
+ EXPECT_CALL(*gl_, ClearStencil(0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, StencilMask(static_cast<GLuint>(-1)))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if ((clear_bits & GL_DEPTH_BUFFER_BIT) != 0) {
+ EXPECT_CALL(*gl_, ClearDepth(1.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ SetupExpectationsForDepthMask(true);
+ }
+ SetupExpectationsForEnableDisable(GL_SCISSOR_TEST, false);
+ EXPECT_CALL(*gl_, Clear(clear_bits))
+ .Times(1)
+ .RetiresOnSaturation();
+ SetupExpectationsForRestoreClearState(
+ restore_red, restore_green, restore_blue, restore_alpha,
+ restore_stencil, restore_depth, restore_scissor_test);
+ if (target == GL_READ_FRAMEBUFFER_EXT) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(
+ GL_READ_FRAMEBUFFER_EXT, read_framebuffer_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindFramebufferEXT(
+ GL_DRAW_FRAMEBUFFER_EXT, draw_framebuffer_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupShaderForUniform(GLenum uniform_type) {
+ static AttribInfo attribs[] = {
+ { "foo", 1, GL_FLOAT, 1, },
+ { "goo", 1, GL_FLOAT, 2, },
+ };
+ UniformInfo uniforms[] = {
+ { "bar", 1, uniform_type, 0, 2, -1, },
+ { "car", 4, uniform_type, 1, 1, -1, },
+ };
+ const GLuint kClientVertexShaderId = 5001;
+ const GLuint kServiceVertexShaderId = 6001;
+ const GLuint kClientFragmentShaderId = 5002;
+ const GLuint kServiceFragmentShaderId = 6002;
+ SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
+ client_program_id_, kServiceProgramId,
+ kClientVertexShaderId, kServiceVertexShaderId,
+ kClientFragmentShaderId, kServiceFragmentShaderId);
+
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoBindBuffer(
+ GLenum target, GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, BindBuffer(target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::BindBuffer cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+bool GLES2DecoderTestBase::DoIsBuffer(GLuint client_id) {
+ return IsObjectHelper<cmds::IsBuffer, cmds::IsBuffer::Result>(client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteBuffer(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, DeleteBuffersARB(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ GenHelper<cmds::DeleteBuffersImmediate>(client_id);
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForColorMask(bool red,
+ bool green,
+ bool blue,
+ bool alpha) {
+ if (ignore_cached_state_for_test_ || cached_color_mask_red_ != red ||
+ cached_color_mask_green_ != green || cached_color_mask_blue_ != blue ||
+ cached_color_mask_alpha_ != alpha) {
+ cached_color_mask_red_ = red;
+ cached_color_mask_green_ = green;
+ cached_color_mask_blue_ = blue;
+ cached_color_mask_alpha_ = alpha;
+ EXPECT_CALL(*gl_, ColorMask(red, green, blue, alpha))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForDepthMask(bool mask) {
+ if (ignore_cached_state_for_test_ || cached_depth_mask_ != mask) {
+ cached_depth_mask_ = mask;
+ EXPECT_CALL(*gl_, DepthMask(mask)).Times(1).RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForStencilMask(GLuint front_mask,
+ GLuint back_mask) {
+ if (ignore_cached_state_for_test_ ||
+ cached_stencil_front_mask_ != front_mask) {
+ cached_stencil_front_mask_ = front_mask;
+ EXPECT_CALL(*gl_, StencilMaskSeparate(GL_FRONT, front_mask))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+
+ if (ignore_cached_state_for_test_ ||
+ cached_stencil_back_mask_ != back_mask) {
+ cached_stencil_back_mask_ = back_mask;
+ EXPECT_CALL(*gl_, StencilMaskSeparate(GL_BACK, back_mask))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForEnableDisable(GLenum cap,
+ bool enable) {
+ switch (cap) {
+ case GL_BLEND:
+ if (enable_flags_.cached_blend == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_blend = enable;
+ break;
+ case GL_CULL_FACE:
+ if (enable_flags_.cached_cull_face == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_cull_face = enable;
+ break;
+ case GL_DEPTH_TEST:
+ if (enable_flags_.cached_depth_test == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_depth_test = enable;
+ break;
+ case GL_DITHER:
+ if (enable_flags_.cached_dither == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_dither = enable;
+ break;
+ case GL_POLYGON_OFFSET_FILL:
+ if (enable_flags_.cached_polygon_offset_fill == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_polygon_offset_fill = enable;
+ break;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ if (enable_flags_.cached_sample_alpha_to_coverage == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_sample_alpha_to_coverage = enable;
+ break;
+ case GL_SAMPLE_COVERAGE:
+ if (enable_flags_.cached_sample_coverage == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_sample_coverage = enable;
+ break;
+ case GL_SCISSOR_TEST:
+ if (enable_flags_.cached_scissor_test == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_scissor_test = enable;
+ break;
+ case GL_STENCIL_TEST:
+ if (enable_flags_.cached_stencil_test == enable &&
+ !ignore_cached_state_for_test_)
+ return;
+ enable_flags_.cached_stencil_test = enable;
+ break;
+ default:
+ NOTREACHED();
+ return;
+ }
+ if (enable) {
+ EXPECT_CALL(*gl_, Enable(cap)).Times(1).RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl_, Disable(cap)).Times(1).RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForApplyingDirtyState(
+ bool framebuffer_is_rgb,
+ bool framebuffer_has_depth,
+ bool framebuffer_has_stencil,
+ GLuint color_bits,
+ bool depth_mask,
+ bool depth_enabled,
+ GLuint front_stencil_mask,
+ GLuint back_stencil_mask,
+ bool stencil_enabled) {
+ bool color_mask_red = (color_bits & 0x1000) != 0;
+ bool color_mask_green = (color_bits & 0x0100) != 0;
+ bool color_mask_blue = (color_bits & 0x0010) != 0;
+ bool color_mask_alpha = (color_bits & 0x0001) && !framebuffer_is_rgb;
+
+ SetupExpectationsForColorMask(
+ color_mask_red, color_mask_green, color_mask_blue, color_mask_alpha);
+ SetupExpectationsForDepthMask(depth_mask);
+ SetupExpectationsForStencilMask(front_stencil_mask, back_stencil_mask);
+ SetupExpectationsForEnableDisable(GL_DEPTH_TEST,
+ framebuffer_has_depth && depth_enabled);
+ SetupExpectationsForEnableDisable(GL_STENCIL_TEST,
+ framebuffer_has_stencil && stencil_enabled);
+}
+
+void GLES2DecoderTestBase::SetupExpectationsForApplyingDefaultDirtyState() {
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ true, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+}
+
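+// Sketch of a non-default call (values are illustrative): per the bit
+// decoding in SetupExpectationsForApplyingDirtyState(), 0x1110 encodes a
+// color mask with red, green and blue enabled and alpha disabled.
+//
+//   SetupExpectationsForApplyingDirtyState(false,   // framebuffer is RGB
+//                                          true,    // framebuffer has depth
+//                                          false,   // framebuffer has stencil
+//                                          0x1110,  // color bits
+//                                          true,    // depth mask
+//                                          true,    // depth enabled
+//                                          0,       // front stencil mask
+//                                          0,       // back stencil mask
+//                                          false);  // stencil enabled
+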
+GLES2DecoderTestBase::EnableFlags::EnableFlags()
+ : cached_blend(false),
+ cached_cull_face(false),
+ cached_depth_test(false),
+ cached_dither(true),
+ cached_polygon_offset_fill(false),
+ cached_sample_alpha_to_coverage(false),
+ cached_sample_coverage(false),
+ cached_scissor_test(false),
+ cached_stencil_test(false) {
+}
+
+void GLES2DecoderTestBase::DoBindFramebuffer(
+ GLenum target, GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::BindFramebuffer cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+bool GLES2DecoderTestBase::DoIsFramebuffer(GLuint client_id) {
+ return IsObjectHelper<cmds::IsFramebuffer, cmds::IsFramebuffer::Result>(
+ client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteFramebuffer(
+ GLuint client_id, GLuint service_id,
+ bool reset_draw, GLenum draw_target, GLuint draw_id,
+ bool reset_read, GLenum read_target, GLuint read_id) {
+ if (reset_draw) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(draw_target, draw_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (reset_read) {
+ EXPECT_CALL(*gl_, BindFramebufferEXT(read_target, read_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ GenHelper<cmds::DeleteFramebuffersImmediate>(client_id);
+}
+
+void GLES2DecoderTestBase::DoBindRenderbuffer(
+ GLenum target, GLuint client_id, GLuint service_id) {
+ service_renderbuffer_id_ = service_id;
+ service_renderbuffer_valid_ = true;
+ EXPECT_CALL(*gl_, BindRenderbufferEXT(target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::BindRenderbuffer cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoRenderbufferStorageMultisampleCHROMIUM(
+ GLenum target,
+ GLsizei samples,
+ GLenum internal_format,
+ GLenum gl_format,
+ GLsizei width,
+ GLsizei height) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ RenderbufferStorageMultisampleEXT(
+ target, samples, gl_format, width, height))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::RenderbufferStorageMultisampleCHROMIUM cmd;
+ cmd.Init(target, samples, internal_format, width, height);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+void GLES2DecoderTestBase::RestoreRenderbufferBindings() {
+ GetDecoder()->RestoreRenderbufferBindings();
+ service_renderbuffer_valid_ = false;
+}
+
+void GLES2DecoderTestBase::EnsureRenderbufferBound(bool expect_bind) {
+ EXPECT_NE(expect_bind, service_renderbuffer_valid_);
+
+ if (expect_bind) {
+ service_renderbuffer_valid_ = true;
+ EXPECT_CALL(*gl_,
+ BindRenderbufferEXT(GL_RENDERBUFFER, service_renderbuffer_id_))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl_, BindRenderbufferEXT(_, _)).Times(0);
+ }
+}
+
+bool GLES2DecoderTestBase::DoIsRenderbuffer(GLuint client_id) {
+ return IsObjectHelper<cmds::IsRenderbuffer, cmds::IsRenderbuffer::Result>(
+ client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteRenderbuffer(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ GenHelper<cmds::DeleteRenderbuffersImmediate>(client_id);
+}
+
+void GLES2DecoderTestBase::DoBindTexture(
+ GLenum target, GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, BindTexture(target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::BindTexture cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+bool GLES2DecoderTestBase::DoIsTexture(GLuint client_id) {
+ return IsObjectHelper<cmds::IsTexture, cmds::IsTexture::Result>(client_id);
+}
+
+void GLES2DecoderTestBase::DoDeleteTexture(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, DeleteTextures(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ GenHelper<cmds::DeleteTexturesImmediate>(client_id);
+}
+
+void GLES2DecoderTestBase::DoTexImage2D(
+ GLenum target, GLint level, GLenum internal_format,
+ GLsizei width, GLsizei height, GLint border,
+ GLenum format, GLenum type,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, TexImage2D(target, level, internal_format,
+ width, height, border, format, type, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::TexImage2D cmd;
+ cmd.Init(target, level, internal_format, width, height, format,
+ type, shared_memory_id, shared_memory_offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoTexImage2DConvertInternalFormat(
+ GLenum target, GLint level, GLenum requested_internal_format,
+ GLsizei width, GLsizei height, GLint border,
+ GLenum format, GLenum type,
+ uint32 shared_memory_id, uint32 shared_memory_offset,
+ GLenum expected_internal_format) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, TexImage2D(target, level, expected_internal_format,
+ width, height, border, format, type, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::TexImage2D cmd;
+ cmd.Init(target, level, requested_internal_format, width, height,
+ format, type, shared_memory_id, shared_memory_offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoCompressedTexImage2D(
+ GLenum target, GLint level, GLenum format,
+ GLsizei width, GLsizei height, GLint border,
+ GLsizei size, uint32 bucket_id) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, CompressedTexImage2D(
+ target, level, format, width, height, border, size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ CommonDecoder::Bucket* bucket = decoder_->CreateBucket(bucket_id);
+ bucket->SetSize(size);
+ cmds::CompressedTexImage2DBucket cmd;
+ cmd.Init(
+ target, level, format, width, height,
+ bucket_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoRenderbufferStorage(
+ GLenum target, GLenum internal_format, GLenum actual_format,
+ GLsizei width, GLsizei height, GLenum error) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(
+ target, actual_format, width, height))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(error))
+ .RetiresOnSaturation();
+ cmds::RenderbufferStorage cmd;
+ cmd.Init(target, internal_format, width, height);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoFramebufferTexture2D(
+ GLenum target, GLenum attachment, GLenum textarget,
+ GLuint texture_client_id, GLuint texture_service_id, GLint level,
+ GLenum error) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, FramebufferTexture2DEXT(
+ target, attachment, textarget, texture_service_id, level))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(error))
+ .RetiresOnSaturation();
+ cmds::FramebufferTexture2D cmd;
+ cmd.Init(target, attachment, textarget, texture_client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoFramebufferRenderbuffer(
+ GLenum target,
+ GLenum attachment,
+ GLenum renderbuffer_target,
+ GLuint renderbuffer_client_id,
+ GLuint renderbuffer_service_id,
+ GLenum error) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(
+ target, attachment, renderbuffer_target, renderbuffer_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(error))
+ .RetiresOnSaturation();
+ cmds::FramebufferRenderbuffer cmd;
+ cmd.Init(target, attachment, renderbuffer_target, renderbuffer_client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoVertexAttribPointer(
+ GLuint index, GLint size, GLenum type, GLsizei stride, GLuint offset) {
+ EXPECT_CALL(*gl_,
+ VertexAttribPointer(index, size, type, GL_FALSE, stride,
+ BufferOffset(offset)))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::VertexAttribPointer cmd;
+ cmd.Init(index, size, GL_FLOAT, GL_FALSE, stride, offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoVertexAttribDivisorANGLE(
+ GLuint index, GLuint divisor) {
+ EXPECT_CALL(*gl_,
+ VertexAttribDivisorANGLE(index, divisor))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::VertexAttribDivisorANGLE cmd;
+ cmd.Init(index, divisor);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::AddExpectationsForGenVertexArraysOES() {
+ if (group_->feature_info()->feature_flags().native_vertex_array_object) {
+ EXPECT_CALL(*gl_, GenVertexArraysOES(1, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceVertexArrayId))
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::AddExpectationsForDeleteVertexArraysOES() {
+ if (group_->feature_info()->feature_flags().native_vertex_array_object) {
+ EXPECT_CALL(*gl_, DeleteVertexArraysOES(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::AddExpectationsForDeleteBoundVertexArraysOES() {
+ // Expectations are the same as a delete, followed by binding VAO 0.
+ AddExpectationsForDeleteVertexArraysOES();
+ AddExpectationsForBindVertexArrayOES();
+}
+
+void GLES2DecoderTestBase::AddExpectationsForBindVertexArrayOES() {
+ if (group_->feature_info()->feature_flags().native_vertex_array_object) {
+ EXPECT_CALL(*gl_, BindVertexArrayOES(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ for (uint32 vv = 0; vv < group_->max_vertex_attribs(); ++vv) {
+ AddExpectationsForRestoreAttribState(vv);
+ }
+
+ EXPECT_CALL(*gl_, BindBuffer(GL_ELEMENT_ARRAY_BUFFER, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::AddExpectationsForRestoreAttribState(GLuint attrib) {
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, _))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, VertexAttribPointer(attrib, _, _, _, _, _))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(attrib, _))
+ .Times(testing::AtMost(1))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, _))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ if (attrib != 0 ||
+ gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+
+ // TODO(bajones): Not sure if I can tell which of these will be called
+ EXPECT_CALL(*gl_, EnableVertexAttribArray(attrib))
+ .Times(testing::AtMost(1))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, DisableVertexAttribArray(attrib))
+ .Times(testing::AtMost(1))
+ .RetiresOnSaturation();
+ }
+}
+
+// GCC requires these declarations, but MSVC requires that they not be present.
+#ifndef COMPILER_MSVC
+const int GLES2DecoderTestBase::kBackBufferWidth;
+const int GLES2DecoderTestBase::kBackBufferHeight;
+
+const GLint GLES2DecoderTestBase::kMaxTextureSize;
+const GLint GLES2DecoderTestBase::kMaxCubeMapTextureSize;
+const GLint GLES2DecoderTestBase::kNumVertexAttribs;
+const GLint GLES2DecoderTestBase::kNumTextureUnits;
+const GLint GLES2DecoderTestBase::kMaxTextureImageUnits;
+const GLint GLES2DecoderTestBase::kMaxVertexTextureImageUnits;
+const GLint GLES2DecoderTestBase::kMaxFragmentUniformVectors;
+const GLint GLES2DecoderTestBase::kMaxVaryingVectors;
+const GLint GLES2DecoderTestBase::kMaxVertexUniformVectors;
+const GLint GLES2DecoderTestBase::kMaxViewportWidth;
+const GLint GLES2DecoderTestBase::kMaxViewportHeight;
+
+const GLint GLES2DecoderTestBase::kViewportX;
+const GLint GLES2DecoderTestBase::kViewportY;
+const GLint GLES2DecoderTestBase::kViewportWidth;
+const GLint GLES2DecoderTestBase::kViewportHeight;
+
+const GLuint GLES2DecoderTestBase::kServiceAttrib0BufferId;
+const GLuint GLES2DecoderTestBase::kServiceFixedAttribBufferId;
+
+const GLuint GLES2DecoderTestBase::kServiceBufferId;
+const GLuint GLES2DecoderTestBase::kServiceFramebufferId;
+const GLuint GLES2DecoderTestBase::kServiceRenderbufferId;
+const GLuint GLES2DecoderTestBase::kServiceTextureId;
+const GLuint GLES2DecoderTestBase::kServiceProgramId;
+const GLuint GLES2DecoderTestBase::kServiceShaderId;
+const GLuint GLES2DecoderTestBase::kServiceElementBufferId;
+const GLuint GLES2DecoderTestBase::kServiceQueryId;
+const GLuint GLES2DecoderTestBase::kServiceVertexArrayId;
+
+const int32 GLES2DecoderTestBase::kSharedMemoryId;
+const size_t GLES2DecoderTestBase::kSharedBufferSize;
+const uint32 GLES2DecoderTestBase::kSharedMemoryOffset;
+const int32 GLES2DecoderTestBase::kInvalidSharedMemoryId;
+const uint32 GLES2DecoderTestBase::kInvalidSharedMemoryOffset;
+const uint32 GLES2DecoderTestBase::kInitialResult;
+const uint8 GLES2DecoderTestBase::kInitialMemoryValue;
+
+const uint32 GLES2DecoderTestBase::kNewClientId;
+const uint32 GLES2DecoderTestBase::kNewServiceId;
+const uint32 GLES2DecoderTestBase::kInvalidClientId;
+
+const GLuint GLES2DecoderTestBase::kServiceVertexShaderId;
+const GLuint GLES2DecoderTestBase::kServiceFragmentShaderId;
+
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumShaderId;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumProgramId;
+
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumTextureBufferId;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumVertexBufferId;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumFBOId;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumPositionAttrib;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumTexAttrib;
+const GLuint GLES2DecoderTestBase::kServiceCopyTextureChromiumSamplerLocation;
+
+const GLsizei GLES2DecoderTestBase::kNumVertices;
+const GLsizei GLES2DecoderTestBase::kNumIndices;
+const int GLES2DecoderTestBase::kValidIndexRangeStart;
+const int GLES2DecoderTestBase::kValidIndexRangeCount;
+const int GLES2DecoderTestBase::kInvalidIndexRangeStart;
+const int GLES2DecoderTestBase::kInvalidIndexRangeCount;
+const int GLES2DecoderTestBase::kOutOfRangeIndexRangeEnd;
+const GLuint GLES2DecoderTestBase::kMaxValidIndex;
+
+const GLint GLES2DecoderTestBase::kMaxAttribLength;
+const GLint GLES2DecoderTestBase::kAttrib1Size;
+const GLint GLES2DecoderTestBase::kAttrib2Size;
+const GLint GLES2DecoderTestBase::kAttrib3Size;
+const GLint GLES2DecoderTestBase::kAttrib1Location;
+const GLint GLES2DecoderTestBase::kAttrib2Location;
+const GLint GLES2DecoderTestBase::kAttrib3Location;
+const GLenum GLES2DecoderTestBase::kAttrib1Type;
+const GLenum GLES2DecoderTestBase::kAttrib2Type;
+const GLenum GLES2DecoderTestBase::kAttrib3Type;
+const GLint GLES2DecoderTestBase::kInvalidAttribLocation;
+const GLint GLES2DecoderTestBase::kBadAttribIndex;
+
+const GLint GLES2DecoderTestBase::kMaxUniformLength;
+const GLint GLES2DecoderTestBase::kUniform1Size;
+const GLint GLES2DecoderTestBase::kUniform2Size;
+const GLint GLES2DecoderTestBase::kUniform3Size;
+const GLint GLES2DecoderTestBase::kUniform1RealLocation;
+const GLint GLES2DecoderTestBase::kUniform2RealLocation;
+const GLint GLES2DecoderTestBase::kUniform2ElementRealLocation;
+const GLint GLES2DecoderTestBase::kUniform3RealLocation;
+const GLint GLES2DecoderTestBase::kUniform1FakeLocation;
+const GLint GLES2DecoderTestBase::kUniform2FakeLocation;
+const GLint GLES2DecoderTestBase::kUniform2ElementFakeLocation;
+const GLint GLES2DecoderTestBase::kUniform3FakeLocation;
+const GLint GLES2DecoderTestBase::kUniform1DesiredLocation;
+const GLint GLES2DecoderTestBase::kUniform2DesiredLocation;
+const GLint GLES2DecoderTestBase::kUniform3DesiredLocation;
+const GLenum GLES2DecoderTestBase::kUniform1Type;
+const GLenum GLES2DecoderTestBase::kUniform2Type;
+const GLenum GLES2DecoderTestBase::kUniform3Type;
+const GLenum GLES2DecoderTestBase::kUniformCubemapType;
+const GLint GLES2DecoderTestBase::kInvalidUniformLocation;
+const GLint GLES2DecoderTestBase::kBadUniformIndex;
+
+#endif
+
+const char* GLES2DecoderTestBase::kAttrib1Name = "attrib1";
+const char* GLES2DecoderTestBase::kAttrib2Name = "attrib2";
+const char* GLES2DecoderTestBase::kAttrib3Name = "attrib3";
+const char* GLES2DecoderTestBase::kUniform1Name = "uniform1";
+const char* GLES2DecoderTestBase::kUniform2Name = "uniform2[0]";
+const char* GLES2DecoderTestBase::kUniform3Name = "uniform3[0]";
+
+void GLES2DecoderTestBase::SetupDefaultProgram() {
+ {
+ static AttribInfo attribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ static UniformInfo uniforms[] = {
+ { kUniform1Name, kUniform1Size, kUniform1Type,
+ kUniform1FakeLocation, kUniform1RealLocation,
+ kUniform1DesiredLocation },
+ { kUniform2Name, kUniform2Size, kUniform2Type,
+ kUniform2FakeLocation, kUniform2RealLocation,
+ kUniform2DesiredLocation },
+ { kUniform3Name, kUniform3Size, kUniform3Type,
+ kUniform3FakeLocation, kUniform3RealLocation,
+ kUniform3DesiredLocation },
+ };
+ SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
+ client_program_id_, kServiceProgramId,
+ client_vertex_shader_id_, kServiceVertexShaderId,
+ client_fragment_shader_id_, kServiceFragmentShaderId);
+ }
+
+ {
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+void GLES2DecoderTestBase::SetupCubemapProgram() {
+ {
+ static AttribInfo attribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ static UniformInfo uniforms[] = {
+ { kUniform1Name, kUniform1Size, kUniformCubemapType,
+ kUniform1FakeLocation, kUniform1RealLocation,
+ kUniform1DesiredLocation, },
+ { kUniform2Name, kUniform2Size, kUniform2Type,
+ kUniform2FakeLocation, kUniform2RealLocation,
+ kUniform2DesiredLocation, },
+ { kUniform3Name, kUniform3Size, kUniform3Type,
+ kUniform3FakeLocation, kUniform3RealLocation,
+ kUniform3DesiredLocation, },
+ };
+ SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
+ client_program_id_, kServiceProgramId,
+ client_vertex_shader_id_, kServiceVertexShaderId,
+ client_fragment_shader_id_, kServiceFragmentShaderId);
+ }
+
+ {
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+void GLES2DecoderTestBase::SetupSamplerExternalProgram() {
+ {
+ static AttribInfo attribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ static UniformInfo uniforms[] = {
+ { kUniform1Name, kUniform1Size, kUniformSamplerExternalType,
+ kUniform1FakeLocation, kUniform1RealLocation,
+ kUniform1DesiredLocation, },
+ { kUniform2Name, kUniform2Size, kUniform2Type,
+ kUniform2FakeLocation, kUniform2RealLocation,
+ kUniform2DesiredLocation, },
+ { kUniform3Name, kUniform3Size, kUniform3Type,
+ kUniform3FakeLocation, kUniform3RealLocation,
+ kUniform3DesiredLocation, },
+ };
+ SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
+ client_program_id_, kServiceProgramId,
+ client_vertex_shader_id_, kServiceVertexShaderId,
+ client_fragment_shader_id_, kServiceFragmentShaderId);
+ }
+
+ {
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+void GLES2DecoderWithShaderTestBase::TearDown() {
+ GLES2DecoderTestBase::TearDown();
+}
+
+void GLES2DecoderTestBase::SetupShader(
+ GLES2DecoderTestBase::AttribInfo* attribs, size_t num_attribs,
+ GLES2DecoderTestBase::UniformInfo* uniforms, size_t num_uniforms,
+ GLuint program_client_id, GLuint program_service_id,
+ GLuint vertex_shader_client_id, GLuint vertex_shader_service_id,
+ GLuint fragment_shader_client_id, GLuint fragment_shader_service_id) {
+ {
+ InSequence s;
+
+ EXPECT_CALL(*gl_,
+ AttachShader(program_service_id, vertex_shader_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ AttachShader(program_service_id, fragment_shader_service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ TestHelper::SetupShader(
+ gl_.get(), attribs, num_attribs, uniforms, num_uniforms,
+ program_service_id);
+ }
+
+ DoCreateShader(
+ GL_VERTEX_SHADER, vertex_shader_client_id, vertex_shader_service_id);
+ DoCreateShader(
+ GL_FRAGMENT_SHADER, fragment_shader_client_id,
+ fragment_shader_service_id);
+
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(vertex_shader_client_id), true);
+ TestHelper::SetShaderStates(
+ gl_.get(), GetShader(fragment_shader_client_id), true);
+
+ cmds::AttachShader attach_cmd;
+ attach_cmd.Init(program_client_id, vertex_shader_client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+
+ attach_cmd.Init(program_client_id, fragment_shader_client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
+
+ cmds::LinkProgram link_cmd;
+ link_cmd.Init(program_client_id);
+
+ EXPECT_EQ(error::kNoError, ExecuteCmd(link_cmd));
+}
+
+void GLES2DecoderTestBase::DoEnableDisable(GLenum cap, bool enable) {
+ SetupExpectationsForEnableDisable(cap, enable);
+ if (enable) {
+ cmds::Enable cmd;
+ cmd.Init(cap);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ } else {
+ cmds::Disable cmd;
+ cmd.Init(cap);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+void GLES2DecoderTestBase::DoEnableVertexAttribArray(GLint index) {
+ EXPECT_CALL(*gl_, EnableVertexAttribArray(index))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::EnableVertexAttribArray cmd;
+ cmd.Init(index);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoBufferData(GLenum target, GLsizei size) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BufferData(target, size, _, GL_STREAM_DRAW))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::BufferData cmd;
+ cmd.Init(target, size, 0, 0, GL_STREAM_DRAW);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoBufferSubData(
+ GLenum target, GLint offset, GLsizei size, const void* data) {
+ EXPECT_CALL(*gl_, BufferSubData(target, offset, size,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ memcpy(shared_memory_address_, data, size);
+ cmds::BufferSubData cmd;
+ cmd.Init(target, offset, size, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::SetupVertexBuffer() {
+ DoEnableVertexAttribArray(1);
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ GLfloat f = 0;
+ DoBufferData(GL_ARRAY_BUFFER, kNumVertices * 2 * sizeof(f));
+}
+
+void GLES2DecoderTestBase::SetupAllNeededVertexBuffers() {
+ DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
+ DoBufferData(GL_ARRAY_BUFFER, kNumVertices * 16 * sizeof(float));
+ DoEnableVertexAttribArray(0);
+ DoEnableVertexAttribArray(1);
+ DoEnableVertexAttribArray(2);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribPointer(2, 2, GL_FLOAT, 0, 0);
+}
+
+void GLES2DecoderTestBase::SetupIndexBuffer() {
+ DoBindBuffer(GL_ELEMENT_ARRAY_BUFFER,
+ client_element_buffer_id_,
+ kServiceElementBufferId);
+ static const GLshort indices[] = {100, 1, 2, 3, 4, 5, 6, 7, 100, 9};
+ COMPILE_ASSERT(arraysize(indices) == kNumIndices, Indices_is_not_10);
+ DoBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices));
+ DoBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, 2, indices);
+ DoBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 2, sizeof(indices) - 2, &indices[1]);
+}
+
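+// Note on the index data above: with kNumVertices == 100 the valid vertex
+// indices are 0..99, so the two entries with value 100 are intentionally out
+// of range. That is what makes kValidIndexRangeStart/Count (elements 1..7,
+// values 1..7) a safe range and kInvalidIndexRangeStart/Count (which includes
+// element 0) an out-of-range one for the tests that use this buffer.
+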
+void GLES2DecoderTestBase::SetupTexture() {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ kSharedMemoryId, kSharedMemoryOffset);
+}
+
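+// Sketch of how a draw-style test typically combines these helpers
+// (illustrative only; real tests also add expectations for the draw call
+// itself):
+//
+//   SetupTexture();
+//   SetupVertexBuffer();
+//   DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+//   AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+//   SetupExpectationsForApplyingDefaultDirtyState();
+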
+void GLES2DecoderTestBase::DeleteVertexBuffer() {
+ DoDeleteBuffer(client_buffer_id_, kServiceBufferId);
+}
+
+void GLES2DecoderTestBase::DeleteIndexBuffer() {
+ DoDeleteBuffer(client_element_buffer_id_, kServiceElementBufferId);
+}
+
+void GLES2DecoderTestBase::AddExpectationsForSimulatedAttrib0WithError(
+ GLsizei num_vertices, GLuint buffer_id, GLenum error) {
+ if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
+ return;
+ }
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(error))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, kServiceAttrib0BufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BufferData(GL_ARRAY_BUFFER,
+ num_vertices * sizeof(GLfloat) * 4,
+ _, GL_DYNAMIC_DRAW))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (error == GL_NO_ERROR) {
+ EXPECT_CALL(*gl_, BufferSubData(
+ GL_ARRAY_BUFFER, 0, num_vertices * sizeof(GLfloat) * 4, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, NULL))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, buffer_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+}
+
+void GLES2DecoderTestBase::AddExpectationsForSimulatedAttrib0(
+ GLsizei num_vertices, GLuint buffer_id) {
+ AddExpectationsForSimulatedAttrib0WithError(
+ num_vertices, buffer_id, GL_NO_ERROR);
+}
+
+void GLES2DecoderTestBase::SetupMockGLBehaviors() {
+ ON_CALL(*gl_, BindVertexArrayOES(_))
+ .WillByDefault(Invoke(
+ &gl_states_,
+ &GLES2DecoderTestBase::MockGLStates::OnBindVertexArrayOES));
+ ON_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, _))
+ .WillByDefault(WithArg<1>(Invoke(
+ &gl_states_,
+ &GLES2DecoderTestBase::MockGLStates::OnBindArrayBuffer)));
+ ON_CALL(*gl_, VertexAttribPointer(_, _, _, _, _, NULL))
+ .WillByDefault(InvokeWithoutArgs(
+ &gl_states_,
+ &GLES2DecoderTestBase::MockGLStates::OnVertexAttribNullPointer));
+}
+
+GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::
+MockCommandBufferEngine() {
+
+ scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
+ shm->CreateAndMapAnonymous(kSharedBufferSize);
+ valid_buffer_ = MakeBufferFromSharedMemory(shm.Pass(), kSharedBufferSize);
+
+ ClearSharedMemory();
+}
+
+GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::
+~MockCommandBufferEngine() {}
+
+scoped_refptr<gpu::Buffer>
+GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::GetSharedMemoryBuffer(
+ int32 shm_id) {
+ return shm_id == kSharedMemoryId ? valid_buffer_ : invalid_buffer_;
+}
+
+void GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::set_token(
+ int32 token) {
+ DCHECK(false);
+}
+
+bool GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::SetGetBuffer(
+ int32 /* transfer_buffer_id */) {
+ DCHECK(false);
+ return false;
+}
+
+bool GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::SetGetOffset(
+ int32 offset) {
+ DCHECK(false);
+ return false;
+}
+
+int32 GLES2DecoderWithShaderTestBase::MockCommandBufferEngine::GetGetOffset() {
+ DCHECK(false);
+ return 0;
+}
+
+void GLES2DecoderWithShaderTestBase::SetUp() {
+ GLES2DecoderTestBase::SetUp();
+ SetupDefaultProgram();
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto-generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
new file mode 100644
index 0000000..0730752
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
@@ -0,0 +1,643 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_BASE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_BASE_H_
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_context_stub_with_extensions.h"
+#include "ui/gl/gl_surface_stub.h"
+#include "ui/gl/gl_mock.h"
+
+namespace base {
+class CommandLine;
+}
+
+namespace gpu {
+namespace gles2 {
+
+class MemoryTracker;
+
+class GLES2DecoderTestBase : public ::testing::TestWithParam<bool> {
+ public:
+ GLES2DecoderTestBase();
+ virtual ~GLES2DecoderTestBase();
+
+ // Template to call glGenXXX functions.
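+  // For example, a hypothetical call generating a texture with a known client
+  // id: GenHelper<cmds::GenTexturesImmediate>(kNewClientId);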
+ template <typename T>
+ void GenHelper(GLuint client_id) {
+ int8 buffer[sizeof(T) + sizeof(client_id)];
+ T& cmd = *reinterpret_cast<T*>(&buffer);
+ cmd.Init(1, &client_id);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_id)));
+ }
+
+ // This template exists solely so we can specialize it for
+ // certain commands.
+ template <typename T, int id>
+ void SpecializedSetup(bool valid) {
+ }
+
+ template <typename T>
+ T* GetImmediateAs() {
+ return reinterpret_cast<T*>(immediate_buffer_);
+ }
+
+ template <typename T, typename Command>
+ T GetImmediateDataAs(Command* cmd) {
+ return reinterpret_cast<T>(ImmediateDataAddress(cmd));
+ }
+
+ void ClearSharedMemory() {
+ engine_->ClearSharedMemory();
+ }
+
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ template <typename T>
+ error::Error ExecuteCmd(const T& cmd) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
+ return decoder_->DoCommands(
+ 1, (const void*)&cmd, ComputeNumEntries(sizeof(cmd)), 0);
+ }
+
+ template <typename T>
+ error::Error ExecuteImmediateCmd(const T& cmd, size_t data_size) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ return decoder_->DoCommands(
+ 1, (const void*)&cmd, ComputeNumEntries(sizeof(cmd) + data_size), 0);
+ }
+
+ template <typename T>
+ T GetSharedMemoryAs() {
+ return reinterpret_cast<T>(shared_memory_address_);
+ }
+
+ template <typename T>
+ T GetSharedMemoryAsWithOffset(uint32 offset) {
+ void* ptr = reinterpret_cast<int8*>(shared_memory_address_) + offset;
+ return reinterpret_cast<T>(ptr);
+ }
+
+ Buffer* GetBuffer(GLuint service_id) {
+ return group_->buffer_manager()->GetBuffer(service_id);
+ }
+
+ Framebuffer* GetFramebuffer(GLuint service_id) {
+ return group_->framebuffer_manager()->GetFramebuffer(service_id);
+ }
+
+ Renderbuffer* GetRenderbuffer(
+ GLuint service_id) {
+ return group_->renderbuffer_manager()->GetRenderbuffer(service_id);
+ }
+
+ TextureRef* GetTexture(GLuint client_id) {
+ return group_->texture_manager()->GetTexture(client_id);
+ }
+
+ Shader* GetShader(GLuint client_id) {
+ return group_->shader_manager()->GetShader(client_id);
+ }
+
+ Program* GetProgram(GLuint client_id) {
+ return group_->program_manager()->GetProgram(client_id);
+ }
+
+ QueryManager::Query* GetQueryInfo(GLuint client_id) {
+ return decoder_->GetQueryManager()->GetQuery(client_id);
+ }
+
+ // This name doesn't match the underlying function, but doing it this way
+  // prevents the need to special-case the unit test generation.
+ VertexAttribManager* GetVertexArrayInfo(GLuint client_id) {
+ return decoder_->GetVertexArrayManager()->GetVertexAttribManager(client_id);
+ }
+
+ ProgramManager* program_manager() {
+ return group_->program_manager();
+ }
+
+ ImageManager* GetImageManager() { return decoder_->GetImageManager(); }
+
+ void DoCreateProgram(GLuint client_id, GLuint service_id);
+ void DoCreateShader(GLenum shader_type, GLuint client_id, GLuint service_id);
+
+ void SetBucketAsCString(uint32 bucket_id, const char* str);
+
+ void set_memory_tracker(MemoryTracker* memory_tracker) {
+ memory_tracker_ = memory_tracker;
+ }
+
+ struct InitState {
+ InitState();
+
+ std::string extensions;
+ std::string gl_version;
+ bool has_alpha;
+ bool has_depth;
+ bool has_stencil;
+ bool request_alpha;
+ bool request_depth;
+ bool request_stencil;
+ bool bind_generates_resource;
+ bool lose_context_when_out_of_memory;
+ bool use_native_vao; // default is true.
+ };
+
+ void InitDecoder(const InitState& init);
+ void InitDecoderWithCommandLine(const InitState& init,
+ const base::CommandLine* command_line);
+
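+  // Sketch of typical use (field values are illustrative): fill in an
+  // InitState describing the desired context, then pass it to InitDecoder().
+  //
+  //   InitState init;
+  //   init.gl_version = "opengl es 2.0";
+  //   init.has_alpha = true;
+  //   init.request_alpha = true;
+  //   init.bind_generates_resource = true;
+  //   InitDecoder(init);
+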
+ void ResetDecoder();
+
+ const ContextGroup& group() const {
+ return *group_.get();
+ }
+
+ ::testing::StrictMock< ::gfx::MockGLInterface>* GetGLMock() const {
+ return gl_.get();
+ }
+
+ GLES2Decoder* GetDecoder() const {
+ return decoder_.get();
+ }
+
+ typedef TestHelper::AttribInfo AttribInfo;
+ typedef TestHelper::UniformInfo UniformInfo;
+
+ void SetupShader(
+ AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint client_id, GLuint service_id,
+ GLuint vertex_shader_client_id, GLuint vertex_shader_service_id,
+ GLuint fragment_shader_client_id, GLuint fragment_shader_service_id);
+
+ void SetupInitCapabilitiesExpectations();
+ void SetupInitStateExpectations();
+ void ExpectEnableDisable(GLenum cap, bool enable);
+
+  // Sets up a shader for testing glUniform.
+ void SetupShaderForUniform(GLenum uniform_type);
+ void SetupDefaultProgram();
+ void SetupCubemapProgram();
+ void SetupSamplerExternalProgram();
+ void SetupTexture();
+
+ // Note that the error is returned as GLint instead of GLenum.
+ // This is because there is a mismatch in the types of GLenum and
+ // the error values GL_NO_ERROR, GL_INVALID_ENUM, etc. GLenum is
+ // typedef'd as unsigned int while the error values are defined as
+ // integers. This is problematic for template functions such as
+ // EXPECT_EQ that expect both types to be the same.
+ GLint GetGLError();
+
+ void DoBindBuffer(GLenum target, GLuint client_id, GLuint service_id);
+ void DoBindFramebuffer(GLenum target, GLuint client_id, GLuint service_id);
+ void DoBindRenderbuffer(GLenum target, GLuint client_id, GLuint service_id);
+ void DoRenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internal_format,
+ GLenum gl_format,
+ GLsizei width,
+ GLsizei height);
+ void RestoreRenderbufferBindings();
+ void EnsureRenderbufferBound(bool expect_bind);
+ void DoBindTexture(GLenum target, GLuint client_id, GLuint service_id);
+ void DoBindVertexArrayOES(GLuint client_id, GLuint service_id);
+
+ bool DoIsBuffer(GLuint client_id);
+ bool DoIsFramebuffer(GLuint client_id);
+ bool DoIsProgram(GLuint client_id);
+ bool DoIsRenderbuffer(GLuint client_id);
+ bool DoIsShader(GLuint client_id);
+ bool DoIsTexture(GLuint client_id);
+
+ void DoDeleteBuffer(GLuint client_id, GLuint service_id);
+ void DoDeleteFramebuffer(
+ GLuint client_id, GLuint service_id,
+ bool reset_draw, GLenum draw_target, GLuint draw_id,
+ bool reset_read, GLenum read_target, GLuint read_id);
+ void DoDeleteProgram(GLuint client_id, GLuint service_id);
+ void DoDeleteRenderbuffer(GLuint client_id, GLuint service_id);
+ void DoDeleteShader(GLuint client_id, GLuint service_id);
+ void DoDeleteTexture(GLuint client_id, GLuint service_id);
+
+ void DoCompressedTexImage2D(
+ GLenum target, GLint level, GLenum format,
+ GLsizei width, GLsizei height, GLint border,
+ GLsizei size, uint32 bucket_id);
+ void DoTexImage2D(
+ GLenum target, GLint level, GLenum internal_format,
+ GLsizei width, GLsizei height, GLint border,
+ GLenum format, GLenum type,
+ uint32 shared_memory_id, uint32 shared_memory_offset);
+ void DoTexImage2DConvertInternalFormat(
+ GLenum target, GLint level, GLenum requested_internal_format,
+ GLsizei width, GLsizei height, GLint border,
+ GLenum format, GLenum type,
+ uint32 shared_memory_id, uint32 shared_memory_offset,
+ GLenum expected_internal_format);
+ void DoRenderbufferStorage(
+ GLenum target, GLenum internal_format, GLenum actual_format,
+ GLsizei width, GLsizei height, GLenum error);
+ void DoFramebufferRenderbuffer(
+ GLenum target,
+ GLenum attachment,
+ GLenum renderbuffer_target,
+ GLuint renderbuffer_client_id,
+ GLuint renderbuffer_service_id,
+ GLenum error);
+ void DoFramebufferTexture2D(
+ GLenum target, GLenum attachment, GLenum tex_target,
+ GLuint texture_client_id, GLuint texture_service_id,
+ GLint level, GLenum error);
+ void DoVertexAttribPointer(
+ GLuint index, GLint size, GLenum type, GLsizei stride, GLuint offset);
+ void DoVertexAttribDivisorANGLE(GLuint index, GLuint divisor);
+
+ void DoEnableDisable(GLenum cap, bool enable);
+
+ void DoEnableVertexAttribArray(GLint index);
+
+ void DoBufferData(GLenum target, GLsizei size);
+
+ void DoBufferSubData(
+ GLenum target, GLint offset, GLsizei size, const void* data);
+
+ void SetupVertexBuffer();
+ void SetupAllNeededVertexBuffers();
+
+ void SetupIndexBuffer();
+
+ void DeleteVertexBuffer();
+
+ void DeleteIndexBuffer();
+
+ void SetupClearTextureExpectations(
+ GLuint service_id,
+ GLuint old_service_id,
+ GLenum bind_target,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLenum format,
+ GLenum type,
+ GLsizei width,
+ GLsizei height);
+
+ void SetupExpectationsForRestoreClearState(
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test);
+
+ void SetupExpectationsForFramebufferClearing(
+ GLenum target,
+ GLuint clear_bits,
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test);
+
+ void SetupExpectationsForFramebufferClearingMulti(
+ GLuint read_framebuffer_service_id,
+ GLuint draw_framebuffer_service_id,
+ GLenum target,
+ GLuint clear_bits,
+ GLclampf restore_red,
+ GLclampf restore_green,
+ GLclampf restore_blue,
+ GLclampf restore_alpha,
+ GLuint restore_stencil,
+ GLclampf restore_depth,
+ bool restore_scissor_test);
+
+ void SetupExpectationsForDepthMask(bool mask);
+ void SetupExpectationsForEnableDisable(GLenum cap, bool enable);
+ void SetupExpectationsForColorMask(bool red,
+ bool green,
+ bool blue,
+ bool alpha);
+ void SetupExpectationsForStencilMask(GLuint front_mask, GLuint back_mask);
+
+ void SetupExpectationsForApplyingDirtyState(
+ bool framebuffer_is_rgb,
+ bool framebuffer_has_depth,
+ bool framebuffer_has_stencil,
+ GLuint color_bits, // NOTE! bits are 0x1000, 0x0100, 0x0010, and 0x0001
+ bool depth_mask,
+ bool depth_enabled,
+ GLuint front_stencil_mask,
+ GLuint back_stencil_mask,
+ bool stencil_enabled);
+
+ void SetupExpectationsForApplyingDefaultDirtyState();
+
+ void AddExpectationsForSimulatedAttrib0WithError(
+ GLsizei num_vertices, GLuint buffer_id, GLenum error);
+
+ void AddExpectationsForSimulatedAttrib0(
+ GLsizei num_vertices, GLuint buffer_id);
+
+ void AddExpectationsForGenVertexArraysOES();
+ void AddExpectationsForDeleteVertexArraysOES();
+ void AddExpectationsForDeleteBoundVertexArraysOES();
+ void AddExpectationsForBindVertexArrayOES();
+ void AddExpectationsForRestoreAttribState(GLuint attrib);
+
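+  // Turns a byte offset into the pointer-typed argument that
+  // glVertexAttribPointer() expects when a buffer object is bound; e.g.
+  // BufferOffset(3 * sizeof(GLfloat)) for data that starts 12 bytes in.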
+ GLvoid* BufferOffset(unsigned i) {
+    return static_cast<int8*>(NULL) + i;
+ }
+
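+  // Runs an Is* query command and returns the result read back from shared
+  // memory. The DoIs* wrappers declared above use it; for example a test can
+  // assert
+  //   EXPECT_TRUE(DoIsTexture(client_texture_id_));
+  // after binding the texture and EXPECT_FALSE(...) once it has been deleted.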
+ template <typename Command, typename Result>
+ bool IsObjectHelper(GLuint client_id) {
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ Command cmd;
+ cmd.Init(client_id, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ bool isObject = static_cast<bool>(*result);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ return isObject;
+ }
+
+ protected:
+ static const int kBackBufferWidth = 128;
+ static const int kBackBufferHeight = 64;
+
+ static const GLint kMaxTextureSize = 2048;
+ static const GLint kMaxCubeMapTextureSize = 256;
+ static const GLint kNumVertexAttribs = 16;
+ static const GLint kNumTextureUnits = 8;
+ static const GLint kMaxTextureImageUnits = 8;
+ static const GLint kMaxVertexTextureImageUnits = 2;
+ static const GLint kMaxFragmentUniformVectors = 16;
+ static const GLint kMaxVaryingVectors = 8;
+ static const GLint kMaxVertexUniformVectors = 128;
+ static const GLint kMaxViewportWidth = 8192;
+ static const GLint kMaxViewportHeight = 8192;
+
+ static const GLint kViewportX = 0;
+ static const GLint kViewportY = 0;
+ static const GLint kViewportWidth = kBackBufferWidth;
+ static const GLint kViewportHeight = kBackBufferHeight;
+
+ static const GLuint kServiceAttrib0BufferId = 801;
+ static const GLuint kServiceFixedAttribBufferId = 802;
+
+ static const GLuint kServiceBufferId = 301;
+ static const GLuint kServiceFramebufferId = 302;
+ static const GLuint kServiceRenderbufferId = 303;
+ static const GLuint kServiceTextureId = 304;
+ static const GLuint kServiceProgramId = 305;
+ static const GLuint kServiceShaderId = 306;
+ static const GLuint kServiceElementBufferId = 308;
+ static const GLuint kServiceQueryId = 309;
+ static const GLuint kServiceVertexArrayId = 310;
+
+ static const int32 kSharedMemoryId = 401;
+ static const size_t kSharedBufferSize = 2048;
+ static const uint32 kSharedMemoryOffset = 132;
+ static const int32 kInvalidSharedMemoryId = 402;
+ static const uint32 kInvalidSharedMemoryOffset = kSharedBufferSize + 1;
+ static const uint32 kInitialResult = 0xBDBDBDBDu;
+ static const uint8 kInitialMemoryValue = 0xBDu;
+
+ static const uint32 kNewClientId = 501;
+ static const uint32 kNewServiceId = 502;
+ static const uint32 kInvalidClientId = 601;
+
+ static const GLuint kServiceVertexShaderId = 321;
+ static const GLuint kServiceFragmentShaderId = 322;
+
+ static const GLuint kServiceCopyTextureChromiumShaderId = 701;
+ static const GLuint kServiceCopyTextureChromiumProgramId = 721;
+
+ static const GLuint kServiceCopyTextureChromiumTextureBufferId = 751;
+ static const GLuint kServiceCopyTextureChromiumVertexBufferId = 752;
+ static const GLuint kServiceCopyTextureChromiumFBOId = 753;
+ static const GLuint kServiceCopyTextureChromiumPositionAttrib = 761;
+ static const GLuint kServiceCopyTextureChromiumTexAttrib = 762;
+ static const GLuint kServiceCopyTextureChromiumSamplerLocation = 763;
+
+ static const GLsizei kNumVertices = 100;
+ static const GLsizei kNumIndices = 10;
+ static const int kValidIndexRangeStart = 1;
+ static const int kValidIndexRangeCount = 7;
+ static const int kInvalidIndexRangeStart = 0;
+ static const int kInvalidIndexRangeCount = 7;
+ static const int kOutOfRangeIndexRangeEnd = 10;
+ static const GLuint kMaxValidIndex = 7;
+
+ static const GLint kMaxAttribLength = 10;
+ static const char* kAttrib1Name;
+ static const char* kAttrib2Name;
+ static const char* kAttrib3Name;
+ static const GLint kAttrib1Size = 1;
+ static const GLint kAttrib2Size = 1;
+ static const GLint kAttrib3Size = 1;
+ static const GLint kAttrib1Location = 0;
+ static const GLint kAttrib2Location = 1;
+ static const GLint kAttrib3Location = 2;
+ static const GLenum kAttrib1Type = GL_FLOAT_VEC4;
+ static const GLenum kAttrib2Type = GL_FLOAT_VEC2;
+ static const GLenum kAttrib3Type = GL_FLOAT_VEC3;
+ static const GLint kInvalidAttribLocation = 30;
+ static const GLint kBadAttribIndex = kNumVertexAttribs;
+
+ static const GLint kMaxUniformLength = 12;
+ static const char* kUniform1Name;
+ static const char* kUniform2Name;
+ static const char* kUniform3Name;
+ static const GLint kUniform1Size = 1;
+ static const GLint kUniform2Size = 3;
+ static const GLint kUniform3Size = 2;
+ static const GLint kUniform1RealLocation = 3;
+ static const GLint kUniform2RealLocation = 10;
+ static const GLint kUniform2ElementRealLocation = 12;
+ static const GLint kUniform3RealLocation = 20;
+ static const GLint kUniform1FakeLocation = 0; // These are
+ static const GLint kUniform2FakeLocation = 1; // hardcoded
+ static const GLint kUniform2ElementFakeLocation = 0x10001; // to match
+ static const GLint kUniform3FakeLocation = 2; // ProgramManager.
+ static const GLint kUniform1DesiredLocation = -1;
+ static const GLint kUniform2DesiredLocation = -1;
+ static const GLint kUniform3DesiredLocation = -1;
+ static const GLenum kUniform1Type = GL_SAMPLER_2D;
+ static const GLenum kUniform2Type = GL_INT_VEC2;
+ static const GLenum kUniform3Type = GL_FLOAT_VEC3;
+ static const GLenum kUniformSamplerExternalType = GL_SAMPLER_EXTERNAL_OES;
+ static const GLenum kUniformCubemapType = GL_SAMPLER_CUBE;
+ static const GLint kInvalidUniformLocation = 30;
+ static const GLint kBadUniformIndex = 1000;
+
+ // Use StrictMock to make 100% sure we know how GL will be called.
+ scoped_ptr< ::testing::StrictMock< ::gfx::MockGLInterface> > gl_;
+ scoped_refptr<gfx::GLSurfaceStub> surface_;
+ scoped_refptr<gfx::GLContextStubWithExtensions> context_;
+ scoped_ptr<MockGLES2Decoder> mock_decoder_;
+ scoped_ptr<GLES2Decoder> decoder_;
+ MemoryTracker* memory_tracker_;
+
+ GLuint client_buffer_id_;
+ GLuint client_framebuffer_id_;
+ GLuint client_program_id_;
+ GLuint client_renderbuffer_id_;
+ GLuint client_shader_id_;
+ GLuint client_texture_id_;
+ GLuint client_element_buffer_id_;
+ GLuint client_vertex_shader_id_;
+ GLuint client_fragment_shader_id_;
+ GLuint client_query_id_;
+ GLuint client_vertexarray_id_;
+
+ uint32 shared_memory_id_;
+ uint32 shared_memory_offset_;
+ void* shared_memory_address_;
+ void* shared_memory_base_;
+
+ GLuint service_renderbuffer_id_;
+ bool service_renderbuffer_valid_;
+
+ uint32 immediate_buffer_[64];
+
+ const bool ignore_cached_state_for_test_;
+ bool cached_color_mask_red_;
+ bool cached_color_mask_green_;
+ bool cached_color_mask_blue_;
+ bool cached_color_mask_alpha_;
+ bool cached_depth_mask_;
+ GLuint cached_stencil_front_mask_;
+ GLuint cached_stencil_back_mask_;
+
+ struct EnableFlags {
+ EnableFlags();
+ bool cached_blend;
+ bool cached_cull_face;
+ bool cached_depth_test;
+ bool cached_dither;
+ bool cached_polygon_offset_fill;
+ bool cached_sample_alpha_to_coverage;
+ bool cached_sample_coverage;
+ bool cached_scissor_test;
+ bool cached_stencil_test;
+ };
+
+ EnableFlags enable_flags_;
+
+ private:
+ class MockCommandBufferEngine : public CommandBufferEngine {
+ public:
+ MockCommandBufferEngine();
+
+ virtual ~MockCommandBufferEngine();
+
+ virtual scoped_refptr<gpu::Buffer> GetSharedMemoryBuffer(int32 shm_id)
+ OVERRIDE;
+
+ void ClearSharedMemory() {
+ memset(valid_buffer_->memory(), kInitialMemoryValue, kSharedBufferSize);
+ }
+
+ virtual void set_token(int32 token) OVERRIDE;
+
+ virtual bool SetGetBuffer(int32 /* transfer_buffer_id */) OVERRIDE;
+
+ // Overridden from CommandBufferEngine.
+ virtual bool SetGetOffset(int32 offset) OVERRIDE;
+
+ // Overridden from CommandBufferEngine.
+ virtual int32 GetGetOffset() OVERRIDE;
+
+ private:
+ scoped_refptr<gpu::Buffer> valid_buffer_;
+ scoped_refptr<gpu::Buffer> invalid_buffer_;
+ };
+
+ // MockGLStates is used to track GL states and emulate driver
+ // behaviors on top of MockGLInterface.
+ class MockGLStates {
+ public:
+ MockGLStates()
+ : bound_array_buffer_object_(0),
+ bound_vertex_array_object_(0) {
+ }
+
+ ~MockGLStates() {
+ }
+
+ void OnBindArrayBuffer(GLuint id) {
+ bound_array_buffer_object_ = id;
+ }
+
+ void OnBindVertexArrayOES(GLuint id) {
+ bound_vertex_array_object_ = id;
+ }
+
+ void OnVertexAttribNullPointer() {
+      // When a vertex array object is bound, some drivers (AMD Linux,
+      // Qualcomm, etc.) have a bug where they incorrectly generate
+      // GL_INVALID_OPERATION from glVertexAttribPointer() if the pointer
+      // is NULL and no buffer is bound to GL_ARRAY_BUFFER.
+ // Make sure we don't trigger this bug.
+ if (bound_vertex_array_object_ != 0)
+ EXPECT_TRUE(bound_array_buffer_object_ != 0);
+ }
+
+ private:
+ GLuint bound_array_buffer_object_;
+ GLuint bound_vertex_array_object_;
+ }; // class MockGLStates
+
+ void AddExpectationsForVertexAttribManager();
+ void SetupMockGLBehaviors();
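+  // Note (assumption about the .cc implementation): SetupMockGLBehaviors
+  // presumably installs the MockGLStates handlers above as default actions on
+  // |gl_| (e.g. ON_CALL(...).WillByDefault(Invoke(&gl_states_, ...))), so the
+  // emulated driver checks run whenever the mocked GL entry points are called.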
+
+ scoped_ptr< ::testing::StrictMock<MockCommandBufferEngine> > engine_;
+ scoped_refptr<ContextGroup> group_;
+ MockGLStates gl_states_;
+};
+
+class GLES2DecoderWithShaderTestBase : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderWithShaderTestBase()
+ : GLES2DecoderTestBase() {
+ }
+
+ protected:
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+};
+
+// SpecializedSetup specializations that are needed in multiple unittest files.
+template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::LinkProgram, 0>(bool valid);
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_BASE_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc
new file mode 100644
index 0000000..74149ef
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc
@@ -0,0 +1,427 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+class GLES2DecoderRestoreStateTest : public GLES2DecoderManualInitTest {
+ public:
+ GLES2DecoderRestoreStateTest() {}
+
+ protected:
+ void AddExpectationsForActiveTexture(GLenum unit);
+ void AddExpectationsForBindTexture(GLenum target, GLuint id);
+ void InitializeContextState(ContextState* state,
+ uint32 non_default_unit,
+ uint32 active_unit);
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderRestoreStateTest,
+ ::testing::Bool());
+
+void GLES2DecoderRestoreStateTest::AddExpectationsForActiveTexture(
+ GLenum unit) {
+ EXPECT_CALL(*gl_, ActiveTexture(unit)).Times(1).RetiresOnSaturation();
+}
+
+void GLES2DecoderRestoreStateTest::AddExpectationsForBindTexture(GLenum target,
+ GLuint id) {
+ EXPECT_CALL(*gl_, BindTexture(target, id)).Times(1).RetiresOnSaturation();
+}
+
+void GLES2DecoderRestoreStateTest::InitializeContextState(
+ ContextState* state,
+ uint32 non_default_unit,
+ uint32 active_unit) {
+ state->texture_units.resize(group().max_texture_units());
+ for (uint32 tt = 0; tt < state->texture_units.size(); ++tt) {
+ TextureRef* ref_cube_map =
+ group().texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_CUBE_MAP);
+ state->texture_units[tt].bound_texture_cube_map = ref_cube_map;
+ TextureRef* ref_2d =
+ (tt == non_default_unit)
+ ? group().texture_manager()->GetTexture(client_texture_id_)
+ : group().texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_2D);
+ state->texture_units[tt].bound_texture_2d = ref_2d;
+ }
+ state->active_texture_unit = active_unit;
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, NullPreviousStateBGR) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ SetupTexture();
+
+ InSequence sequence;
+ // Expect to restore texture bindings for unit GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+ AddExpectationsForBindTexture(GL_TEXTURE_CUBE_MAP,
+ TestHelper::kServiceDefaultTextureCubemapId);
+
+ // Expect to restore texture bindings for remaining units.
+ for (uint32 i = 1; i < group().max_texture_units(); ++i) {
+ AddExpectationsForActiveTexture(GL_TEXTURE0 + i);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D,
+ TestHelper::kServiceDefaultTexture2dId);
+ AddExpectationsForBindTexture(GL_TEXTURE_CUBE_MAP,
+ TestHelper::kServiceDefaultTextureCubemapId);
+ }
+
+ // Expect to restore the active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(NULL);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, NullPreviousState) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+ SetupTexture();
+
+ InSequence sequence;
+ // Expect to restore texture bindings for unit GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+ AddExpectationsForBindTexture(GL_TEXTURE_CUBE_MAP, 0);
+
+ // Expect to restore texture bindings for remaining units.
+ for (uint32 i = 1; i < group().max_texture_units(); ++i) {
+ AddExpectationsForActiveTexture(GL_TEXTURE0 + i);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, 0);
+ AddExpectationsForBindTexture(GL_TEXTURE_CUBE_MAP, 0);
+ }
+
+ // Expect to restore the active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(NULL);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, WithPreviousStateBGR) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ SetupTexture();
+
+ // Construct a previous ContextState with all texture bindings
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, std::numeric_limits<uint32>::max(), 0);
+
+ InSequence sequence;
+ // Expect to restore only GL_TEXTURE_2D binding for GL_TEXTURE0 unit,
+ // since the rest of the bindings haven't changed between the current
+ // state and the |prev_state|.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, WithPreviousState) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+ SetupTexture();
+
+ // Construct a previous ContextState with all texture bindings
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, std::numeric_limits<uint32>::max(), 0);
+
+ InSequence sequence;
+ // Expect to restore only GL_TEXTURE_2D binding for GL_TEXTURE0 unit,
+ // since the rest of the bindings haven't changed between the current
+ // state and the |prev_state|.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, ActiveUnit1) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ // Bind a non-default texture to GL_TEXTURE1 unit.
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE1));
+ ActiveTexture cmd;
+ cmd.Init(GL_TEXTURE1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ SetupTexture();
+
+ // Construct a previous ContextState with all texture bindings
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, std::numeric_limits<uint32>::max(), 0);
+
+ InSequence sequence;
+ // Expect to restore only GL_TEXTURE_2D binding for GL_TEXTURE1 unit,
+ // since the rest of the bindings haven't changed between the current
+ // state and the |prev_state|.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore active texture unit to GL_TEXTURE1.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, NonDefaultUnit0BGR) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ // Bind a non-default texture to GL_TEXTURE1 unit.
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE1));
+ SpecializedSetup<ActiveTexture, 0>(true);
+ ActiveTexture cmd;
+ cmd.Init(GL_TEXTURE1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ SetupTexture();
+
+ // Construct a previous ContextState with GL_TEXTURE_2D target in
+ // GL_TEXTURE0 unit bound to a non-default texture and the rest
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, 0, kServiceTextureId);
+
+ InSequence sequence;
+ // Expect to restore GL_TEXTURE_2D binding for GL_TEXTURE0 unit to
+ // a default texture.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D,
+ TestHelper::kServiceDefaultTexture2dId);
+
+ // Expect to restore GL_TEXTURE_2D binding for GL_TEXTURE1 unit to
+ // non-default.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore active texture unit to GL_TEXTURE1.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, NonDefaultUnit1BGR) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ // Bind a non-default texture to GL_TEXTURE0 unit.
+ SetupTexture();
+
+ // Construct a previous ContextState with GL_TEXTURE_2D target in
+ // GL_TEXTURE1 unit bound to a non-default texture and the rest
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, 1, kServiceTextureId);
+
+ InSequence sequence;
+ // Expect to restore GL_TEXTURE_2D binding to the non-default texture
+ // for GL_TEXTURE0 unit.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore GL_TEXTURE_2D binding to the default texture
+ // for GL_TEXTURE1 unit.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D,
+ TestHelper::kServiceDefaultTexture2dId);
+
+ // Expect to restore active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, DefaultUnit0) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ // Bind a non-default texture to GL_TEXTURE1 unit.
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE1));
+ SpecializedSetup<ActiveTexture, 0>(true);
+ ActiveTexture cmd;
+ cmd.Init(GL_TEXTURE1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ SetupTexture();
+
+ // Construct a previous ContextState with GL_TEXTURE_2D target in
+ // GL_TEXTURE0 unit bound to a non-default texture and the rest
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, 0, kServiceTextureId);
+
+ InSequence sequence;
+ // Expect to restore GL_TEXTURE_2D binding for GL_TEXTURE0 unit to
+ // the 0 texture.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, 0);
+
+ // Expect to restore GL_TEXTURE_2D binding for GL_TEXTURE1 unit to
+ // non-default.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore active texture unit to GL_TEXTURE1.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderRestoreStateTest, DefaultUnit1) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ // Bind a non-default texture to GL_TEXTURE0 unit.
+ SetupTexture();
+
+ // Construct a previous ContextState with GL_TEXTURE_2D target in
+ // GL_TEXTURE1 unit bound to a non-default texture and the rest
+ // set to default textures.
+ ContextState prev_state(NULL, NULL, NULL);
+ InitializeContextState(&prev_state, 1, kServiceTextureId);
+
+ InSequence sequence;
+ // Expect to restore GL_TEXTURE_2D binding to the non-default texture
+ // for GL_TEXTURE0 unit.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, kServiceTextureId);
+
+ // Expect to restore GL_TEXTURE_2D binding to the 0 texture
+ // for GL_TEXTURE1 unit.
+ AddExpectationsForActiveTexture(GL_TEXTURE1);
+ AddExpectationsForBindTexture(GL_TEXTURE_2D, 0);
+
+ // Expect to restore active texture unit to GL_TEXTURE0.
+ AddExpectationsForActiveTexture(GL_TEXTURE0);
+
+ GetDecoder()->RestoreAllTextureUnitBindings(&prev_state);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ContextStateCapabilityCaching) {
+ struct TestInfo {
+ GLenum gl_enum;
+ bool default_state;
+ bool expect_set;
+ };
+
+ // TODO(vmiura): Should autogen this to match build_gles2_cmd_buffer.py.
+ TestInfo test[] = {{GL_BLEND, false, true},
+ {GL_CULL_FACE, false, true},
+ {GL_DEPTH_TEST, false, false},
+ {GL_DITHER, true, true},
+ {GL_POLYGON_OFFSET_FILL, false, true},
+ {GL_SAMPLE_ALPHA_TO_COVERAGE, false, true},
+ {GL_SAMPLE_COVERAGE, false, true},
+ {GL_SCISSOR_TEST, false, true},
+ {GL_STENCIL_TEST, false, false},
+ {0, false, false}};
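+  // Note (assumption): expect_set is false for GL_DEPTH_TEST and
+  // GL_STENCIL_TEST because the decoder defers applying those capabilities to
+  // draw time via its dirty-state handling instead of calling
+  // glEnable/glDisable immediately.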
+
+ InitState init;
+ init.gl_version = "2.1";
+ InitDecoder(init);
+
+ for (int i = 0; test[i].gl_enum; i++) {
+ bool enable_state = test[i].default_state;
+
+    // Test that setting the default state initially is ignored.
+ EnableDisableTest(test[i].gl_enum, enable_state, test[i].expect_set);
+
+ // Test new and cached state changes.
+ for (int n = 0; n < 3; n++) {
+ enable_state = !enable_state;
+ EnableDisableTest(test[i].gl_enum, enable_state, test[i].expect_set);
+ EnableDisableTest(test[i].gl_enum, enable_state, test[i].expect_set);
+ }
+ }
+}
+
+// TODO(vmiura): Tests for VAO restore.
+
+// TODO(vmiura): Tests for ContextState::RestoreAttribute().
+
+// TODO(vmiura): Tests for ContextState::RestoreBufferBindings().
+
+// TODO(vmiura): Tests for ContextState::RestoreProgramBindings().
+
+// TODO(vmiura): Tests for ContextState::RestoreRenderbufferBindings().
+
+// TODO(vmiura): Tests for ContextState::RestoreGlobalState().
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
new file mode 100644
index 0000000..7c8e5ae
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
@@ -0,0 +1,2390 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+class GLES2DecoderGeometryInstancingTest : public GLES2DecoderWithShaderTest {
+ public:
+ GLES2DecoderGeometryInstancingTest() : GLES2DecoderWithShaderTest() {}
+
+ virtual void SetUp() {
+ InitState init;
+ init.extensions = "GL_ANGLE_instanced_arrays";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ SetupDefaultProgram();
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderGeometryInstancingTest,
+ ::testing::Bool());
+
+void GLES2DecoderManualInitTest::DirtyStateMaskTest(GLuint color_bits,
+ bool depth_mask,
+ GLuint front_stencil_mask,
+ GLuint back_stencil_mask) {
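+  // color_bits packs one channel per hex nibble: 0x1000 = red, 0x0100 = green,
+  // 0x0010 = blue, 0x0001 = alpha, matching the ColorMask::Init() call below.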
+ ColorMask color_mask_cmd;
+ color_mask_cmd.Init((color_bits & 0x1000) != 0,
+ (color_bits & 0x0100) != 0,
+ (color_bits & 0x0010) != 0,
+ (color_bits & 0x0001) != 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(color_mask_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ DepthMask depth_mask_cmd;
+ depth_mask_cmd.Init(depth_mask);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(depth_mask_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ StencilMaskSeparate front_stencil_mask_cmd;
+ front_stencil_mask_cmd.Init(GL_FRONT, front_stencil_mask);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(front_stencil_mask_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ StencilMaskSeparate back_stencil_mask_cmd;
+ back_stencil_mask_cmd.Init(GL_BACK, back_stencil_mask);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(back_stencil_mask_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupExpectationsForApplyingDirtyState(
+ false, // Framebuffer is RGB
+ true, // Framebuffer has depth
+ true, // Framebuffer has stencil
+ color_bits, // color bits
+ depth_mask, // depth mask
+ false, // depth enabled
+ front_stencil_mask, // front stencil mask
+ back_stencil_mask, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Test that with an RGB backbuffer, setting the color mask to 1,1,1,1 results
+// in 1,1,1,0 being applied at draw time, while querying still returns 1,1,1,1.
+TEST_P(GLES2DecoderRGBBackbufferTest, RGBBackbufferColorMask) {
+ ColorMask cmd;
+ cmd.Init(true, true, true, true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_COLOR_WRITEMASK, result->GetData()))
+ .Times(0);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_COLOR_WRITEMASK, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(
+ decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_COLOR_WRITEMASK),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+ EXPECT_EQ(1, result->GetData()[1]);
+ EXPECT_EQ(1, result->GetData()[2]);
+ EXPECT_EQ(1, result->GetData()[3]);
+}
+
+// Test that with no depth buffer, setting DepthMask to true results in false
+// being applied at draw time, while querying still returns true.
+TEST_P(GLES2DecoderRGBBackbufferTest, RGBBackbufferDepthMask) {
+ EXPECT_CALL(*gl_, DepthMask(true)).Times(0).RetiresOnSaturation();
+ DepthMask cmd;
+ cmd.Init(true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_WRITEMASK, result->GetData()))
+ .Times(0);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_DEPTH_WRITEMASK, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(
+ decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_WRITEMASK),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+}
+
+// Test that with no stencil buffer, setting the stencil mask results in 0
+// being applied at draw time, while querying still returns the value we set.
+TEST_P(GLES2DecoderRGBBackbufferTest, RGBBackbufferStencilMask) {
+ const GLint kMask = 123;
+ EXPECT_CALL(*gl_, StencilMask(kMask)).Times(0).RetiresOnSaturation();
+ StencilMask cmd;
+ cmd.Init(kMask);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_WRITEMASK, result->GetData()))
+ .Times(0);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_WRITEMASK, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(
+ decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_WRITEMASK),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(kMask, result->GetData()[0]);
+}
+
+// Test that if an FBO is bound we get the correct masks.
+TEST_P(GLES2DecoderRGBBackbufferTest, RGBBackbufferColorMaskFBO) {
+ ColorMask cmd;
+ cmd.Init(true, true, true, true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ DoEnableVertexAttribArray(2);
+ DoVertexAttribPointer(2, 2, GL_FLOAT, 0, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Check that no extra calls are made on the next draw.
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+  // Set up the framebuffer.
+  // The texture needs to be 1x1 or else it's not renderable.
+ const GLsizei kWidth = 1;
+ const GLsizei kHeight = 1;
+ const GLenum kFormat = GL_RGB;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Pass some data so the texture will be marked as cleared.
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ kFormat,
+ kWidth,
+ kHeight,
+ 0,
+ kFormat,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_,
+ kServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+
+  // This time the state needs to be set.
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Check that no extra calls are made on the next draw.
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Unbind
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
+
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DepthEnableWithDepth) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_depth = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ Enable cmd;
+ cmd.Init(GL_DEPTH_TEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupDefaultProgram();
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ true, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ true, // depth mask
+ true, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_TEST, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_DEPTH_TEST, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_TEST),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, DepthEnableWithoutRequestedDepth) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ Enable cmd;
+ cmd.Init(GL_DEPTH_TEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupDefaultProgram();
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_TEST, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_DEPTH_TEST, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_TEST),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, StencilEnableWithStencil) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_stencil = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ Enable cmd;
+ cmd.Init(GL_STENCIL_TEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupDefaultProgram();
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(
+ true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ true, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ GLES2Decoder::kDefaultStencilMask, // front stencil mask
+ GLES2Decoder::kDefaultStencilMask, // back stencil mask
+ true); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_TEST, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_TEST, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_TEST),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, StencilEnableWithoutRequestedStencil) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ Enable cmd;
+ cmd.Init(GL_STENCIL_TEST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupDefaultProgram();
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(true, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1110, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays draw_cmd;
+ draw_cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(draw_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_TEST, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_TEST, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_TEST),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(1, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, CachedColorMask) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ SetupDefaultProgram();
+ SetupAllNeededVertexBuffers();
+ SetupTexture();
+
+ // Test all color_bits combinations twice.
+ for (int i = 0; i < 32; i++) {
+ GLuint color_bits = (i & 1 ? 0x0001 : 0x0000) | (i & 2 ? 0x0010 : 0x0000) |
+ (i & 4 ? 0x0100 : 0x0000) | (i & 8 ? 0x1000 : 0x0000);
+
+ // Toggle depth_test to force ApplyDirtyState each time.
+ DirtyStateMaskTest(color_bits, false, 0xffffffff, 0xffffffff);
+ DirtyStateMaskTest(color_bits, true, 0xffffffff, 0xffffffff);
+ DirtyStateMaskTest(color_bits, false, 0xffffffff, 0xffffffff);
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, CachedDepthMask) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ SetupDefaultProgram();
+ SetupAllNeededVertexBuffers();
+ SetupTexture();
+
+ // Test all depth_mask combinations twice.
+ for (int i = 0; i < 4; i++) {
+ bool depth_mask = (i & 1) == 1;
+
+ // Toggle color masks to force ApplyDirtyState each time.
+ DirtyStateMaskTest(0x1010, depth_mask, 0xffffffff, 0xffffffff);
+ DirtyStateMaskTest(0x0101, depth_mask, 0xffffffff, 0xffffffff);
+ DirtyStateMaskTest(0x1010, depth_mask, 0xffffffff, 0xffffffff);
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, CachedStencilMask) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ SetupDefaultProgram();
+ SetupAllNeededVertexBuffers();
+ SetupTexture();
+
+ // Test all stencil_mask combinations twice.
+ for (int i = 0; i < 4; i++) {
+ GLuint stencil_mask = (i & 1) ? 0xf0f0f0f0 : 0x0f0f0f0f;
+
+ // Toggle color masks to force ApplyDirtyState each time.
+ DirtyStateMaskTest(0x1010, true, stencil_mask, 0xffffffff);
+ DirtyStateMaskTest(0x0101, true, stencil_mask, 0xffffffff);
+ DirtyStateMaskTest(0x1010, true, stencil_mask, 0xffffffff);
+ }
+
+ for (int i = 0; i < 4; i++) {
+ GLuint stencil_mask = (i & 1) ? 0xf0f0f0f0 : 0x0f0f0f0f;
+
+ // Toggle color masks to force ApplyDirtyState each time.
+ DirtyStateMaskTest(0x1010, true, 0xffffffff, stencil_mask);
+ DirtyStateMaskTest(0x0101, true, 0xffffffff, stencil_mask);
+ DirtyStateMaskTest(0x1010, true, 0xffffffff, stencil_mask);
+ }
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysNoAttributesSucceeds) {
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Tests when the math overflows (0x40000000 * sizeof GLfloat)
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysSimulatedAttrib0OverflowFails) {
+ const GLsizei kLargeCount = 0x40000000;
+ SetupTexture();
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0).RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kLargeCount);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(GetDecoder()->WasContextLost());
+}
+
+// Tests when the math overflows (0x7FFFFFFF + 1 = 0x80000000 verts)
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysSimulatedAttrib0PosToNegFails) {
+ const GLsizei kLargeCount = 0x7FFFFFFF;
+ SetupTexture();
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0).RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kLargeCount);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(GetDecoder()->WasContextLost());
+}
+
+// Tests when the driver returns an error
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysSimulatedAttrib0OOMFails) {
+ const GLsizei kFakeLargeCount = 0x1234;
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0WithError(
+ kFakeLargeCount, 0, GL_OUT_OF_MEMORY);
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0).RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kFakeLargeCount);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(GetDecoder()->WasContextLost());
+}
+
+// Test that the context is lost when the driver reports out of memory.
+TEST_P(GLES2DecoderManualInitTest, LoseContextWhenOOM) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ init.lose_context_when_out_of_memory = true;
+ InitDecoder(init);
+ SetupDefaultProgram();
+
+ const GLsizei kFakeLargeCount = 0x1234;
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0WithError(
+ kFakeLargeCount, 0, GL_OUT_OF_MEMORY);
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0).RetiresOnSaturation();
+ // Other contexts in the group should be lost also.
+ EXPECT_CALL(*mock_decoder_, LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kFakeLargeCount);
+ // This context should be lost.
+ EXPECT_EQ(error::kLostContext, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_TRUE(decoder_->WasContextLost());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysBadTextureUsesBlack) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+  // This is an NPOT texture. As the default filtering requires mips,
+  // this should trigger replacing it with a black texture before rendering.
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 3,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ {
+ InSequence sequence;
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(
+ *gl_, BindTexture(GL_TEXTURE_2D, TestHelper::kServiceBlackTexture2dId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kServiceTextureId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysMissingAttributesFails) {
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0);
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ DrawArraysMissingAttributesZeroCountSucceeds) {
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0);
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysValidAttributesSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ AddExpectationsForSimulatedAttrib0(kNumVertices, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Same as DrawArraysValidAttributesSucceeds, but with workaround
+// |init_vertex_attributes|.
+TEST_P(GLES2DecoderManualInitTest, InitVertexAttributes) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::INIT_VERTEX_ATTRIBUTES));
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoderWithCommandLine(init, &command_line);
+ SetupDefaultProgram();
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ AddExpectationsForSimulatedAttrib0(kNumVertices, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysDeletedBufferFails) {
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ DeleteVertexBuffer();
+
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0);
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysDeletedProgramSucceeds) {
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoDeleteProgram(client_program_id_, kServiceProgramId);
+
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId)).Times(1);
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysWithInvalidModeFails) {
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0);
+ DrawArrays cmd;
+ cmd.Init(GL_QUADS, 0, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_POLYGON, 0, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysInvalidCountFails) {
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ // Try start > 0
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0);
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 1, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Try with count > size
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Try with attrib offset > 0
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+  // Try with size > 2 (i.e., vec3 instead of vec2)
+ DoVertexAttribPointer(1, 3, GL_FLOAT, 0, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+  // Try with stride > 8 (a vec2 plus extra padding bytes)
+ DoVertexAttribPointer(1, 2, GL_FLOAT, sizeof(GLfloat) * 3, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysInstancedANGLEFails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, VertexAttribDivisorANGLEFails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(_, _))
+ .Times(0)
+ .RetiresOnSaturation();
+
+ VertexAttribDivisorANGLE cmd;
+ cmd.Init(0, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLENoAttributesFails) {
+ SetupTexture();
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLESimulatedAttrib0) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ AddExpectationsForSimulatedAttrib0(kNumVertices, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(GL_TRIANGLES, 0, kNumVertices, 3))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(0, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(0, 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLEMissingAttributesFails) {
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _)).Times(0);
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLEMissingAttributesZeroCountSucceeds) {
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _)).Times(0);
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, 0, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLEValidAttributesSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ AddExpectationsForSimulatedAttrib0(kNumVertices, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(GL_TRIANGLES, 0, kNumVertices, 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLEWithInvalidModeFails) {
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _)).Times(0);
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_QUADS, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_POLYGON, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLEInvalidPrimcountFails) {
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _)).Times(0);
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, 1, -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+// Per-instance data is twice as large, but the number of instances is half.
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLELargeInstanceSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 4, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawArraysInstancedANGLE(GL_TRIANGLES, 0, kNumVertices, kNumVertices / 2))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, kNumVertices / 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Regular drawArrays takes the divisor into account
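+// With a divisor of 1, attribute 0 advances per instance rather than per
+// vertex, so a non-instanced draw only needs a single element for it; pointing
+// the attribute at the last element in the buffer should still pass the range
+// check below.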
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysWithDivisorSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ // Access the data right at the end of the buffer.
+ DoVertexAttribPointer(
+ 0, 2, GL_FLOAT, 0, (kNumVertices - 1) * 2 * sizeof(GLfloat));
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Per-instance data is twice as large, but the divisor is also twice as large.
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLELargeDivisorSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 4, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 2);
+ EXPECT_CALL(
+ *gl_,
+ DrawArraysInstancedANGLE(GL_TRIANGLES, 0, kNumVertices, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest, DrawArraysInstancedANGLELargeFails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, kNumVertices + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices + 1, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Per-vertex data is twice as large, but the number of vertices drawn is half
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLELargeIndexSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 4, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawArraysInstancedANGLE(GL_TRIANGLES, 0, kNumVertices / 2, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices / 2, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysInstancedANGLENoDivisor0Fails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ DoVertexAttribDivisorANGLE(1, 1);
+ EXPECT_CALL(*gl_, DrawArraysInstancedANGLE(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawArraysInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawArraysNoDivisor0Fails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ DoVertexAttribDivisorANGLE(1, 1);
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsNoAttributesSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, 0);
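+  // No client vertex arrays are enabled, so the decoder is expected to
+  // simulate attrib 0 with a service-side buffer covering
+  // kMaxValidIndex + 1 vertices; the helper above sets up those expectations.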
+ SetupExpectationsForApplyingDefaultDirtyState();
+ EXPECT_CALL(*gl_,
+ DrawElements(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2)))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsMissingAttributesFails) {
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ DrawElementsMissingAttributesZeroCountSucceeds) {
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES, 0, GL_UNSIGNED_SHORT, kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsExtraAttributesFails) {
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(6);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsValidAttributesSucceeds) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_,
+ DrawElements(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2)))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsDeletedBufferFails) {
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ DeleteIndexBuffer();
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsDeletedProgramSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoDeleteProgram(client_program_id_, kServiceProgramId);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(1);
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId)).Times(1);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsWithInvalidModeFails) {
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_QUADS,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_POLYGON,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsInvalidCountFails) {
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+  // Try an offset > 0 so the requested range runs past the end of the buffer.
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES, kNumIndices, GL_UNSIGNED_SHORT, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Try with count > size
+ cmd.Init(GL_TRIANGLES, kNumIndices + 1, GL_UNSIGNED_SHORT, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsOutOfRangeIndicesFails) {
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kInvalidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kInvalidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsOddOffsetForUint16Fails) {
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(0);
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES, kInvalidIndexRangeCount, GL_UNSIGNED_SHORT, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsInstancedANGLEFails) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLENoAttributesFails) {
+ SetupTexture();
+ SetupIndexBuffer();
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLESimulatedAttrib0) {
+ SetupTexture();
+ SetupVertexBuffer();
+ SetupIndexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawElementsInstancedANGLE(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2),
+ 3))
+ .Times(1)
+ .RetiresOnSaturation();
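+  // While attrib 0 is simulated its divisor is expected to be reset to 0 for
+  // the simulated data and then restored to the client-requested value of 1.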
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(0, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, VertexAttribDivisorANGLE(0, 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLEMissingAttributesFails) {
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _)).Times(0);
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLEMissingAttributesZeroCountSucceeds) {
+ SetupIndexBuffer();
+ DoEnableVertexAttribArray(1);
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _)).Times(0);
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES, 0, GL_UNSIGNED_SHORT, kValidIndexRangeStart * 2, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLEValidAttributesSucceeds) {
+ SetupIndexBuffer();
+ SetupTexture();
+ SetupVertexBuffer();
+ DoEnableVertexAttribArray(1);
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, kServiceBufferId);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(
+ *gl_,
+ DrawElementsInstancedANGLE(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2),
+ 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLEWithInvalidModeFails) {
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _)).Times(0);
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_QUADS,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_INVALID_ENUM,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+// Per-instance data is twice as large, but number of instances is half
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLELargeInstanceSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ // Add offset so we're sure we're accessing data near the end of the buffer.
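+  // With (kNumVertices - kMaxValidIndex - 1) elements of leading padding, the
+  // highest valid index still addresses the final element of the buffer.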
+ DoVertexAttribPointer(
+ 1,
+ 2,
+ GL_FLOAT,
+ 0,
+ (kNumVertices - kMaxValidIndex - 1) * 2 * sizeof(GLfloat));
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 4, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawElementsInstancedANGLE(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2),
+ kNumVertices / 2))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kNumVertices / 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Regular drawElements takes the divisor into account
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsWithDivisorSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ // Add offset so we're sure we're accessing data near the end of the buffer.
+ DoVertexAttribPointer(
+ 1,
+ 2,
+ GL_FLOAT,
+ 0,
+ (kNumVertices - kMaxValidIndex - 1) * 2 * sizeof(GLfloat));
+
+ DoEnableVertexAttribArray(0);
+ // Access the data right at the end of the buffer.
+ DoVertexAttribPointer(
+ 0, 2, GL_FLOAT, 0, (kNumVertices - 1) * 2 * sizeof(GLfloat));
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawElements(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2)))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Per-instance data is twice as large, but divisor is twice
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLELargeDivisorSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 4, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 2);
+ EXPECT_CALL(
+ *gl_,
+ DrawElementsInstancedANGLE(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2),
+ kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLELargeFails) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kNumVertices + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ cmd.Init(GL_TRIANGLES,
+ kInvalidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kInvalidIndexRangeStart * 2,
+ kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLEInvalidPrimcountFails) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ -1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Per-vertex data is twice as large, but the referenced index values stay in range
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLELargeIndexSucceeds) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoVertexAttribPointer(1, 4, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ EXPECT_CALL(
+ *gl_,
+ DrawElementsInstancedANGLE(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2),
+ kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsInstancedANGLENoDivisor0Fails) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ DoVertexAttribDivisorANGLE(1, 1);
+ EXPECT_CALL(*gl_, DrawElementsInstancedANGLE(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElementsInstancedANGLE cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2,
+ kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderGeometryInstancingTest,
+ DrawElementsNoDivisor0Fails) {
+ SetupTexture();
+ SetupIndexBuffer();
+ SetupVertexBuffer();
+ DoVertexAttribPointer(1, 2, GL_FLOAT, 0, 0);
+
+ DoEnableVertexAttribArray(0);
+ DoVertexAttribPointer(0, 2, GL_FLOAT, 0, 0);
+ DoVertexAttribDivisorANGLE(0, 1);
+ DoVertexAttribDivisorANGLE(1, 1);
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawArraysClearsAfterTexImage2DNULL) {
+ SetupAllNeededVertexBuffers();
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Create an uncleared texture with 2 levels.
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 1, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ // Expect 2 levels will be cleared.
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 1,
+ 1);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // But not again
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawElementsClearsAfterTexImage2DNULL) {
+ SetupAllNeededVertexBuffers();
+ SetupIndexBuffer();
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Create an uncleared texture with 2 levels.
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 1, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ // Expect 2 levels will be cleared.
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 1,
+ 1);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ EXPECT_CALL(*gl_,
+ DrawElements(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2)))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // But not again
+ EXPECT_CALL(*gl_,
+ DrawElements(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ BufferOffset(kValidIndexRangeStart * 2)))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawClearsAfterTexImage2DNULLInFBO) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ SetupAllNeededVertexBuffers();
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // But not again.
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawWithFBOThatCantClearDoesNotDraw) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
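+  // An unsupported framebuffer cannot have its uncleared attachment cleared,
+  // so the draw is expected to be dropped with
+  // GL_INVALID_FRAMEBUFFER_OPERATION and never reach the driver.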
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_UNSUPPORTED))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DrawArrays(_, _, _)).Times(0).RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_FRAMEBUFFER_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, DrawClearsAfterRenderbufferStorageInFBO) {
+ SetupTexture();
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoRenderbufferStorage(
+ GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 100, 50, GL_NO_ERROR);
+ DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_,
+ kServiceRenderbufferId,
+ GL_NO_ERROR);
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DrawArraysClearsAfterTexImage2DNULLCubemap) {
+ InitState init;
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ InitDecoder(init);
+
+ static const GLenum faces[] = {
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ };
+ SetupCubemapProgram();
+ DoBindTexture(GL_TEXTURE_CUBE_MAP, client_texture_id_, kServiceTextureId);
+ // Fill out all the faces for 2 levels, leave 2 uncleared.
+ for (int ii = 0; ii < 6; ++ii) {
+ GLenum face = faces[ii];
+ int32 shm_id =
+ (face == GL_TEXTURE_CUBE_MAP_NEGATIVE_Y) ? 0 : kSharedMemoryId;
+ uint32 shm_offset =
+ (face == GL_TEXTURE_CUBE_MAP_NEGATIVE_Y) ? 0 : kSharedMemoryOffset;
+ DoTexImage2D(face,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shm_id,
+ shm_offset);
+ DoTexImage2D(face,
+ 1,
+ GL_RGBA,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shm_id,
+ shm_offset);
+ }
+ // Expect 2 levels will be cleared.
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ 1,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 1,
+ 1);
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ DrawClearsAfterRenderbuffersWithMultipleAttachments) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoRenderbufferStorage(GL_RENDERBUFFER,
+ GL_DEPTH_COMPONENT16,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ GL_NO_ERROR);
+ DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_,
+ kServiceRenderbufferId,
+ GL_NO_ERROR);
+
+ SetupTexture();
+ SetupExpectationsForFramebufferClearing(
+ GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ true, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ true, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ DrawingWithFBOTwiceChecksForFBOCompleteOnce) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ SetupAllNeededVertexBuffers();
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture that is cleared.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ // Make sure we check for framebuffer complete.
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // But not again.
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DrawClearsDepthTexture) {
+ InitState init;
+ init.extensions = "GL_ANGLE_depth_texture";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ SetupDefaultProgram();
+ SetupAllNeededVertexBuffers();
+ const GLenum attachment = GL_DEPTH_ATTACHMENT;
+ const GLenum target = GL_TEXTURE_2D;
+ const GLint level = 0;
+ DoBindTexture(target, client_texture_id_, kServiceTextureId);
+
+ // Create a depth texture.
+ DoTexImage2D(target,
+ level,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ 0,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ 0,
+ 0);
+
+ // Enable GL_SCISSOR_TEST to make sure we disable it in the clear,
+ // then re-enable it.
+ DoEnableDisable(GL_SCISSOR_TEST, true);
+
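+  // The uncleared depth level is expected to be cleared through a temporary
+  // framebuffer: generate and bind one, attach the texture, clear the depth
+  // bit, then delete it and restore the previous binding.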
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(1, _)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, _))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_,
+ FramebufferTexture2DEXT(GL_DRAW_FRAMEBUFFER_EXT,
+ attachment,
+ target,
+ kServiceTextureId,
+ level))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_DRAW_FRAMEBUFFER_EXT))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_, ClearStencil(0)).Times(1).RetiresOnSaturation();
+ SetupExpectationsForStencilMask(GLES2Decoder::kDefaultStencilMask,
+ GLES2Decoder::kDefaultStencilMask);
+ EXPECT_CALL(*gl_, ClearDepth(1.0f)).Times(1).RetiresOnSaturation();
+ SetupExpectationsForDepthMask(true);
+ SetupExpectationsForEnableDisable(GL_SCISSOR_TEST, false);
+
+ EXPECT_CALL(*gl_, Clear(GL_DEPTH_BUFFER_BIT)).Times(1).RetiresOnSaturation();
+
+ SetupExpectationsForRestoreClearState(0.0f, 0.0f, 0.0f, 0.0f, 0, 1.0f, true);
+
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, _)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_DRAW_FRAMEBUFFER_EXT, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ SetupExpectationsForApplyingDefaultDirtyState();
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
new file mode 100644
index 0000000..28e24de
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
@@ -0,0 +1,42 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2DecoderTestWithCHROMIUMPathRendering : public GLES2DecoderTest {
+ public:
+ GLES2DecoderTestWithCHROMIUMPathRendering() {}
+ virtual void SetUp() OVERRIDE {
+ InitState init;
+ init.gl_version = "opengl es 3.1";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ init.extensions = "GL_NV_path_rendering";
+ InitDecoder(init);
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderTestWithCHROMIUMPathRendering,
+ ::testing::Bool());
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h
new file mode 100644
index 0000000..a81be2f
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by gles2_cmd_decoder_unittest_extensions.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_EXTENSIONS_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_EXTENSIONS_AUTOGEN_H_
+
+// TODO(gman): BlitFramebufferCHROMIUM
+// TODO(gman): RenderbufferStorageMultisampleCHROMIUM
+// TODO(gman): RenderbufferStorageMultisampleEXT
+// TODO(gman): FramebufferTexture2DMultisampleEXT
+// TODO(gman): DiscardFramebufferEXTImmediate
+
+TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
+ MatrixLoadfCHROMIUMImmediateValidArgs) {
+ cmds::MatrixLoadfCHROMIUMImmediate& cmd =
+ *GetImmediateAs<cmds::MatrixLoadfCHROMIUMImmediate>();
+ SpecializedSetup<cmds::MatrixLoadfCHROMIUMImmediate, 0>(true);
+ GLfloat temp[16] = {
+ 0,
+ };
+ cmd.Init(GL_PATH_PROJECTION_CHROMIUM, &temp[0]);
+ EXPECT_CALL(
+ *gl_,
+ MatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM,
+ reinterpret_cast<GLfloat*>(ImmediateDataAddress(&cmd))));
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
+ MatrixLoadIdentityCHROMIUMValidArgs) {
+ EXPECT_CALL(*gl_, MatrixLoadIdentityEXT(GL_PATH_PROJECTION_CHROMIUM));
+ SpecializedSetup<cmds::MatrixLoadIdentityCHROMIUM, 0>(true);
+ cmds::MatrixLoadIdentityCHROMIUM cmd;
+ cmd.Init(GL_PATH_PROJECTION_CHROMIUM);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_EXTENSIONS_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
new file mode 100644
index 0000000..32ba98d
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
@@ -0,0 +1,2395 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+class GLES2DecoderTestWithExtensionsOnGLES2 : public GLES2DecoderTest {
+ public:
+ GLES2DecoderTestWithExtensionsOnGLES2() {}
+
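+  // SetUp() is intentionally a no-op; each test calls Init() itself with the
+  // extension string it needs.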
+ virtual void SetUp() {}
+ void Init(const char* extensions) {
+ InitState init;
+ init.extensions = extensions;
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ InitDecoder(init);
+ }
+};
+
+TEST_P(GLES2DecoderTest, CheckFramebufferStatusWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(_)).Times(0);
+ CheckFramebufferStatus::Result* result =
+ static_cast<CheckFramebufferStatus::Result*>(shared_memory_address_);
+ *result = 0;
+ CheckFramebufferStatus cmd;
+ cmd.Init(GL_FRAMEBUFFER, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE), *result);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, BindAndDeleteFramebuffer) {
+ SetupTexture();
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoDeleteFramebuffer(client_framebuffer_id_,
+ kServiceFramebufferId,
+ true,
+ GL_FRAMEBUFFER,
+ 0,
+ true,
+ GL_FRAMEBUFFER,
+ 0);
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(_, _, _, _)).Times(0);
+ FramebufferRenderbuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, FramebufferTexture2DWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, FramebufferTexture2DEXT(_, _, _, _, _)).Times(0);
+ FramebufferTexture2D cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, GetFramebufferAttachmentParameterivWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
+ .Times(0);
+ GetFramebufferAttachmentParameteriv cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, GetFramebufferAttachmentParameterivWithRenderbuffer) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->size = 0;
+ const GLint* result_value = result->GetData();
+ FramebufferRenderbuffer fbrb_cmd;
+ GetFramebufferAttachmentParameteriv cmd;
+ fbrb_cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(static_cast<GLuint>(*result_value), client_renderbuffer_id_);
+}
+
+TEST_P(GLES2DecoderTest, GetFramebufferAttachmentParameterivWithTexture) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kServiceTextureId,
+ 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ GetFramebufferAttachmentParameteriv::Result* result =
+ static_cast<GetFramebufferAttachmentParameteriv::Result*>(
+ shared_memory_address_);
+ result->SetNumResults(0);
+ const GLint* result_value = result->GetData();
+ FramebufferTexture2D fbtex_cmd;
+ GetFramebufferAttachmentParameteriv cmd;
+ fbtex_cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbtex_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(static_cast<GLuint>(*result_value), client_texture_id_);
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ GetRenderbufferParameterivRebindRenderbuffer) {
+ SetupTexture();
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
+
+ GetRenderbufferParameteriv cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ GL_RENDERBUFFER_RED_SIZE,
+ shared_memory_id_,
+ shared_memory_offset_);
+
+ RestoreRenderbufferBindings();
+ EnsureRenderbufferBound(true);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetRenderbufferParameterivEXT(
+ GL_RENDERBUFFER, GL_RENDERBUFFER_RED_SIZE, _));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, GetRenderbufferParameterivWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetRenderbufferParameterivEXT(_, _, _)).Times(0);
+ GetRenderbufferParameteriv cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ GL_RENDERBUFFER_WIDTH,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, RenderbufferStorageRebindRenderbuffer) {
+ SetupTexture();
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ RestoreRenderbufferBindings();
+ EnsureRenderbufferBound(true);
+ DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
+}
+
+TEST_P(GLES2DecoderTest, RenderbufferStorageWithNoBoundTarget) {
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _)).Times(0);
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+namespace {
+
+// A class to emulate glReadPixels
+class ReadPixelsEmulator {
+ public:
+  // pack_alignment is the alignment the emulated ReadPixels uses when copying
+  // rows out. The source data passed in src_pixels must be contiguous
+  // (unpadded).
+ ReadPixelsEmulator(GLsizei width,
+ GLsizei height,
+ GLint bytes_per_pixel,
+ const void* src_pixels,
+ const void* expected_pixels,
+ GLint pack_alignment)
+ : width_(width),
+ height_(height),
+ pack_alignment_(pack_alignment),
+ bytes_per_pixel_(bytes_per_pixel),
+ src_pixels_(reinterpret_cast<const int8*>(src_pixels)),
+ expected_pixels_(reinterpret_cast<const int8*>(expected_pixels)) {}
+
+ void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) const {
+ DCHECK_GE(x, 0);
+ DCHECK_GE(y, 0);
+ DCHECK_LE(x + width, width_);
+ DCHECK_LE(y + height, height_);
+ for (GLint yy = 0; yy < height; ++yy) {
+ const int8* src = GetPixelAddress(src_pixels_, x, y + yy);
+ const void* dst = ComputePackAlignmentAddress(0, yy, width, pixels);
+ memcpy(const_cast<void*>(dst), src, width * bytes_per_pixel_);
+ }
+ }
+
+ bool CompareRowSegment(GLint x,
+ GLint y,
+ GLsizei width,
+ const void* data) const {
+ DCHECK(x + width <= width_ || width == 0);
+ return memcmp(data,
+ GetPixelAddress(expected_pixels_, x, y),
+ width * bytes_per_pixel_) == 0;
+ }
+
+ // Helper to compute address of pixel in pack aligned data.
+ const void* ComputePackAlignmentAddress(GLint x,
+ GLint y,
+ GLsizei width,
+ const void* address) const {
+ GLint unpadded_row_size = ComputeImageDataSize(width, 1);
+ GLint two_rows_size = ComputeImageDataSize(width, 2);
+ GLsizei padded_row_size = two_rows_size - unpadded_row_size;
+ GLint offset = y * padded_row_size + x * bytes_per_pixel_;
+ return static_cast<const int8*>(address) + offset;
+ }
+
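+  // For example, a 5-pixel row at 3 bytes per pixel is 15 bytes; with a pack
+  // alignment of 4 every row except the last is padded to 16 bytes.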
+ GLint ComputeImageDataSize(GLint width, GLint height) const {
+ GLint row_size = width * bytes_per_pixel_;
+ if (height > 1) {
+ GLint temp = row_size + pack_alignment_ - 1;
+ GLint padded_row_size = (temp / pack_alignment_) * pack_alignment_;
+ GLint size_of_all_but_last_row = (height - 1) * padded_row_size;
+ return size_of_all_but_last_row + row_size;
+ } else {
+ return height * row_size;
+ }
+ }
+
+ private:
+ const int8* GetPixelAddress(const int8* base, GLint x, GLint y) const {
+ return base + (width_ * y + x) * bytes_per_pixel_;
+ }
+
+ GLsizei width_;
+ GLsizei height_;
+ GLint pack_alignment_;
+ GLint bytes_per_pixel_;
+ const int8* src_pixels_;
+ const int8* expected_pixels_;
+};
+
+} // anonymous namespace
+
+void GLES2DecoderTest::CheckReadPixelsOutOfRange(GLint in_read_x,
+ GLint in_read_y,
+ GLsizei in_read_width,
+ GLsizei in_read_height,
+ bool init) {
+ const GLsizei kWidth = 5;
+ const GLsizei kHeight = 3;
+ const GLint kBytesPerPixel = 3;
+ const GLint kPackAlignment = 4;
+ const GLenum kFormat = GL_RGB;
+ static const int8 kSrcPixels[kWidth * kHeight * kBytesPerPixel] = {
+ 12, 13, 14, 18, 19, 18, 19, 12, 13, 14, 18, 19, 18, 19, 13,
+ 29, 28, 23, 22, 21, 22, 21, 29, 28, 23, 22, 21, 22, 21, 28,
+ 31, 34, 39, 37, 32, 37, 32, 31, 34, 39, 37, 32, 37, 32, 34,
+ };
+
+ ClearSharedMemory();
+
+  // We need to set up an FBO so we know the maximum size that ReadPixels
+  // will access.
+ if (init) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ kFormat,
+ kWidth,
+ kHeight,
+ 0,
+ kFormat,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_,
+ kServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+ EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+
+ ReadPixelsEmulator emu(
+ kWidth, kHeight, kBytesPerPixel, kSrcPixels, kSrcPixels, kPackAlignment);
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ void* dest = &result[1];
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+  // ReadPixels is expected to be called only for the clipped, in-range
+  // region even though the command requests a larger one.
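+  // For example, a request of (-2, -1, 9, 5) against the 5x3 texture is
+  // clipped to (0, 0, 5, 3); rows and columns outside that region are
+  // expected to read back as zero.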
+ GLint read_x = std::max(0, in_read_x);
+ GLint read_y = std::max(0, in_read_y);
+ GLint read_end_x = std::max(0, std::min(kWidth, in_read_x + in_read_width));
+ GLint read_end_y = std::max(0, std::min(kHeight, in_read_y + in_read_height));
+ GLint read_width = read_end_x - read_x;
+ GLint read_height = read_end_y - read_y;
+ if (read_width > 0 && read_height > 0) {
+ for (GLint yy = read_y; yy < read_end_y; ++yy) {
+ EXPECT_CALL(
+ *gl_,
+ ReadPixels(read_x, yy, read_width, 1, kFormat, GL_UNSIGNED_BYTE, _))
+ .WillOnce(Invoke(&emu, &ReadPixelsEmulator::ReadPixels))
+ .RetiresOnSaturation();
+ }
+ }
+ ReadPixels cmd;
+ cmd.Init(in_read_x,
+ in_read_y,
+ in_read_width,
+ in_read_height,
+ kFormat,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+
+ GLint unpadded_row_size = emu.ComputeImageDataSize(in_read_width, 1);
+ scoped_ptr<int8[]> zero(new int8[unpadded_row_size]);
+ scoped_ptr<int8[]> pack(new int8[kPackAlignment]);
+ memset(zero.get(), 0, unpadded_row_size);
+ memset(pack.get(), kInitialMemoryValue, kPackAlignment);
+ for (GLint yy = 0; yy < in_read_height; ++yy) {
+ const int8* row = static_cast<const int8*>(
+ emu.ComputePackAlignmentAddress(0, yy, in_read_width, dest));
+ GLint y = in_read_y + yy;
+ if (y < 0 || y >= kHeight) {
+ EXPECT_EQ(0, memcmp(zero.get(), row, unpadded_row_size));
+ } else {
+ // check off left.
+ GLint num_left_pixels = std::max(-in_read_x, 0);
+ GLint num_left_bytes = num_left_pixels * kBytesPerPixel;
+ EXPECT_EQ(0, memcmp(zero.get(), row, num_left_bytes));
+
+ // check off right.
+ GLint num_right_pixels = std::max(in_read_x + in_read_width - kWidth, 0);
+ GLint num_right_bytes = num_right_pixels * kBytesPerPixel;
+ EXPECT_EQ(0,
+ memcmp(zero.get(),
+ row + unpadded_row_size - num_right_bytes,
+ num_right_bytes));
+
+ // check middle.
+ GLint x = std::max(in_read_x, 0);
+ GLint num_middle_pixels =
+ std::max(in_read_width - num_left_pixels - num_right_pixels, 0);
+ EXPECT_TRUE(
+ emu.CompareRowSegment(x, y, num_middle_pixels, row + num_left_bytes));
+ }
+
+ // check padding
+ if (yy != in_read_height - 1) {
+ GLint num_padding_bytes =
+ (kPackAlignment - 1) - (unpadded_row_size % kPackAlignment);
+ EXPECT_EQ(0,
+ memcmp(pack.get(), row + unpadded_row_size, num_padding_bytes));
+ }
+ }
+}
+
+TEST_P(GLES2DecoderTest, ReadPixels) {
+ const GLsizei kWidth = 5;
+ const GLsizei kHeight = 3;
+ const GLint kBytesPerPixel = 3;
+ const GLint kPackAlignment = 4;
+ static const int8 kSrcPixels[kWidth * kHeight * kBytesPerPixel] = {
+ 12, 13, 14, 18, 19, 18, 19, 12, 13, 14, 18, 19, 18, 19, 13,
+ 29, 28, 23, 22, 21, 22, 21, 29, 28, 23, 22, 21, 22, 21, 28,
+ 31, 34, 39, 37, 32, 37, 32, 31, 34, 39, 37, 32, 37, 32, 34,
+ };
+
+ surface_->SetSize(gfx::Size(INT_MAX, INT_MAX));
+
+ ReadPixelsEmulator emu(
+ kWidth, kHeight, kBytesPerPixel, kSrcPixels, kSrcPixels, kPackAlignment);
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ void* dest = &result[1];
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ ReadPixels(0, 0, kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, _))
+ .WillOnce(Invoke(&emu, &ReadPixelsEmulator::ReadPixels));
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ for (GLint yy = 0; yy < kHeight; ++yy) {
+ EXPECT_TRUE(emu.CompareRowSegment(
+ 0, yy, kWidth, emu.ComputePackAlignmentAddress(0, yy, kWidth, dest)));
+ }
+}
+
+TEST_P(GLES2DecoderRGBBackbufferTest, ReadPixelsNoAlphaBackbuffer) {
+ const GLsizei kWidth = 3;
+ const GLsizei kHeight = 3;
+ const GLint kBytesPerPixel = 4;
+ const GLint kPackAlignment = 4;
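+  // kExpectedPixels is kSrcPixels with the alpha byte of every pixel forced to
+  // 255, because the RGB backbuffer has no alpha channel.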
+ static const uint8 kExpectedPixels[kWidth * kHeight * kBytesPerPixel] = {
+ 12, 13, 14, 255, 19, 18, 19, 255, 13, 14, 18, 255,
+ 29, 28, 23, 255, 21, 22, 21, 255, 28, 23, 22, 255,
+ 31, 34, 39, 255, 32, 37, 32, 255, 34, 39, 37, 255,
+ };
+ static const uint8 kSrcPixels[kWidth * kHeight * kBytesPerPixel] = {
+ 12, 13, 14, 18, 19, 18, 19, 12, 13, 14, 18, 19, 29, 28, 23, 22, 21, 22,
+ 21, 29, 28, 23, 22, 21, 31, 34, 39, 37, 32, 37, 32, 31, 34, 39, 37, 32,
+ };
+
+ surface_->SetSize(gfx::Size(INT_MAX, INT_MAX));
+
+ ReadPixelsEmulator emu(kWidth,
+ kHeight,
+ kBytesPerPixel,
+ kSrcPixels,
+ kExpectedPixels,
+ kPackAlignment);
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ void* dest = &result[1];
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ ReadPixels(0, 0, kWidth, kHeight, GL_RGBA, GL_UNSIGNED_BYTE, _))
+ .WillOnce(Invoke(&emu, &ReadPixelsEmulator::ReadPixels));
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ for (GLint yy = 0; yy < kHeight; ++yy) {
+ EXPECT_TRUE(emu.CompareRowSegment(
+ 0, yy, kWidth, emu.ComputePackAlignmentAddress(0, yy, kWidth, dest)));
+ }
+}
+
+TEST_P(GLES2DecoderTest, ReadPixelsOutOfRange) {
+ static GLint tests[][4] = {
+ {
+ -2, -1, 9, 5,
+ }, // out of range on all sides
+ {
+ 2, 1, 9, 5,
+ }, // out of range on right, bottom
+ {
+ -7, -4, 9, 5,
+ }, // out of range on left, top
+ {
+ 0, -5, 9, 5,
+ }, // completely off top
+ {
+ 0, 3, 9, 5,
+ }, // completely off bottom
+ {
+ -9, 0, 9, 5,
+ }, // completely off left
+ {
+ 5, 0, 9, 5,
+ }, // completely off right
+ };
+
+ for (size_t tt = 0; tt < arraysize(tests); ++tt) {
+ CheckReadPixelsOutOfRange(
+ tests[tt][0], tests[tt][1], tests[tt][2], tests[tt][3], tt == 0);
+ }
+}
+
+TEST_P(GLES2DecoderTest, ReadPixelsInvalidArgs) {
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ EXPECT_CALL(*gl_, ReadPixels(_, _, _, _, _, _, _)).Times(0);
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ -1,
+ 1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(0,
+ 0,
+ 1,
+ -1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGB,
+ GL_INT,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ kInvalidSharedMemoryId,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ kInvalidSharedMemoryOffset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ kInvalidSharedMemoryId,
+ result_shm_offset,
+ false);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ kInvalidSharedMemoryOffset,
+ false);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderManualInitTest, ReadPixelsAsyncError) {
+ InitState init;
+ init.extensions = "GL_ARB_sync";
+ init.gl_version = "opengl es 3.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+
+ const GLsizei kWidth = 4;
+ const GLsizei kHeight = 4;
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+
+ EXPECT_CALL(*gl_, GetError())
+      // The first error check must pass to get to the test.
+      .WillOnce(Return(GL_NO_ERROR))
+      // The second check comes after BufferData; simulate a failure here.
+      .WillOnce(Return(GL_INVALID_OPERATION))
+      // The third error check is the fall-through call to synchronous ReadPixels.
+      .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(*gl_,
+ ReadPixels(0, 0, kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, _))
+ .Times(1);
+ EXPECT_CALL(*gl_, GenBuffersARB(1, _)).Times(1);
+ EXPECT_CALL(*gl_, BindBuffer(GL_PIXEL_PACK_BUFFER_ARB, _)).Times(2);
+ EXPECT_CALL(*gl_,
+ BufferData(GL_PIXEL_PACK_BUFFER_ARB, _, NULL, GL_STREAM_READ))
+ .Times(1);
+
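+  // The async path fails at BufferData above, so the decoder is expected to
+  // fall back to the synchronous ReadPixels call.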
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+// Check that if a renderbuffer is attached and GL returns
+// GL_FRAMEBUFFER_COMPLETE, the buffer is cleared and state is restored.
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearColor) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ ClearColor color_cmd;
+ ColorMask color_mask_cmd;
+ Enable enable_cmd;
+ FramebufferRenderbuffer cmd;
+ color_cmd.Init(0.1f, 0.2f, 0.3f, 0.4f);
+ color_mask_cmd.Init(0, 1, 0, 1);
+ enable_cmd.Init(GL_SCISSOR_TEST);
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ InSequence sequence;
+ EXPECT_CALL(*gl_, ClearColor(0.1f, 0.2f, 0.3f, 0.4f))
+ .Times(1)
+ .RetiresOnSaturation();
+ SetupExpectationsForEnableDisable(GL_SCISSOR_TEST, true);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(color_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(color_mask_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(enable_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearDepth) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ ClearDepthf depth_cmd;
+ DepthMask depth_mask_cmd;
+ FramebufferRenderbuffer cmd;
+ depth_cmd.Init(0.5f);
+ depth_mask_cmd.Init(false);
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ InSequence sequence;
+ EXPECT_CALL(*gl_, ClearDepth(0.5f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(depth_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(depth_mask_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearStencil) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ ClearStencil stencil_cmd;
+ StencilMaskSeparate stencil_mask_separate_cmd;
+ FramebufferRenderbuffer cmd;
+ stencil_cmd.Init(123);
+ stencil_mask_separate_cmd.Init(GL_BACK, 0x1234u);
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_STENCIL_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ InSequence sequence;
+ EXPECT_CALL(*gl_, ClearStencil(123)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_STENCIL_ATTACHMENT,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(stencil_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(stencil_mask_separate_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+#if 0 // Turn this test on once we allow GL_DEPTH_STENCIL_ATTACHMENT
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearDepthStencil) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+ ClearDepthf depth_cmd;
+ ClearStencil stencil_cmd;
+ FramebufferRenderbuffer cmd;
+ depth_cmd.Init(0.5f);
+ stencil_cmd.Init(123);
+ cmd.Init(
+ GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ InSequence sequence;
+ EXPECT_CALL(*gl_, ClearDepth(0.5f))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ClearStencil(123))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(
+ GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(depth_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(stencil_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+#endif
+
+TEST_P(GLES2DecoderManualInitTest, ActualAlphaMatchesRequestedAlpha) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_ALPHA_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_ALPHA_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ALPHA_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(8, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ActualAlphaDoesNotMatchRequestedAlpha) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_ALPHA_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_ALPHA_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ALPHA_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ActualDepthMatchesRequestedDepth) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_depth = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(24, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ActualDepthDoesNotMatchRequestedDepth) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ActualStencilMatchesRequestedStencil) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_stencil = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(8, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ActualStencilDoesNotMatchRequestedStencil) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilReportsCorrectValues) {
+ InitState init;
+ init.extensions = "GL_OES_packed_depth_stencil";
+ init.gl_version = "opengl es 2.0";
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(8, result->GetData()[0]);
+ result->size = 0;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(24, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilNoRequestedStencil) {
+ InitState init;
+ init.extensions = "GL_OES_packed_depth_stencil";
+ init.gl_version = "opengl es 2.0";
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+ result->size = 0;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(24, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilRenderbufferDepth) {
+ InitState init;
+ init.extensions = "GL_OES_packed_depth_stencil";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+      .WillOnce(Return(GL_NO_ERROR)) // for RenderbufferStorage
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for FramebufferRenderbuffer
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(
+ *gl_,
+ RenderbufferStorageEXT(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50))
+ .Times(1)
+ .RetiresOnSaturation();
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ FramebufferRenderbuffer fbrb_cmd;
+ fbrb_cmd.Init(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
+
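+  // Only the depth attachment is used, so GL_STENCIL_BITS must report 0 even
+  // though the packed renderbuffer has stencil bits.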
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+ result->size = 0;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(24, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilRenderbufferStencil) {
+ InitState init;
+ init.extensions = "GL_OES_packed_depth_stencil";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+      .WillOnce(Return(GL_NO_ERROR)) // for RenderbufferStorage
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for FramebufferRenderbuffer
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(
+ *gl_,
+ RenderbufferStorageEXT(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50))
+ .Times(1)
+ .RetiresOnSaturation();
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_STENCIL_ATTACHMENT,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ FramebufferRenderbuffer fbrb_cmd;
+ fbrb_cmd.Init(GL_FRAMEBUFFER,
+ GL_STENCIL_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
+
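+  // Only the stencil attachment is used, so GL_DEPTH_BITS must report 0 even
+  // though the packed renderbuffer has depth bits.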
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ GetIntegerv cmd2;
+ cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(8))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(8, result->GetData()[0]);
+ result->size = 0;
+ cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
+ .WillOnce(SetArgumentPointee<1>(24))
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(0, result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderTest, FramebufferRenderbufferGLError) {
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ FramebufferRenderbuffer cmd;
+ cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, FramebufferTexture2DGLError) {
+ const GLsizei kWidth = 5;
+ const GLsizei kHeight = 3;
+ const GLenum kFormat = GL_RGB;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ kFormat,
+ kWidth,
+ kHeight,
+ 0,
+ kFormat,
+ GL_UNSIGNED_BYTE,
+ 0,
+ 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kServiceTextureId,
+ 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ FramebufferTexture2D fbtex_cmd;
+ fbtex_cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbtex_cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, RenderbufferStorageGLError) {
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(GL_RENDERBUFFER, GL_RGBA, 100, 50))
+ .Times(1)
+ .RetiresOnSaturation();
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 100, 50);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, RenderbufferStorageBadArgs) {
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ RenderbufferStorage cmd;
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, TestHelper::kMaxRenderbufferSize + 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 1, TestHelper::kMaxRenderbufferSize + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ RenderbufferStorageMultisampleCHROMIUMGLError) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EnsureRenderbufferBound(false);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(
+ *gl_,
+ RenderbufferStorageMultisampleEXT(GL_RENDERBUFFER, 1, GL_RGBA, 100, 50))
+ .Times(1)
+ .RetiresOnSaturation();
+ RenderbufferStorageMultisampleCHROMIUM cmd;
+ cmd.Init(GL_RENDERBUFFER, 1, GL_RGBA4, 100, 50);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ RenderbufferStorageMultisampleCHROMIUMBadArgs) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ EXPECT_CALL(*gl_, RenderbufferStorageMultisampleEXT(_, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ RenderbufferStorageMultisampleCHROMIUM cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples + 1,
+ GL_RGBA4,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ TestHelper::kMaxRenderbufferSize + 1,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ 1,
+ TestHelper::kMaxRenderbufferSize + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, RenderbufferStorageMultisampleCHROMIUM) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ InSequence sequence;
+ EnsureRenderbufferBound(false);
+ DoRenderbufferStorageMultisampleCHROMIUM(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ GL_RGBA,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ RenderbufferStorageMultisampleCHROMIUMRebindRenderbuffer) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ RestoreRenderbufferBindings();
+ InSequence sequence;
+ EnsureRenderbufferBound(true);
+ DoRenderbufferStorageMultisampleCHROMIUM(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ GL_RGBA,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ RenderbufferStorageMultisampleEXTNotSupported) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ InSequence sequence;
+  // With only GL_EXT_framebuffer_multisample exposed, multisampled storage must
+  // go through RenderbufferStorageMultisampleCHROMIUM, so the EXT command is
+  // rejected.
+ RenderbufferStorageMultisampleEXT cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+class GLES2DecoderMultisampledRenderToTextureTest
+ : public GLES2DecoderTestWithExtensionsOnGLES2 {
+ public:
+ void TestNotCompatibleWithRenderbufferStorageMultisampleCHROMIUM() {
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ RenderbufferStorageMultisampleCHROMIUM cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ }
+
+ void TestRenderbufferStorageMultisampleEXT(const char* extension,
+ bool rb_rebind) {
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ InSequence sequence;
+ if (rb_rebind) {
+ RestoreRenderbufferBindings();
+ EnsureRenderbufferBound(true);
+ } else {
+ EnsureRenderbufferBound(false);
+ }
+
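+    // The IMG variant of the extension maps to
+    // RenderbufferStorageMultisampleIMG; otherwise the EXT entry point is
+    // expected.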
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ if (strstr(extension, "GL_IMG_multisampled_render_to_texture")) {
+ EXPECT_CALL(
+ *gl_,
+ RenderbufferStorageMultisampleIMG(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA,
+ TestHelper::kMaxRenderbufferSize,
+ 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(
+ *gl_,
+ RenderbufferStorageMultisampleEXT(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA,
+ TestHelper::kMaxRenderbufferSize,
+ 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ RenderbufferStorageMultisampleEXT cmd;
+ cmd.Init(GL_RENDERBUFFER,
+ TestHelper::kMaxSamples,
+ GL_RGBA4,
+ TestHelper::kMaxRenderbufferSize,
+ 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderMultisampledRenderToTextureTest,
+ ::testing::Bool());
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ NotCompatibleWithRenderbufferStorageMultisampleCHROMIUM_EXT) {
+ Init("GL_EXT_multisampled_render_to_texture");
+ TestNotCompatibleWithRenderbufferStorageMultisampleCHROMIUM();
+}
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ NotCompatibleWithRenderbufferStorageMultisampleCHROMIUM_IMG) {
+ Init("GL_IMG_multisampled_render_to_texture");
+ TestNotCompatibleWithRenderbufferStorageMultisampleCHROMIUM();
+}
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ RenderbufferStorageMultisampleEXT_EXT) {
+ Init("GL_EXT_multisampled_render_to_texture");
+ TestRenderbufferStorageMultisampleEXT("GL_EXT_multisampled_render_to_texture",
+ false);
+}
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ RenderbufferStorageMultisampleEXT_IMG) {
+ Init("GL_IMG_multisampled_render_to_texture");
+ TestRenderbufferStorageMultisampleEXT("GL_IMG_multisampled_render_to_texture",
+ false);
+}
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ RenderbufferStorageMultisampleEXT_EXT_RebindRenderbuffer) {
+ Init("GL_EXT_multisampled_render_to_texture");
+ TestRenderbufferStorageMultisampleEXT("GL_EXT_multisampled_render_to_texture",
+ true);
+}
+
+TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
+ RenderbufferStorageMultisampleEXT_IMG_RebindRenderbuffer) {
+ Init("GL_IMG_multisampled_render_to_texture");
+ TestRenderbufferStorageMultisampleEXT("GL_IMG_multisampled_render_to_texture",
+ true);
+}
+
+TEST_P(GLES2DecoderTest, ReadPixelsGLError) {
+ GLenum kFormat = GL_RGBA;
+ GLint x = 0;
+ GLint y = 0;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ ReadPixels(x, y, width, height, kFormat, GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ ReadPixels cmd;
+ cmd.Init(x,
+ y,
+ width,
+ height,
+ kFormat,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, UnClearedAttachmentsGetClearedOnClear) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, Clear(GL_COLOR_BUFFER_BIT)).Times(1).RetiresOnSaturation();
+
+ Clear cmd;
+ cmd.Init(GL_COLOR_BUFFER_BIT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, UnClearedAttachmentsGetClearedOnReadPixels) {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ typedef ReadPixels::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ UnClearedAttachmentsGetClearedOnReadPixelsAndDrawBufferGetsRestored) {
+ InitState init;
+ init.extensions = "GL_EXT_framebuffer_multisample";
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render from" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_READ_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_READ_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+  // Enable GL_SCISSOR_TEST to make sure we disable it for the clear and then
+  // re-enable it afterwards.
+ DoEnableDisable(GL_SCISSOR_TEST, true);
+
+ SetupExpectationsForFramebufferClearingMulti(
+ kServiceFramebufferId, // read framebuffer service id
+ 0, // backbuffer service id
+ GL_READ_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ true); // scissor test
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ typedef ReadPixels::Result Result;
+ uint32 result_shm_id = kSharedMemoryId;
+ uint32 result_shm_offset = kSharedMemoryOffset;
+ uint32 pixels_shm_id = kSharedMemoryId;
+ uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(Result);
+ ReadPixels cmd;
+ cmd.Init(0,
+ 0,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ false);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, CopyTexImageWithInCompleteFBOFails) {
+ GLenum target = GL_TEXTURE_2D;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ SetupTexture();
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 0, 0, GL_NO_ERROR);
+ DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_,
+ kServiceRenderbufferId,
+ GL_NO_ERROR);
+
+ EXPECT_CALL(*gl_, CopyTexImage2D(_, _, _, _, _, _, _, _))
+ .Times(0)
+ .RetiresOnSaturation();
+ CopyTexImage2D cmd;
+ cmd.Init(target, level, internal_format, 0, 0, width, height);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_FRAMEBUFFER_OPERATION, GetGLError());
+}
+
+void GLES2DecoderWithShaderTest::CheckRenderbufferChangesMarkFBOAsNotComplete(
+ bool bound_fbo) {
+ FramebufferManager* framebuffer_manager = group().framebuffer_manager();
+ SetupTexture();
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
+ DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_,
+ kServiceRenderbufferId,
+ GL_NO_ERROR);
+
+ if (!bound_fbo) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
+ }
+
+ Framebuffer* framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
+ ASSERT_TRUE(framebuffer != NULL);
+ framebuffer_manager->MarkAsComplete(framebuffer);
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+
+  // Test that RenderbufferStorage marks the fbo as not complete.
+ DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+ framebuffer_manager->MarkAsComplete(framebuffer);
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+
+ // Test deleting renderbuffer marks fbo as not complete.
+ DoDeleteRenderbuffer(client_renderbuffer_id_, kServiceRenderbufferId);
+ if (bound_fbo) {
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+ } else {
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+ }
+ // Cleanup
+ DoDeleteFramebuffer(client_framebuffer_id_,
+ kServiceFramebufferId,
+ bound_fbo,
+ GL_FRAMEBUFFER,
+ 0,
+ bound_fbo,
+ GL_FRAMEBUFFER,
+ 0);
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ RenderbufferChangesMarkFBOAsNotCompleteBoundFBO) {
+ CheckRenderbufferChangesMarkFBOAsNotComplete(true);
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ RenderbufferChangesMarkFBOAsNotCompleteUnboundFBO) {
+ CheckRenderbufferChangesMarkFBOAsNotComplete(false);
+}
+
+void GLES2DecoderWithShaderTest::CheckTextureChangesMarkFBOAsNotComplete(
+ bool bound_fbo) {
+ FramebufferManager* framebuffer_manager = group().framebuffer_manager();
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ SetupTexture();
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ DoBindRenderbuffer(
+ GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoRenderbufferStorage(GL_RENDERBUFFER,
+ GL_DEPTH_COMPONENT16,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ GL_NO_ERROR);
+ DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_,
+ kServiceRenderbufferId,
+ GL_NO_ERROR);
+
+ if (!bound_fbo) {
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
+ }
+
+ Framebuffer* framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
+ ASSERT_TRUE(framebuffer != NULL);
+ framebuffer_manager->MarkAsComplete(framebuffer);
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+
+  // Test that TexImage2D marks the fbo as not complete.
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGB, 1, 1, 0, GL_RGB, GL_UNSIGNED_BYTE, 0, 0);
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+ framebuffer_manager->MarkAsComplete(framebuffer);
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+
+  // Test that CopyTexImage2D marks the fbo as not complete.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, CopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 0, 0, 1, 1, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ CopyTexImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 0, GL_RGB, 0, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+
+ // Test deleting texture marks fbo as not complete.
+ framebuffer_manager->MarkAsComplete(framebuffer);
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+ DoDeleteTexture(kFBOClientTextureId, kFBOServiceTextureId);
+
+ if (bound_fbo) {
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+ } else {
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+ }
+ // Cleanup
+ DoDeleteFramebuffer(client_framebuffer_id_,
+ kServiceFramebufferId,
+ bound_fbo,
+ GL_FRAMEBUFFER,
+ 0,
+ bound_fbo,
+ GL_FRAMEBUFFER,
+ 0);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, TextureChangesMarkFBOAsNotCompleteBoundFBO) {
+ CheckTextureChangesMarkFBOAsNotComplete(true);
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ TextureChangesMarkFBOAsNotCompleteUnboundFBO) {
+ CheckTextureChangesMarkFBOAsNotComplete(false);
+}
+
+TEST_P(GLES2DecoderTest, CanChangeSurface) {
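+  // Switching to a new surface must bind that surface's backing framebuffer.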
+ scoped_refptr<GLSurfaceMock> other_surface(new GLSurfaceMock);
+ EXPECT_CALL(*other_surface.get(), GetBackingFrameBufferObject())
+ .WillOnce(Return(7));
+ EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER_EXT, 7));
+
+ decoder_->SetSurface(other_surface);
+}
+
+TEST_P(GLES2DecoderTest, DrawBuffersEXTImmediateSucceeds) {
+ const GLsizei count = 1;
+ const GLenum bufs[] = {GL_COLOR_ATTACHMENT0};
+ DrawBuffersEXTImmediate& cmd = *GetImmediateAs<DrawBuffersEXTImmediate>();
+ cmd.Init(count, bufs);
+
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_CALL(*gl_, DrawBuffersARB(count, _)).Times(1).RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, DrawBuffersEXTImmediateFails) {
+ const GLsizei count = 1;
+ const GLenum bufs[] = {GL_COLOR_ATTACHMENT1_EXT};
+ DrawBuffersEXTImmediate& cmd = *GetImmediateAs<DrawBuffersEXTImmediate>();
+ cmd.Init(count, bufs);
+
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, DrawBuffersEXTImmediateBackbuffer) {
+ const GLsizei count = 1;
+ const GLenum bufs[] = {GL_BACK};
+ DrawBuffersEXTImmediate& cmd = *GetImmediateAs<DrawBuffersEXTImmediate>();
+ cmd.Init(count, bufs);
+
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
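+  // GL_BACK is only valid for the default framebuffer, so the same command
+  // succeeds once the framebuffer object is unbound.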
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0); // unbind
+
+ EXPECT_CALL(*gl_, DrawBuffersARB(count, _)).Times(1).RetiresOnSaturation();
+
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, InvalidateFramebufferBinding) {
+ InitState init;
+ init.gl_version = "opengl es 3.0";
+ InitDecoder(init);
+
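+  // On an ES3 context the driver's DiscardFramebufferEXT entry point should be
+  // wired to glInvalidateFramebuffer.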
+ // EXPECT_EQ can't be used to compare function pointers
+ EXPECT_TRUE(
+ gfx::MockGLInterface::GetGLProcAddress("glInvalidateFramebuffer") ==
+ gfx::g_driver_gl.fn.glDiscardFramebufferEXTFn);
+ EXPECT_TRUE(
+ gfx::MockGLInterface::GetGLProcAddress("glInvalidateFramebuffer") !=
+ gfx::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT"));
+}
+
+TEST_P(GLES2DecoderManualInitTest, DiscardFramebufferEXT) {
+ InitState init;
+ init.extensions = "GL_EXT_discard_framebuffer";
+ init.gl_version = "opengl es 2.0";
+ InitDecoder(init);
+
+ // EXPECT_EQ can't be used to compare function pointers
+ EXPECT_TRUE(
+ gfx::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT") ==
+ gfx::g_driver_gl.fn.glDiscardFramebufferEXTFn);
+
+ const GLenum target = GL_FRAMEBUFFER;
+ const GLsizei count = 1;
+ const GLenum attachments[] = {GL_COLOR_ATTACHMENT0};
+
+ SetupTexture();
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_,
+ kServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+ FramebufferManager* framebuffer_manager = group().framebuffer_manager();
+ Framebuffer* framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
+ EXPECT_TRUE(framebuffer->IsCleared());
+
+ EXPECT_CALL(*gl_, DiscardFramebufferEXT(target, count, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ DiscardFramebufferEXTImmediate& cmd =
+ *GetImmediateAs<DiscardFramebufferEXTImmediate>();
+ cmd.Init(target, count, attachments);
+
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(attachments)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_FALSE(framebuffer->IsCleared());
+}
+
+TEST_P(GLES2DecoderTest, DiscardFramebufferEXTUnsupported) {
+ const GLenum target = GL_FRAMEBUFFER;
+ const GLsizei count = 1;
+ const GLenum attachments[] = {GL_COLOR_EXT};
+ DiscardFramebufferEXTImmediate& cmd =
+ *GetImmediateAs<DiscardFramebufferEXTImmediate>();
+ cmd.Init(target, count, attachments);
+
+  // Should not result in a call into GL.
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(attachments)));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest,
+ DiscardedAttachmentsEXTMarksFramebufferIncomplete) {
+ InitState init;
+ init.extensions = "GL_EXT_discard_framebuffer";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0,
+ 0,
+ 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ false); // scissor test
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, Clear(GL_COLOR_BUFFER_BIT)).Times(1).RetiresOnSaturation();
+
+ Clear clear_cmd;
+ clear_cmd.Init(GL_COLOR_BUFFER_BIT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(clear_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Check that framebuffer is cleared and complete.
+ FramebufferManager* framebuffer_manager = group().framebuffer_manager();
+ Framebuffer* framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
+ EXPECT_TRUE(framebuffer->IsCleared());
+ EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
+
+  // Check that discarding GL_COLOR_ATTACHMENT0 marks the attachment as
+  // uncleared and the framebuffer as incomplete.
+ EXPECT_TRUE(
+ gfx::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT") ==
+ gfx::g_driver_gl.fn.glDiscardFramebufferEXTFn);
+
+ const GLenum target = GL_FRAMEBUFFER;
+ const GLsizei count = 1;
+ const GLenum attachments[] = {GL_COLOR_ATTACHMENT0};
+
+ DiscardFramebufferEXTImmediate& discard_cmd =
+ *GetImmediateAs<DiscardFramebufferEXTImmediate>();
+ discard_cmd.Init(target, count, attachments);
+
+ EXPECT_CALL(*gl_, DiscardFramebufferEXT(target, count, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(discard_cmd, sizeof(attachments)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_FALSE(framebuffer->IsCleared());
+ EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
+}
+
+TEST_P(GLES2DecoderManualInitTest, ReadFormatExtension) {
+ InitState init;
+ init.extensions = "GL_OES_read_format";
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError()).Times(6).RetiresOnSaturation();
+
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ GetIntegerv cmd;
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
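+  // With GL_OES_read_format the read format/type queries are forwarded to GL.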
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(1).RetiresOnSaturation();
+ cmd.Init(GL_IMPLEMENTATION_COLOR_READ_FORMAT,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(1).RetiresOnSaturation();
+ cmd.Init(GL_IMPLEMENTATION_COLOR_READ_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoReadFormatExtension) {
+ InitState init;
+ init.gl_version = "2.1";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ GetIntegerv cmd;
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kFBOClientTextureId,
+ kFBOServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0).RetiresOnSaturation();
+ cmd.Init(GL_IMPLEMENTATION_COLOR_READ_FORMAT,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0).RetiresOnSaturation();
+ cmd.Init(GL_IMPLEMENTATION_COLOR_READ_TYPE,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
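
The two tests above differ only in whether GL_OES_read_format is exposed: with the extension the GetIntegerv query is forwarded to the driver (Times(1)), without it the decoder answers on its own and makes no GL call (Times(0)). The sketch below illustrates that dispatch; the fallback values are assumptions chosen for illustration, not necessarily what the real decoder returns.

```cpp
// Hypothetical dispatch for GL_IMPLEMENTATION_COLOR_READ_FORMAT/_TYPE queries.
#include <utility>

constexpr unsigned kGL_RGBA = 0x1908;
constexpr unsigned kGL_UNSIGNED_BYTE = 0x1401;

std::pair<unsigned, unsigned> QueryReadFormat(
    bool has_read_format_extension,
    std::pair<unsigned, unsigned> (*driver_query)()) {
  if (has_read_format_extension)
    return driver_query();               // Mirrors Times(1) on GetIntegerv.
  return {kGL_RGBA, kGL_UNSIGNED_BYTE};  // Mirrors Times(0): no GL call made.
}
```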
+
+// TODO(gman): PixelStorei
+
+// TODO(gman): SwapBuffers
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
new file mode 100644
index 0000000..05cb9ff
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
@@ -0,0 +1,1045 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+TEST_P(GLES2DecoderWithShaderTest, GetProgramInfoCHROMIUMValidArgs) {
+ const uint32 kBucketId = 123;
+ GetProgramInfoCHROMIUM cmd;
+ cmd.Init(client_program_id_, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ EXPECT_GT(bucket->size(), 0u);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetProgramInfoCHROMIUMInvalidArgs) {
+ const uint32 kBucketId = 123;
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ EXPECT_TRUE(bucket == NULL);
+ GetProgramInfoCHROMIUM cmd;
+ cmd.Init(kInvalidClientId, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(sizeof(ProgramInfoHeader), bucket->size());
+ ProgramInfoHeader* info =
+ bucket->GetDataAs<ProgramInfoHeader*>(0, sizeof(ProgramInfoHeader));
+ ASSERT_TRUE(info != 0);
+ EXPECT_EQ(0u, info->link_status);
+ EXPECT_EQ(0u, info->num_attribs);
+ EXPECT_EQ(0u, info->num_uniforms);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformivSucceeds) {
+ GetUniformiv::Result* result =
+ static_cast<GetUniformiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformiv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformiv(kServiceProgramId, kUniform2RealLocation, _))
+ .Times(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GLES2Util::GetGLDataTypeSizeForUniforms(kUniform2Type),
+ result->size);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformivArrayElementSucceeds) {
+ GetUniformiv::Result* result =
+ static_cast<GetUniformiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformiv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2ElementFakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_,
+ GetUniformiv(kServiceProgramId, kUniform2ElementRealLocation, _))
+ .Times(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GLES2Util::GetGLDataTypeSizeForUniforms(kUniform2Type),
+ result->size);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformivBadProgramFails) {
+ GetUniformiv::Result* result =
+ static_cast<GetUniformiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformiv cmd;
+ // non-existent program
+ cmd.Init(kInvalidClientId,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformiv(_, _, _)).Times(0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+// Valid id that is not a program. The GL spec requires a different error for
+// this case.
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ result->size = kInitialResult;
+ cmd.Init(client_shader_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ // Unlinked program
+ EXPECT_CALL(*gl_, CreateProgram())
+ .Times(1)
+ .WillOnce(Return(kNewServiceId))
+ .RetiresOnSaturation();
+ CreateProgram cmd2;
+ cmd2.Init(kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ result->size = kInitialResult;
+ cmd.Init(kNewClientId,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformivBadLocationFails) {
+ GetUniformiv::Result* result =
+ static_cast<GetUniformiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformiv cmd;
+ // invalid location
+ cmd.Init(client_program_id_,
+ kInvalidUniformLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformiv(_, _, _)).Times(0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformivBadSharedMemoryFails) {
+ GetUniformiv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformiv(_, _, _)).Times(0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformfvSucceeds) {
+ GetUniformfv::Result* result =
+ static_cast<GetUniformfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformfv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformfv(kServiceProgramId, kUniform2RealLocation, _))
+ .Times(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GLES2Util::GetGLDataTypeSizeForUniforms(kUniform2Type),
+ result->size);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformfvArrayElementSucceeds) {
+ GetUniformfv::Result* result =
+ static_cast<GetUniformfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformfv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2ElementFakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_,
+ GetUniformfv(kServiceProgramId, kUniform2ElementRealLocation, _))
+ .Times(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GLES2Util::GetGLDataTypeSizeForUniforms(kUniform2Type),
+ result->size);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformfvBadProgramFails) {
+ GetUniformfv::Result* result =
+ static_cast<GetUniformfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformfv cmd;
+ // non-existent program
+ cmd.Init(kInvalidClientId,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformfv(_, _, _)).Times(0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+// Valid id that is not a program. The GL spec requires a different error for
+// this case.
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ result->size = kInitialResult;
+ cmd.Init(client_shader_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ // Unlinked program
+ EXPECT_CALL(*gl_, CreateProgram())
+ .Times(1)
+ .WillOnce(Return(kNewServiceId))
+ .RetiresOnSaturation();
+ CreateProgram cmd2;
+ cmd2.Init(kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ result->size = kInitialResult;
+ cmd.Init(kNewClientId,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformfvBadLocationFails) {
+ GetUniformfv::Result* result =
+ static_cast<GetUniformfv::Result*>(shared_memory_address_);
+ result->size = 0;
+ GetUniformfv cmd;
+ // invalid location
+ cmd.Init(client_program_id_,
+ kInvalidUniformLocation,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformfv(_, _, _)).Times(0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformfvBadSharedMemoryFails) {
+ GetUniformfv cmd;
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GetUniformfv(_, _, _)).Times(0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kUniform2FakeLocation,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttachedShadersSucceeds) {
+ GetAttachedShaders cmd;
+ typedef GetAttachedShaders::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetAttachedShaders(kServiceProgramId, 1, _, _)).WillOnce(
+ DoAll(SetArgumentPointee<2>(1), SetArgumentPointee<3>(kServiceShaderId)));
+ cmd.Init(client_program_id_,
+ shared_memory_id_,
+ shared_memory_offset_,
+ Result::ComputeSize(1));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(client_shader_id_, result->GetData()[0]);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttachedShadersResultNotInitFail) {
+ GetAttachedShaders cmd;
+ typedef GetAttachedShaders::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 1;
+ EXPECT_CALL(*gl_, GetAttachedShaders(_, _, _, _)).Times(0);
+ cmd.Init(client_program_id_,
+ shared_memory_id_,
+ shared_memory_offset_,
+ Result::ComputeSize(1));
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttachedShadersBadProgramFails) {
+ GetAttachedShaders cmd;
+ typedef GetAttachedShaders::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetAttachedShaders(_, _, _, _)).Times(0);
+ cmd.Init(kInvalidClientId,
+ shared_memory_id_,
+ shared_memory_offset_,
+ Result::ComputeSize(1));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0U, result->size);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttachedShadersBadSharedMemoryFails) {
+ GetAttachedShaders cmd;
+ typedef GetAttachedShaders::Result Result;
+ cmd.Init(client_program_id_,
+ kInvalidSharedMemoryId,
+ shared_memory_offset_,
+ Result::ComputeSize(1));
+ EXPECT_CALL(*gl_, GetAttachedShaders(_, _, _, _)).Times(0);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset,
+ Result::ComputeSize(1));
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetShaderPrecisionFormatSucceeds) {
+ ScopedGLImplementationSetter gl_impl(::gfx::kGLImplementationEGLGLES2);
+ GetShaderPrecisionFormat cmd;
+ typedef GetShaderPrecisionFormat::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ const GLint range[2] = {62, 62};
+ const GLint precision = 16;
+ EXPECT_CALL(*gl_, GetShaderPrecisionFormat(_, _, _, _))
+ .WillOnce(DoAll(SetArrayArgument<2>(range, range + 2),
+ SetArgumentPointee<3>(precision)))
+ .RetiresOnSaturation();
+ cmd.Init(GL_VERTEX_SHADER,
+ GL_HIGH_FLOAT,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_NE(0, result->success);
+ EXPECT_EQ(range[0], result->min_range);
+ EXPECT_EQ(range[1], result->max_range);
+ EXPECT_EQ(precision, result->precision);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetShaderPrecisionFormatResultNotInitFails) {
+ GetShaderPrecisionFormat cmd;
+ typedef GetShaderPrecisionFormat::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 1;
+ // NOTE: GL might not be called. There is no Desktop OpenGL equivalent.
+ cmd.Init(GL_VERTEX_SHADER,
+ GL_HIGH_FLOAT,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetShaderPrecisionFormatBadArgsFails) {
+ typedef GetShaderPrecisionFormat::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ GetShaderPrecisionFormat cmd;
+ cmd.Init(
+ GL_TEXTURE_2D, GL_HIGH_FLOAT, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ result->success = 0;
+ cmd.Init(GL_VERTEX_SHADER,
+ GL_TEXTURE_2D,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest,
+ GetShaderPrecisionFormatBadSharedMemoryFails) {
+ GetShaderPrecisionFormat cmd;
+ cmd.Init(GL_VERTEX_SHADER,
+ GL_HIGH_FLOAT,
+ kInvalidSharedMemoryId,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(GL_VERTEX_SHADER,
+ GL_HIGH_FLOAT,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveUniformSucceeds) {
+ const GLuint kUniformIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveUniform cmd;
+ typedef GetActiveUniform::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(client_program_id_,
+ kUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_NE(0, result->success);
+ EXPECT_EQ(kUniform2Size, result->size);
+ EXPECT_EQ(kUniform2Type, result->type);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(
+ 0,
+ memcmp(
+ bucket->GetData(0, bucket->size()), kUniform2Name, bucket->size()));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveUniformResultNotInitFails) {
+ const GLuint kUniformIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveUniform cmd;
+ typedef GetActiveUniform::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 1;
+ cmd.Init(client_program_id_,
+ kUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveUniformBadProgramFails) {
+ const GLuint kUniformIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveUniform cmd;
+ typedef GetActiveUniform::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(kInvalidClientId,
+ kUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ result->success = 0;
+ cmd.Init(client_shader_id_,
+ kUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveUniformBadIndexFails) {
+ const uint32 kBucketId = 123;
+ GetActiveUniform cmd;
+ typedef GetActiveUniform::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(client_program_id_,
+ kBadUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveUniformBadSharedMemoryFails) {
+ const GLuint kUniformIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveUniform cmd;
+ cmd.Init(client_program_id_,
+ kUniformIndex,
+ kBucketId,
+ kInvalidSharedMemoryId,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kUniformIndex,
+ kBucketId,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveAttribSucceeds) {
+ const GLuint kAttribIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveAttrib cmd;
+ typedef GetActiveAttrib::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(client_program_id_,
+ kAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_NE(0, result->success);
+ EXPECT_EQ(kAttrib2Size, result->size);
+ EXPECT_EQ(kAttrib2Type, result->type);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(
+ 0,
+ memcmp(bucket->GetData(0, bucket->size()), kAttrib2Name, bucket->size()));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveAttribResultNotInitFails) {
+ const GLuint kAttribIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveAttrib cmd;
+ typedef GetActiveAttrib::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 1;
+ cmd.Init(client_program_id_,
+ kAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveAttribBadProgramFails) {
+ const GLuint kAttribIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveAttrib cmd;
+ typedef GetActiveAttrib::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(kInvalidClientId,
+ kAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ result->success = 0;
+ cmd.Init(client_shader_id_,
+ kAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveAttribBadIndexFails) {
+ const uint32 kBucketId = 123;
+ GetActiveAttrib cmd;
+ typedef GetActiveAttrib::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->success = 0;
+ cmd.Init(client_program_id_,
+ kBadAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, result->success);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetActiveAttribBadSharedMemoryFails) {
+ const GLuint kAttribIndex = 1;
+ const uint32 kBucketId = 123;
+ GetActiveAttrib cmd;
+ cmd.Init(client_program_id_,
+ kAttribIndex,
+ kBucketId,
+ kInvalidSharedMemoryId,
+ shared_memory_offset_);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kAttribIndex,
+ kBucketId,
+ shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetShaderInfoLogValidArgs) {
+ const char* kInfo = "hello";
+ const uint32 kBucketId = 123;
+ CompileShader compile_cmd;
+ GetShaderInfoLog cmd;
+ EXPECT_CALL(*gl_, ShaderSource(kServiceShaderId, 1, _, _));
+ EXPECT_CALL(*gl_, CompileShader(kServiceShaderId));
+ EXPECT_CALL(*gl_, GetShaderiv(kServiceShaderId, GL_COMPILE_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(GL_FALSE))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetShaderiv(kServiceShaderId, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(strlen(kInfo) + 1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetShaderInfoLog(kServiceShaderId, strlen(kInfo) + 1, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<2>(strlen(kInfo)),
+ SetArrayArgument<3>(kInfo, kInfo + strlen(kInfo) + 1)));
+ compile_cmd.Init(client_shader_id_);
+ cmd.Init(client_shader_id_, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(compile_cmd));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(strlen(kInfo) + 1, bucket->size());
+ EXPECT_EQ(0,
+ memcmp(bucket->GetData(0, bucket->size()), kInfo, bucket->size()));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetShaderInfoLogInvalidArgs) {
+ const uint32 kBucketId = 123;
+ GetShaderInfoLog cmd;
+ cmd.Init(kInvalidClientId, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, CompileShaderValidArgs) {
+ EXPECT_CALL(*gl_, ShaderSource(kServiceShaderId, 1, _, _));
+ EXPECT_CALL(*gl_, CompileShader(kServiceShaderId));
+ EXPECT_CALL(*gl_, GetShaderiv(kServiceShaderId, GL_COMPILE_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(GL_TRUE))
+ .RetiresOnSaturation();
+ CompileShader cmd;
+ cmd.Init(client_shader_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, CompileShaderInvalidArgs) {
+ CompileShader cmd;
+ cmd.Init(kInvalidClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+}
+
+TEST_P(GLES2DecoderTest, ShaderSourceBucketAndGetShaderSourceValidArgs) {
+ const uint32 kInBucketId = 123;
+ const uint32 kOutBucketId = 125;
+ const char kSource[] = "hello";
+ const uint32 kSourceSize = sizeof(kSource) - 1;
+ SetBucketAsCString(kInBucketId, kSource);
+ ShaderSourceBucket cmd;
+ cmd.Init(client_shader_id_, kInBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ ClearSharedMemory();
+ GetShaderSource get_cmd;
+ get_cmd.Init(client_shader_id_, kOutBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(get_cmd));
+ CommonDecoder::Bucket* bucket = decoder_->GetBucket(kOutBucketId);
+ ASSERT_TRUE(bucket != NULL);
+ EXPECT_EQ(kSourceSize + 1, bucket->size());
+ EXPECT_EQ(
+ 0, memcmp(bucket->GetData(0, bucket->size()), kSource, bucket->size()));
+}
+
+TEST_P(GLES2DecoderTest, ShaderSourceBucketInvalidArgs) {
+ const uint32 kBucketId = 123;
+ const char kSource[] = "hello";
+ const uint32 kSourceSize = sizeof(kSource) - 1;
+ memcpy(shared_memory_address_, kSource, kSourceSize);
+ ShaderSourceBucket cmd;
+ // Test no bucket.
+ cmd.Init(client_texture_id_, kBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ // Test invalid client.
+ SetBucketAsCString(kBucketId, kSource);
+ cmd.Init(kInvalidClientId, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+#if GLES2_TEST_SHADER_VS_PROGRAM_IDS
+ SetBucketAsCString(kBucketId, kSource);
+ cmd.Init(
+ client_program_id_, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+#endif // GLES2_TEST_SHADER_VS_PROGRAM_IDS
+}
+
+TEST_P(GLES2DecoderTest, ShaderSourceStripComments) {
+ const uint32 kInBucketId = 123;
+ const char kSource[] = "hello/*te\ast*/world//a\ab";
+ SetBucketAsCString(kInBucketId, kSource);
+ ShaderSourceBucket cmd;
+ cmd.Init(client_shader_id_, kInBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
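
The test above only checks that a source string containing `/* */` and `//` comments is accepted without a GL error; the stripping itself is not observable here. Below is one possible way to strip such comments, purely as an illustration of what a string like "hello/*test*/world//ab" reduces to. It ignores string literals and other GLSL subtleties and is not the decoder's implementation.

```cpp
// Illustrative C-style comment stripper (block and line comments only).
#include <cassert>
#include <string>

std::string StripComments(const std::string& src) {
  std::string out;
  for (size_t i = 0; i < src.size();) {
    if (src.compare(i, 2, "/*") == 0) {        // Block comment: skip to "*/".
      size_t end = src.find("*/", i + 2);
      i = (end == std::string::npos) ? src.size() : end + 2;
    } else if (src.compare(i, 2, "//") == 0) {  // Line comment: skip to newline.
      size_t end = src.find('\n', i + 2);
      i = (end == std::string::npos) ? src.size() : end;
    } else {
      out += src[i++];
    }
  }
  return out;
}

int main() {
  assert(StripComments("hello/*test*/world//ab") == "helloworld");
  return 0;
}
```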
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1iValidArgs) {
+ EXPECT_CALL(*gl_, Uniform1i(kUniform1RealLocation, 2));
+ Uniform1i cmd;
+ cmd.Init(kUniform1FakeLocation, 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1ivImmediateValidArgs) {
+ Uniform1ivImmediate& cmd = *GetImmediateAs<Uniform1ivImmediate>();
+ EXPECT_CALL(*gl_,
+ Uniform1iv(kUniform1RealLocation,
+ 1,
+ reinterpret_cast<GLint*>(ImmediateDataAddress(&cmd))));
+ GLint temp[1 * 2] = {
+ 0,
+ };
+ cmd.Init(kUniform1FakeLocation, 1, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1ivImmediateInvalidValidArgs) {
+ EXPECT_CALL(*gl_, Uniform1iv(_, _, _)).Times(0);
+ Uniform1ivImmediate& cmd = *GetImmediateAs<Uniform1ivImmediate>();
+ GLint temp[1 * 2] = {
+ 0,
+ };
+ cmd.Init(kUniform1FakeLocation, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1ivZeroCount) {
+ EXPECT_CALL(*gl_, Uniform1iv(_, _, _)).Times(0);
+ Uniform1ivImmediate& cmd = *GetImmediateAs<Uniform1ivImmediate>();
+ GLint temp = 0;
+ cmd.Init(kUniform1FakeLocation, 0, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1iSamplerIsLimited) {
+ EXPECT_CALL(*gl_, Uniform1i(_, _)).Times(0);
+ Uniform1i cmd;
+ cmd.Init(kUniform1FakeLocation, kNumTextureUnits);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, Uniform1ivSamplerIsLimited) {
+ EXPECT_CALL(*gl_, Uniform1iv(_, _, _)).Times(0);
+ Uniform1ivImmediate& cmd = *GetImmediateAs<Uniform1ivImmediate>();
+ GLint temp[] = {kNumTextureUnits};
+ cmd.Init(kUniform1FakeLocation, 1, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
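
Both sampler tests above reject a uniform value equal to kNumTextureUnits: a sampler value must index an existing texture unit, so valid values run from 0 to kNumTextureUnits - 1. A tiny sketch of that bound check (names and the example unit count are hypothetical):

```cpp
// Illustrative range check matching the GL_INVALID_VALUE cases above.
#include <cassert>

bool SamplerValueInRange(int value, int num_texture_units) {
  return value >= 0 && value < num_texture_units;
}

int main() {
  const int kNumTextureUnits = 8;  // Assumed unit count for the example.
  assert(SamplerValueInRange(kNumTextureUnits - 1, kNumTextureUnits));
  assert(!SamplerValueInRange(kNumTextureUnits, kNumTextureUnits));
  return 0;
}
```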
+
+TEST_P(GLES2DecoderTest, BindAttribLocationBucket) {
+ const uint32 kBucketId = 123;
+ const GLint kLocation = 2;
+ const char* kName = "testing";
+ EXPECT_CALL(*gl_,
+ BindAttribLocation(kServiceProgramId, kLocation, StrEq(kName)))
+ .Times(1);
+ SetBucketAsCString(kBucketId, kName);
+ BindAttribLocationBucket cmd;
+ cmd.Init(client_program_id_, kLocation, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, BindAttribLocationBucketInvalidArgs) {
+ const uint32 kBucketId = 123;
+ const GLint kLocation = 2;
+ const char* kName = "testing";
+ EXPECT_CALL(*gl_, BindAttribLocation(_, _, _)).Times(0);
+ BindAttribLocationBucket cmd;
+ // check bucket does not exist.
+ cmd.Init(client_program_id_, kLocation, kBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ // check bucket is empty.
+ SetBucketAsCString(kBucketId, NULL);
+ cmd.Init(client_program_id_, kLocation, kBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ // Check bad program id
+ SetBucketAsCString(kBucketId, kName);
+ cmd.Init(kInvalidClientId, kLocation, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttribLocation) {
+ const uint32 kBucketId = 123;
+ const char* kNonExistentName = "foobar";
+ typedef GetAttribLocation::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ SetBucketAsCString(kBucketId, kAttrib2Name);
+ *result = -1;
+ GetAttribLocation cmd;
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kAttrib2Location, *result);
+ SetBucketAsCString(kBucketId, kNonExistentName);
+ *result = -1;
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetAttribLocationInvalidArgs) {
+ const uint32 kBucketId = 123;
+ typedef GetAttribLocation::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ *result = -1;
+ GetAttribLocation cmd;
+ // Check no bucket
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+ // Check bad program id.
+ SetBucketAsCString(kBucketId, kAttrib2Name);
+ cmd.Init(kInvalidClientId, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ *result = -1;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // Check bad memory
+ cmd.Init(client_program_id_,
+ kBucketId,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kBucketId,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformLocation) {
+ const uint32 kBucketId = 123;
+ const char* kNonExistentName = "foobar";
+ typedef GetUniformLocation::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ SetBucketAsCString(kBucketId, kUniform2Name);
+ *result = -1;
+ GetUniformLocation cmd;
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(kUniform2FakeLocation, *result);
+ SetBucketAsCString(kBucketId, kNonExistentName);
+ *result = -1;
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+}
+
+TEST_P(GLES2DecoderWithShaderTest, GetUniformLocationInvalidArgs) {
+ const uint32 kBucketId = 123;
+ typedef GetUniformLocation::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ *result = -1;
+ GetUniformLocation cmd;
+ // Check no bucket
+ cmd.Init(client_program_id_, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+ // Check bad program id.
+ SetBucketAsCString(kBucketId, kUniform2Name);
+ cmd.Init(kInvalidClientId, kBucketId, kSharedMemoryId, kSharedMemoryOffset);
+ *result = -1;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(-1, *result);
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // Check bad memory
+ cmd.Init(client_program_id_,
+ kBucketId,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(client_program_id_,
+ kBucketId,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderWithShaderTest, BindUniformLocationCHROMIUMBucket) {
+ const uint32 kBucketId = 123;
+ const GLint kLocation = 2;
+ const char* kName = "testing";
+ const char* kBadName1 = "gl_testing";
+ const char* kBadName2 = "testing[1]";
+
+ SetBucketAsCString(kBucketId, kName);
+ BindUniformLocationCHROMIUMBucket cmd;
+ cmd.Init(client_program_id_,
+ kLocation,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // check negative location
+ SetBucketAsCString(kBucketId, kName);
+ cmd.Init(client_program_id_, -1, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // check highest location
+ SetBucketAsCString(kBucketId, kName);
+ GLint kMaxLocation =
+ (kMaxFragmentUniformVectors + kMaxVertexUniformVectors) * 4 - 1;
+ cmd.Init(client_program_id_,
+ kMaxLocation,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // check too high location
+ SetBucketAsCString(kBucketId, kName);
+ cmd.Init(client_program_id_,
+ kMaxLocation + 1,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ // check bad name "gl_..."
+ SetBucketAsCString(kBucketId, kBadName1);
+ cmd.Init(client_program_id_,
+ kLocation,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ // check bad name "name[1]" non zero
+ SetBucketAsCString(kBucketId, kBadName2);
+ cmd.Init(client_program_id_,
+ kLocation,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
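
The test above derives the highest bindable location as (kMaxFragmentUniformVectors + kMaxVertexUniformVectors) * 4 - 1: each uniform vector can account for up to four scalar locations, hence the factor of 4, and locations are zero-based, hence the -1. A worked example with hypothetical caps:

```cpp
// Worked example of the location bound; the caps below are assumed values
// chosen only to make the arithmetic concrete.
constexpr int kMaxFragmentUniformVectors = 16;   // assumed
constexpr int kMaxVertexUniformVectors = 128;    // assumed
constexpr int kMaxLocation =
    (kMaxFragmentUniformVectors + kMaxVertexUniformVectors) * 4 - 1;
static_assert(kMaxLocation == 575, "(16 + 128) * 4 - 1");
// Binding to kMaxLocation succeeds in the test; kMaxLocation + 1 generates
// GL_INVALID_VALUE.
```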
+
+TEST_P(GLES2DecoderManualInitTest, ClearUniformsBeforeFirstProgramUse) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::CLEAR_UNIFORMS_BEFORE_FIRST_PROGRAM_USE));
+ InitState init;
+ init.gl_version = "3.0";
+ init.has_alpha = true;
+ init.request_alpha = true;
+ init.bind_generates_resource = true;
+ InitDecoderWithCommandLine(init, &command_line);
+ {
+ static AttribInfo attribs[] = {
+ {
+ kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location,
+ },
+ {
+ kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location,
+ },
+ {
+ kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location,
+ },
+ };
+ static UniformInfo uniforms[] = {
+ {kUniform1Name, kUniform1Size, kUniform1Type, kUniform1FakeLocation,
+ kUniform1RealLocation, kUniform1DesiredLocation},
+ {kUniform2Name, kUniform2Size, kUniform2Type, kUniform2FakeLocation,
+ kUniform2RealLocation, kUniform2DesiredLocation},
+ {kUniform3Name, kUniform3Size, kUniform3Type, kUniform3FakeLocation,
+ kUniform3RealLocation, kUniform3DesiredLocation},
+ };
+ SetupShader(attribs,
+ arraysize(attribs),
+ uniforms,
+ arraysize(uniforms),
+ client_program_id_,
+ kServiceProgramId,
+ client_vertex_shader_id_,
+ kServiceVertexShaderId,
+ client_fragment_shader_id_,
+ kServiceFragmentShaderId);
+ TestHelper::SetupExpectationsForClearingUniforms(
+ gl_.get(), uniforms, arraysize(uniforms));
+ }
+
+ {
+ EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::UseProgram cmd;
+ cmd.Init(client_program_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+// TODO(gman): DeleteProgram
+
+// TODO(gman): UseProgram
+
+// TODO(gman): DeleteShader
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
new file mode 100644
index 0000000..82d5653
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -0,0 +1,2842 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_surface_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_image_stub.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+#if !defined(GL_DEPTH24_STENCIL8)
+#define GL_DEPTH24_STENCIL8 0x88F0
+#endif
+
+using ::gfx::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MatcherCast;
+using ::testing::Mock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+TEST_P(GLES2DecoderTest, GenerateMipmapWrongFormatsFails) {
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 16, 17, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, GenerateMipmapHandlesOutOfMemory) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ GLint width = 0;
+ GLint height = 0;
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 2, &width, &height));
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 16,
+ 16,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(GL_TEXTURE_2D)).Times(1);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 2, &width, &height));
+}
+
+TEST_P(GLES2DecoderTest, GenerateMipmapClearsUnclearedTexture) {
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(GL_TEXTURE_2D));
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Same as GenerateMipmapClearsUnclearedTexture, but with workaround
+// |set_texture_filters_before_generating_mipmap|.
+TEST_P(GLES2DecoderManualInitTest, SetTextureFiltersBeforeGenerateMipmap) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::SET_TEXTURE_FILTER_BEFORE_GENERATING_MIPMAP));
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoderWithCommandLine(init, &command_line);
+
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GenerateMipmapEXT(GL_TEXTURE_2D));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_LINEAR))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, ActiveTextureValidArgs) {
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE1));
+ SpecializedSetup<ActiveTexture, 0>(true);
+ ActiveTexture cmd;
+ cmd.Init(GL_TEXTURE1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, ActiveTextureInvalidArgs) {
+ EXPECT_CALL(*gl_, ActiveTexture(_)).Times(0);
+ SpecializedSetup<ActiveTexture, 0>(false);
+ ActiveTexture cmd;
+ cmd.Init(GL_TEXTURE0 - 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(kNumTextureUnits);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, TexSubImage2DValidArgs) {
+ const int kWidth = 16;
+ const int kHeight = 8;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 1,
+ 1,
+ 0,
+ kWidth - 1,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 1,
+ 0,
+ kWidth - 1,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, TexSubImage2DBadArgs) {
+ const int kWidth = 16;
+ const int kHeight = 8;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 0,
+ 0);
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE0,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_TRUE,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_INT,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ -1,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 1,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ -1,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 1,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth + 1,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight + 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kInvalidSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(GL_TEXTURE_2D,
+ 1,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kInvalidSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
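
The bad-args test above rejects sub-rectangles that fall outside the previously defined level: negative offsets, or offsets that push the rectangle past the kWidth x kHeight extent, all generate GL_INVALID_VALUE. A minimal sketch of that bounds rule (the helper name is illustrative):

```cpp
// Sub-image rectangle must lie entirely inside the previously defined level.
#include <cassert>

bool SubImageFits(int xoffset, int yoffset, int width, int height,
                  int level_width, int level_height) {
  return xoffset >= 0 && yoffset >= 0 && width >= 0 && height >= 0 &&
         xoffset + width <= level_width && yoffset + height <= level_height;
}

int main() {
  const int kWidth = 16, kHeight = 8;
  assert(SubImageFits(0, 0, kWidth, kHeight, kWidth, kHeight));
  assert(SubImageFits(1, 0, kWidth - 1, kHeight, kWidth, kHeight));
  assert(!SubImageFits(1, 0, kWidth, kHeight, kWidth, kHeight));    // x overflow
  assert(!SubImageFits(0, 0, kWidth + 1, kHeight, kWidth, kHeight));
  assert(!SubImageFits(-1, 0, kWidth, kHeight, kWidth, kHeight));
  return 0;
}
```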
+
+TEST_P(GLES2DecoderTest, CopyTexSubImage2DValidArgs) {
+ const int kWidth = 16;
+ const int kHeight = 8;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_,
+ CopyTexSubImage2D(GL_TEXTURE_2D, 1, 0, 0, 0, 0, kWidth, kHeight))
+ .Times(1)
+ .RetiresOnSaturation();
+ CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 1, 0, 0, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, CopyTexSubImage2DBadArgs) {
+ const int kWidth = 16;
+ const int kHeight = 8;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 0,
+ 0);
+ CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE0, 1, 0, 0, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, -1, 0, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, 1, 0, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, 0, -1, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, 0, 1, 0, 0, kWidth, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, 0, 0, 0, 0, kWidth + 1, kHeight);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 1, 0, 0, 0, 0, kWidth, kHeight + 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, TexImage2DRedefinitionSucceeds) {
+ const int kWidth = 16;
+ const int kHeight = 8;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_NO_ERROR));
+ for (int ii = 0; ii < 2; ++ii) {
+ TexImage2D cmd;
+ if (ii == 0) {
+ EXPECT_CALL(*gl_,
+ TexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ _))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ } else {
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kWidth,
+ kHeight);
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 0,
+ 0);
+ }
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ kWidth,
+ kHeight - 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Consider this TexSubImage2D command part of the previous TexImage2D
+ // (last GL_TRUE argument). It will be skipped if there are bugs in the
+ // redefinition case.
+ TexSubImage2D cmd2;
+ cmd2.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ kWidth,
+ kHeight - 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_TRUE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ }
+}
+
+TEST_P(GLES2DecoderTest, TexImage2DGLError) {
+ GLenum target = GL_TEXTURE_2D;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ GLint border = 0;
+ GLenum format = GL_RGBA;
+ GLenum type = GL_UNSIGNED_BYTE;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, level, &width, &height));
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ TexImage2D(target,
+ level,
+ internal_format,
+ width,
+ height,
+ border,
+ format,
+ type,
+ _))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexImage2D cmd;
+ cmd.Init(target,
+ level,
+ internal_format,
+ width,
+ height,
+ format,
+ type,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, level, &width, &height));
+}
+
+TEST_P(GLES2DecoderTest, CopyTexImage2DGLError) {
+ GLenum target = GL_TEXTURE_2D;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ GLint border = 0;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, level, &width, &height));
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_OUT_OF_MEMORY))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ CopyTexImage2D(
+ target, level, internal_format, 0, 0, width, height, border))
+ .Times(1)
+ .RetiresOnSaturation();
+ CopyTexImage2D cmd;
+ cmd.Init(target, level, internal_format, 0, 0, width, height);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, level, &width, &height));
+}
+
+TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DBucketBadBucket) {
+ InitState init;
+ init.extensions = "GL_EXT_texture_compression_s3tc";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ const uint32 kBadBucketId = 123;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ CompressedTexImage2DBucket cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,
+ 4,
+ 4,
+ kBadBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ CompressedTexSubImage2DBucket cmd2;
+ cmd2.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ 4,
+ 4,
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,
+ kBadBucketId);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd2));
+}
+
+namespace {
+
+struct S3TCTestData {
+ GLenum format;
+ size_t block_size;
+};
+
+} // anonymous namespace.
+
+TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DS3TC) {
+ InitState init;
+ init.extensions = "GL_EXT_texture_compression_s3tc";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ const uint32 kBucketId = 123;
+ CommonDecoder::Bucket* bucket = decoder_->CreateBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+ static const S3TCTestData test_data[] = {
+ {
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT, 8,
+ },
+ {
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, 8,
+ },
+ {
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, 16,
+ },
+ {
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, 16,
+ },
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_data); ++ii) {
+ const S3TCTestData& test = test_data[ii];
+ CompressedTexImage2DBucket cmd;
+ // test small width.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, test.format, 2, 4, 0, test.block_size, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test bad width.
+ cmd.Init(GL_TEXTURE_2D, 0, test.format, 5, 4, kBucketId);
+ bucket->SetSize(test.block_size * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // test small height.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, test.format, 4, 2, 0, test.block_size, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test bad height.
+ cmd.Init(GL_TEXTURE_2D, 0, test.format, 4, 5, kBucketId);
+ bucket->SetSize(test.block_size * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // test small for level 0.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, test.format, 1, 1, 0, test.block_size, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test small for level 0.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, test.format, 2, 2, 0, test.block_size, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test size too large.
+ cmd.Init(GL_TEXTURE_2D, 0, test.format, 4, 4, kBucketId);
+ bucket->SetSize(test.block_size * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
+ // test size too small.
+ cmd.Init(GL_TEXTURE_2D, 0, test.format, 4, 4, kBucketId);
+ bucket->SetSize(test.block_size / 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
+ // test with 3 mips.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, test.format, 4, 4, 0, test.block_size, kBucketId);
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 1, test.format, 2, 2, 0, test.block_size, kBucketId);
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 2, test.format, 1, 1, 0, test.block_size, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+    // Test a 16x16 image.
+ DoCompressedTexImage2D(GL_TEXTURE_2D,
+ 0,
+ test.format,
+ 16,
+ 16,
+ 0,
+ test.block_size * 4 * 4,
+ kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ CompressedTexSubImage2DBucket sub_cmd;
+ bucket->SetSize(test.block_size);
+ // Test sub image bad xoffset
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 1, 0, 4, 4, test.format, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test sub image bad yoffset
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 2, 4, 4, test.format, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test sub image bad width
+ bucket->SetSize(test.block_size * 2);
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 5, 4, test.format, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test sub image bad height
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 4, 5, test.format, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test sub image bad size
+ bucket->SetSize(test.block_size + 1);
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 4, 4, test.format, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
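+    // Valid sub image updates: offsets and dimensions are multiples of the
+    // 4x4 block size, and the bucket is sized to exactly block_size bytes per
+    // block, so every command should succeed.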
+ for (GLint yoffset = 0; yoffset <= 8; yoffset += 4) {
+ for (GLint xoffset = 0; xoffset <= 8; xoffset += 4) {
+ for (GLsizei height = 4; height <= 8; height += 4) {
+ for (GLsizei width = 4; width <= 8; width += 4) {
+ GLsizei size = test.block_size * (width / 4) * (height / 4);
+ bucket->SetSize(size);
+ EXPECT_CALL(*gl_,
+ CompressedTexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ xoffset,
+ yoffset,
+ width,
+ height,
+ test.format,
+ size,
+ _))
+ .Times(1)
+ .RetiresOnSaturation();
+ sub_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ xoffset,
+ yoffset,
+ width,
+ height,
+ test.format,
+ kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DETC1) {
+ InitState init;
+ init.extensions = "GL_OES_compressed_ETC1_RGB8_texture";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ const uint32 kBucketId = 123;
+ CommonDecoder::Bucket* bucket = decoder_->CreateBucket(kBucketId);
+ ASSERT_TRUE(bucket != NULL);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
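+  // ETC1 encodes each 4x4 block of RGB texels in 8 bytes.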
+ const GLenum kFormat = GL_ETC1_RGB8_OES;
+ const size_t kBlockSize = 8;
+
+ CompressedTexImage2DBucket cmd;
+ // test small width.
+ DoCompressedTexImage2D(GL_TEXTURE_2D, 0, kFormat, 4, 8, 0, 16, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test small height.
+ DoCompressedTexImage2D(GL_TEXTURE_2D, 0, kFormat, 8, 4, 0, 16, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // test size too large.
+ cmd.Init(GL_TEXTURE_2D, 0, kFormat, 4, 4, kBucketId);
+ bucket->SetSize(kBlockSize * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
+ // test size too small.
+ cmd.Init(GL_TEXTURE_2D, 0, kFormat, 4, 4, kBucketId);
+ bucket->SetSize(kBlockSize / 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+
+  // Test a 16x16 image.
+ DoCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, kFormat, 16, 16, 0, kBlockSize * 16, kBucketId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Test CompressedTexSubImage not allowed
+ CompressedTexSubImage2DBucket sub_cmd;
+ bucket->SetSize(kBlockSize);
+ sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 4, 4, kFormat, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test TexSubImage not allowed for ETC1 compressed texture
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ GLenum type, internal_format;
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(kFormat, internal_format);
+ TexSubImage2D texsub_cmd;
+ texsub_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ 4,
+ 4,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(texsub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Test CopyTexSubImage not allowed for ETC1 compressed texture
+ CopyTexSubImage2D copy_cmd;
+ copy_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 0, 0, 4, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(copy_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalBindTexture) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_EXTERNAL_OES, kNewServiceId));
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ BindTexture cmd;
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ TextureRef* texture_ref = GetTexture(kNewClientId);
+ EXPECT_TRUE(texture_ref != NULL);
+ EXPECT_TRUE(texture_ref->texture()->target() == GL_TEXTURE_EXTERNAL_OES);
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalGetBinding) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, result->GetData()))
+ .Times(0);
+ result->size = 0;
+ GetIntegerv cmd;
+ cmd.Init(GL_TEXTURE_BINDING_EXTERNAL_OES,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_TEXTURE_BINDING_EXTERNAL_OES),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(client_texture_id_, (uint32)result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalTextureDefaults) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ EXPECT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_EXTERNAL_OES);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalTextureParam) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+
+ EXPECT_CALL(*gl_,
+ TexParameteri(
+ GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ EXPECT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_EXTERNAL_OES);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalTextureParamInvalid) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES,
+ GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST_MIPMAP_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ cmd.Init(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ EXPECT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_EXTERNAL_OES);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, EGLImageExternalTexImage2DError) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ GLenum target = GL_TEXTURE_EXTERNAL_OES;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ GLenum format = GL_RGBA;
+ GLenum type = GL_UNSIGNED_BYTE;
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+ ASSERT_TRUE(GetTexture(client_texture_id_) != NULL);
+ TexImage2D cmd;
+ cmd.Init(target,
+ level,
+ internal_format,
+ width,
+ height,
+ format,
+ type,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+
+ // TexImage2D is not allowed with GL_TEXTURE_EXTERNAL_OES targets.
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DefaultTextureZero) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ BindTexture cmd2;
+ cmd2.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_CUBE_MAP, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DefaultTextureBGR) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
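+  // With bind_generates_resource, binding texture 0 binds the decoder's
+  // default textures instead of service id 0.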
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(
+ *gl_, BindTexture(GL_TEXTURE_2D, TestHelper::kServiceDefaultTexture2dId));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ BindTexture cmd2;
+ cmd2.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_,
+ BindTexture(GL_TEXTURE_CUBE_MAP,
+ TestHelper::kServiceDefaultTextureCubemapId));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// Test that default texture 0 is immutable.
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexParameterf) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexParameterf cmd2;
+ cmd2.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_CUBE_MAP, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexParameterf cmd2;
+ cmd2.Init(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexParameteri) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexParameteri cmd2;
+ cmd2.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_CUBE_MAP, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexParameteri cmd2;
+ cmd2.Init(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexParameterfv) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ GLfloat data = GL_NEAREST;
+ TexParameterfvImmediate& cmd2 =
+ *GetImmediateAs<TexParameterfvImmediate>();
+ cmd2.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ &data);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd2, sizeof(data)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_CUBE_MAP, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ GLfloat data = GL_NEAREST;
+ TexParameterfvImmediate& cmd2 =
+ *GetImmediateAs<TexParameterfvImmediate>();
+ cmd2.Init(GL_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_MAG_FILTER,
+ &data);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd2, sizeof(data)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexParameteriv) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+    GLint data = GL_NEAREST;
+    TexParameterivImmediate& cmd2 =
+        *GetImmediateAs<TexParameterivImmediate>();
+ cmd2.Init(GL_TEXTURE_2D,
+ GL_TEXTURE_MAG_FILTER,
+ &data);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd2, sizeof(data)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+
+ {
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_CUBE_MAP, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_CUBE_MAP, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+    GLint data = GL_NEAREST;
+    TexParameterivImmediate& cmd2 =
+        *GetImmediateAs<TexParameterivImmediate>();
+ cmd2.Init(GL_TEXTURE_CUBE_MAP,
+ GL_TEXTURE_MAG_FILTER,
+ &data);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd2, sizeof(data)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexImage2D) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexImage2D cmd2;
+ cmd2.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, NoDefaultTexSubImage2D) {
+ InitState init;
+ init.gl_version = "3.0";
+ InitDecoder(init);
+
+ BindTexture cmd1;
+ cmd1.Init(GL_TEXTURE_2D, 0);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, 0));
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TexSubImage2D cmd2;
+ cmd2.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleBindTexture) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_RECTANGLE_ARB, kNewServiceId));
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId));
+ BindTexture cmd;
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ Texture* texture = GetTexture(kNewClientId)->texture();
+ EXPECT_TRUE(texture != NULL);
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_RECTANGLE_ARB);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleGetBinding) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(
+ GL_TEXTURE_RECTANGLE_ARB, client_texture_id_, kServiceTextureId);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_,
+ GetIntegerv(GL_TEXTURE_BINDING_RECTANGLE_ARB, result->GetData()))
+ .Times(0);
+ result->size = 0;
+ GetIntegerv cmd;
+ cmd.Init(GL_TEXTURE_BINDING_RECTANGLE_ARB,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
+ GL_TEXTURE_BINDING_RECTANGLE_ARB),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(client_texture_id_, (uint32)result->GetData()[0]);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleTextureDefaults) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(
+ GL_TEXTURE_RECTANGLE_ARB, client_texture_id_, kServiceTextureId);
+
+ Texture* texture = GetTexture(client_texture_id_)->texture();
+ EXPECT_TRUE(texture != NULL);
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_RECTANGLE_ARB);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleTextureParam) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ DoBindTexture(
+ GL_TEXTURE_RECTANGLE_ARB, client_texture_id_, kServiceTextureId);
+
+ EXPECT_CALL(*gl_,
+ TexParameteri(
+ GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
+ EXPECT_CALL(*gl_,
+ TexParameteri(
+ GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
+ EXPECT_CALL(
+ *gl_,
+ TexParameteri(
+ GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ Texture* texture = GetTexture(client_texture_id_)->texture();
+ EXPECT_TRUE(texture != NULL);
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_RECTANGLE_ARB);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleTextureParamInvalid) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ DoBindTexture(
+ GL_TEXTURE_RECTANGLE_ARB, client_texture_id_, kServiceTextureId);
+
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB,
+ GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST_MIPMAP_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ cmd.Init(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ Texture* texture = GetTexture(client_texture_id_)->texture();
+ EXPECT_TRUE(texture != NULL);
+ EXPECT_TRUE(texture->target() == GL_TEXTURE_RECTANGLE_ARB);
+ EXPECT_TRUE(texture->min_filter() == GL_LINEAR);
+ EXPECT_TRUE(texture->wrap_s() == GL_CLAMP_TO_EDGE);
+ EXPECT_TRUE(texture->wrap_t() == GL_CLAMP_TO_EDGE);
+}
+
+TEST_P(GLES2DecoderManualInitTest, ARBTextureRectangleTexImage2DError) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_rectangle";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ GLenum target = GL_TEXTURE_RECTANGLE_ARB;
+ GLint level = 0;
+ GLenum internal_format = GL_RGBA;
+ GLsizei width = 2;
+ GLsizei height = 4;
+ GLenum format = GL_RGBA;
+ GLenum type = GL_UNSIGNED_BYTE;
+ DoBindTexture(
+ GL_TEXTURE_RECTANGLE_ARB, client_texture_id_, kServiceTextureId);
+ ASSERT_TRUE(GetTexture(client_texture_id_) != NULL);
+ TexImage2D cmd;
+ cmd.Init(target,
+ level,
+ internal_format,
+ width,
+ height,
+ format,
+ type,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+
+ // TexImage2D is not allowed with GL_TEXTURE_RECTANGLE_ARB targets.
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexSubImage2DClearsAfterTexImage2DNULL) {
+ InitState init;
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ InitDecoder(init);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
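+  // The level was defined with no data, so the first partial update must
+  // clear the level before the TexSubImage2D call.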
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+  // Test that calling it again does not clear.
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, TexSubImage2DDoesNotClearAfterTexImage2DNULLThenData) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+  // Test that calling it again does not clear.
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(
+ GLES2DecoderManualInitTest,
+ TexSubImage2DDoesNotClearAfterTexImage2DNULLThenDataWithTexImage2DIsFaster) {
+ CommandLine command_line(0, NULL);
+ command_line.AppendSwitchASCII(
+ switches::kGpuDriverBugWorkarounds,
+ base::IntToString(gpu::TEXSUBIMAGE2D_FASTER_THAN_TEXIMAGE2D));
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoderWithCommandLine(init, &command_line);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+
+ {
+    // Uses TexSubImage2D internally because the above workaround is active
+    // and the update covers the full size of the texture.
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(
+ GL_TEXTURE_2D, 0, 0, 0, 2, 2, GL_RGBA, GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::TexImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+  // Test that calling it again does not clear.
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, TexSubImage2DClearsAfterTexImage2DWithDataThenNULL) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+  // Put in data (so it should be marked as cleared).
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ // Put in no data.
+ TexImage2D tex_cmd;
+ tex_cmd.Init(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ // It won't actually call TexImage2D, just mark it as uncleared.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(tex_cmd));
+  // The next call to TexSubImage2D should clear.
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderTest, CopyTexImage2DMarksTextureAsCleared) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, CopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 1, 1, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ CopyTexImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+}
+
+TEST_P(GLES2DecoderTest, CopyTexSubImage2DClearsUnclearedTexture) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ 2,
+ 2);
+ EXPECT_CALL(*gl_, CopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 0, 0, 1, 1))
+ .Times(1)
+ .RetiresOnSaturation();
+ CopyTexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 0, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_P(GLES2DecoderManualInitTest, CompressedImage2DMarksTextureAsCleared) {
+ InitState init;
+ init.extensions = "GL_EXT_texture_compression_s3tc";
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(
+ *gl_,
+ CompressedTexImage2D(
+ GL_TEXTURE_2D, 0, GL_COMPRESSED_RGB_S3TC_DXT1_EXT, 4, 4, 0, 8, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ CompressedTexImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT,
+ 4,
+ 4,
+ 8,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ EXPECT_TRUE(texture_ref->texture()->SafeToRenderFrom());
+}
+
+TEST_P(GLES2DecoderTest, TextureUsageAngleExtNotEnabledByDefault) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+
+ TexParameteri cmd;
+ cmd.Init(
+ GL_TEXTURE_2D, GL_TEXTURE_USAGE_ANGLE, GL_FRAMEBUFFER_ATTACHMENT_ANGLE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, ProduceAndConsumeTextureCHROMIUM) {
+ Mailbox mailbox = Mailbox::Generate();
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 1, GL_RGBA, 2, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ ProduceTextureCHROMIUMImmediate& produce_cmd =
+ *GetImmediateAs<ProduceTextureCHROMIUMImmediate>();
+ produce_cmd.Init(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(produce_cmd, sizeof(mailbox.name)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Texture didn't change.
+ GLsizei width;
+ GLsizei height;
+ GLenum type;
+ GLenum internal_format;
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(2, width);
+ EXPECT_EQ(4, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ // Service ID has not changed.
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ // Create new texture for consume.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgumentPointee<1>(kNewServiceId))
+ .RetiresOnSaturation();
+ DoBindTexture(GL_TEXTURE_2D, kNewClientId, kNewServiceId);
+
+  // Assigns and binds the original service-side texture ID.
+ EXPECT_CALL(*gl_, DeleteTextures(1, _)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kServiceTextureId))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ ConsumeTextureCHROMIUMImmediate& consume_cmd =
+ *GetImmediateAs<ConsumeTextureCHROMIUMImmediate>();
+ consume_cmd.Init(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(consume_cmd, sizeof(mailbox.name)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Texture is redefined.
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(2, width);
+ EXPECT_EQ(4, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ // Service ID is restored.
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+}
+
+TEST_P(GLES2DecoderTest, ProduceAndConsumeDirectTextureCHROMIUM) {
+ Mailbox mailbox = Mailbox::Generate();
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 1, GL_RGBA, 2, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ ProduceTextureDirectCHROMIUMImmediate& produce_cmd =
+ *GetImmediateAs<ProduceTextureDirectCHROMIUMImmediate>();
+ produce_cmd.Init(client_texture_id_, GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(produce_cmd, sizeof(mailbox.name)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Texture didn't change.
+ GLsizei width;
+ GLsizei height;
+ GLenum type;
+ GLenum internal_format;
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(2, width);
+ EXPECT_EQ(4, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ // Service ID has not changed.
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ // Consume the texture into a new client ID.
+ GLuint new_texture_id = kNewClientId;
+ CreateAndConsumeTextureCHROMIUMImmediate& consume_cmd =
+ *GetImmediateAs<CreateAndConsumeTextureCHROMIUMImmediate>();
+ consume_cmd.Init(GL_TEXTURE_2D, new_texture_id, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(consume_cmd, sizeof(mailbox.name)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Make sure the new client ID is associated with the produced service ID.
+ texture_ref = group().texture_manager()->GetTexture(new_texture_id);
+ ASSERT_TRUE(texture_ref != NULL);
+ texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ DoBindTexture(GL_TEXTURE_2D, kNewClientId, kServiceTextureId);
+
+ // Texture is redefined.
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(2, width);
+ EXPECT_EQ(4, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+}
+
+TEST_P(GLES2DecoderTest, ProduceTextureCHROMIUMInvalidTarget) {
+ Mailbox mailbox = Mailbox::Generate();
+
+ DoBindTexture(GL_TEXTURE_CUBE_MAP, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGBA, 3, 1, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ ProduceTextureDirectCHROMIUMImmediate& produce_cmd =
+ *GetImmediateAs<ProduceTextureDirectCHROMIUMImmediate>();
+ produce_cmd.Init(client_texture_id_, GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(produce_cmd, sizeof(mailbox.name)));
+
+  // ProduceTexture should fail if the texture and produce targets don't match.
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, DepthTextureBadArgs) {
+ InitState init;
+ init.extensions = "GL_ANGLE_depth_texture";
+ init.gl_version = "opengl es 2.0";
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ // Check trying to upload data fails.
+ TexImage2D tex_cmd;
+ tex_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(tex_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ // Try level > 0.
+ tex_cmd.Init(GL_TEXTURE_2D,
+ 1,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ 0,
+ 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(tex_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ // Make a 1 pixel depth texture.
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_DEPTH_COMPONENT,
+ 1,
+ 1,
+ 0,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ 0,
+ 0);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Check that trying to update it fails.
+ TexSubImage2D tex_sub_cmd;
+ tex_sub_cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ 1,
+ 1,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(tex_sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Check that trying to CopyTexImage2D fails
+ CopyTexImage2D copy_tex_cmd;
+ copy_tex_cmd.Init(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, 0, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(copy_tex_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ // Check that trying to CopyTexSubImage2D fails
+ CopyTexSubImage2D copy_sub_cmd;
+ copy_sub_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 0, 0, 1, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(copy_sub_cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, GenerateMipmapDepthTexture) {
+ InitState init;
+ init.extensions = "GL_ANGLE_depth_texture";
+ init.gl_version = "opengl es 2.0";
+ init.has_depth = true;
+ init.has_stencil = true;
+ init.request_depth = true;
+ init.request_stencil = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_DEPTH_COMPONENT,
+ 2,
+ 2,
+ 0,
+ GL_DEPTH_COMPONENT,
+ GL_UNSIGNED_INT,
+ 0,
+ 0);
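+  // Generating mipmaps for a depth texture is not allowed.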
+ GenerateMipmap cmd;
+ cmd.Init(GL_TEXTURE_2D);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, BindTexImage2DCHROMIUM) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ GetImageManager()->AddImage(image.get(), 1);
+ EXPECT_FALSE(GetImageManager()->LookupImage(1) == NULL);
+
+ GLsizei width;
+ GLsizei height;
+ GLenum type;
+ GLenum internal_format;
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+
+ // Bind image to texture.
+ // ScopedGLErrorSuppressor calls GetError on its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_2D, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ // Image should now be set.
+ EXPECT_FALSE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+
+ // Define new texture image.
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ // Image should no longer be set.
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+}
+
+TEST_P(GLES2DecoderTest, BindTexImage2DCHROMIUMCubeMapNotAllowed) {
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ GetImageManager()->AddImage(image.get(), 1);
+ DoBindTexture(GL_TEXTURE_CUBE_MAP, client_texture_id_, kServiceTextureId);
+
+ BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_CUBE_MAP, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, OrphanGLImageWithTexImage2D) {
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ GetImageManager()->AddImage(image.get(), 1);
+ DoBindTexture(GL_TEXTURE_CUBE_MAP, client_texture_id_, kServiceTextureId);
+
+ BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_CUBE_MAP, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+}
+
+TEST_P(GLES2DecoderTest, ReleaseTexImage2DCHROMIUM) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 3, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ GetImageManager()->AddImage(image.get(), 1);
+ EXPECT_FALSE(GetImageManager()->LookupImage(1) == NULL);
+
+ GLsizei width;
+ GLsizei height;
+ GLenum type;
+ GLenum internal_format;
+
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_EQ(3, width);
+ EXPECT_EQ(1, height);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &internal_format));
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), internal_format);
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+
+ // Bind image to texture.
+ // ScopedGLErrorSuppressor calls GetError on its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_2D, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ // Image should now be set.
+ EXPECT_FALSE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+
+ // Release image from texture.
+ // ScopedGLErrorSuppressor calls GetError on its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ ReleaseTexImage2DCHROMIUM release_tex_image_2d_cmd;
+ release_tex_image_2d_cmd.Init(GL_TEXTURE_2D, 1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(release_tex_image_2d_cmd));
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ // Image should no longer be set.
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+}
+
+class MockGLImage : public gfx::GLImage {
+ public:
+ MockGLImage() {}
+
+ // Overridden from gfx::GLImage:
+ MOCK_METHOD0(GetSize, gfx::Size());
+ MOCK_METHOD1(Destroy, void(bool));
+ MOCK_METHOD1(BindTexImage, bool(unsigned));
+ MOCK_METHOD1(ReleaseTexImage, void(unsigned));
+ MOCK_METHOD1(CopyTexImage, bool(unsigned));
+ MOCK_METHOD0(WillUseTexImage, void());
+ MOCK_METHOD0(DidUseTexImage, void());
+ MOCK_METHOD0(WillModifyTexImage, void());
+ MOCK_METHOD0(DidModifyTexImage, void());
+ MOCK_METHOD5(ScheduleOverlayPlane, bool(gfx::AcceleratedWidget,
+ int,
+ gfx::OverlayTransform,
+ const gfx::Rect&,
+ const gfx::RectF&));
+
+ protected:
+ virtual ~MockGLImage() {}
+};
+
+TEST_P(GLES2DecoderWithShaderTest, UseTexImage) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+
+ TextureRef* texture_ref =
+ group().texture_manager()->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(kServiceTextureId, texture->service_id());
+
+ const int32 kImageId = 1;
+ scoped_refptr<MockGLImage> image(new MockGLImage);
+ GetImageManager()->AddImage(image.get(), kImageId);
+
+ // Bind image to texture.
+ EXPECT_CALL(*image.get(), BindTexImage(GL_TEXTURE_2D))
+ .Times(1)
+ .WillOnce(Return(true))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*image.get(), GetSize())
+ .Times(1)
+ .WillOnce(Return(gfx::Size(1, 1)))
+ .RetiresOnSaturation();
+ // ScopedGLErrorSuppressor calls GetError on its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_2D, kImageId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+
+ AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ // ScopedGLErrorSuppressor calls GetError on its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(3).RetiresOnSaturation();
+ EXPECT_CALL(*image.get(), WillUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*image.get(), DidUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ // ScopedGLErrorSuppressor calls GetError on its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kServiceTextureId))
+ .Times(2)
+ .RetiresOnSaturation();
+ // Image will be 'in use' as long as bound to a framebuffer.
+ EXPECT_CALL(*image.get(), WillUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ kServiceTextureId,
+ 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ FramebufferTexture2D fbtex_cmd;
+ fbtex_cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbtex_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // ScopedGLErrorSuppressor calls GetError on its constructor and destructor.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ kServiceRenderbufferId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kServiceTextureId))
+ .Times(2)
+ .RetiresOnSaturation();
+ // Image should no longer be 'in use' after being unbound from framebuffer.
+ EXPECT_CALL(*image.get(), DidUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ FramebufferRenderbuffer fbrb_cmd;
+ fbrb_cmd.Init(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ client_renderbuffer_id_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
+}
+
+TEST_P(GLES2DecoderManualInitTest, DrawWithGLImageExternal) {
+ InitState init;
+ init.extensions = "GL_OES_EGL_image_external";
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ TextureRef* texture_ref = GetTexture(client_texture_id_);
+ scoped_refptr<MockGLImage> image(new MockGLImage);
+ group().texture_manager()->SetTarget(texture_ref, GL_TEXTURE_EXTERNAL_OES);
+ group().texture_manager()->SetLevelInfo(texture_ref,
+ GL_TEXTURE_EXTERNAL_OES,
+ 0,
+ GL_RGBA,
+ 0,
+ 0,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ group().texture_manager()->SetLevelImage(
+ texture_ref, GL_TEXTURE_EXTERNAL_OES, 0, image.get());
+
+ DoBindTexture(GL_TEXTURE_EXTERNAL_OES, client_texture_id_, kServiceTextureId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ SetupSamplerExternalProgram();
+ SetupIndexBuffer();
+ AddExpectationsForSimulatedAttrib0(kMaxValidIndex + 1, 0);
+ SetupExpectationsForApplyingDefaultDirtyState();
+ EXPECT_TRUE(group().texture_manager()->CanRender(texture_ref));
+
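+  // Drawing with the external image bound should bracket the draw call with
+  // WillUseTexImage()/DidUseTexImage() on the image.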
+ InSequence s;
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*image.get(), WillUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, DrawElements(_, _, _, _)).Times(1);
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*image.get(), DidUseTexImage()).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+ DrawElements cmd;
+ cmd.Init(GL_TRIANGLES,
+ kValidIndexRangeCount,
+ GL_UNSIGNED_SHORT,
+ kValidIndexRangeStart * 2);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexImage2DFloatOnGLES2) {
+ InitState init;
+ init.extensions = "GL_OES_texture_float";
+ init.gl_version = "opengl es 2.0";
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 16, 17, 0, GL_RGBA, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 16, 17, 0, GL_RGB, GL_FLOAT, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_LUMINANCE, 16, 17, 0, GL_LUMINANCE, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 16, 17, 0, GL_ALPHA, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ 16,
+ 17,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ GL_FLOAT,
+ 0,
+ 0);
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexImage2DFloatOnGLES3) {
+ InitState init;
+ init.extensions = "GL_OES_texture_float GL_EXT_color_buffer_float";
+ init.gl_version = "opengl es 3.0";
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 16, 17, 0, GL_RGBA, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 16, 17, 0, GL_RGB, GL_FLOAT, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA32F, 16, 17, 0, GL_RGBA, GL_FLOAT, 0, 0);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_LUMINANCE, 16, 17, 0, GL_LUMINANCE, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 16, 17, 0, GL_ALPHA, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ 16,
+ 17,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ GL_FLOAT,
+ 0,
+ 0);
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexSubImage2DFloatOnGLES3) {
+ InitState init;
+ init.extensions = "GL_OES_texture_float GL_EXT_color_buffer_float";
+ init.gl_version = "opengl es 3.0";
+ InitDecoder(init);
+ const int kWidth = 8;
+ const int kHeight = 4;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA32F,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_FLOAT,
+ 0,
+ 0);
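+  // A full-size update of the uncleared level is issued as TexImage2D instead
+  // of clearing first and then calling TexSubImage2D.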
+ EXPECT_CALL(*gl_,
+ TexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA32F,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_FLOAT,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 0,
+ 0,
+ kWidth,
+ kHeight,
+ GL_RGBA,
+ GL_FLOAT,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexSubImage2DFloatDoesClearOnGLES3) {
+ InitState init;
+ init.extensions = "GL_OES_texture_float GL_EXT_color_buffer_float";
+ init.gl_version = "opengl es 3.0";
+ InitDecoder(init);
+ const int kWidth = 8;
+ const int kHeight = 4;
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA32F,
+ kWidth,
+ kHeight,
+ 0,
+ GL_RGBA,
+ GL_FLOAT,
+ 0,
+ 0);
+ SetupClearTextureExpectations(kServiceTextureId,
+ kServiceTextureId,
+ GL_TEXTURE_2D,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA32F,
+ GL_RGBA,
+ GL_FLOAT,
+ kWidth,
+ kHeight);
+ EXPECT_CALL(*gl_,
+ TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 0,
+ kWidth - 1,
+ kHeight,
+ GL_RGBA,
+ GL_FLOAT,
+ shared_memory_address_))
+ .Times(1)
+ .RetiresOnSaturation();
+ TexSubImage2D cmd;
+ cmd.Init(GL_TEXTURE_2D,
+ 0,
+ 1,
+ 0,
+ kWidth - 1,
+ kHeight,
+ GL_RGBA,
+ GL_FLOAT,
+ kSharedMemoryId,
+ kSharedMemoryOffset,
+ GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderManualInitTest, TexImage2DFloatConvertsFormatDesktop) {
+ InitState init;
+ init.extensions = "GL_ARB_texture_float";
+ init.gl_version = "2.1";
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA32F, 16, 17, 0, GL_RGBA, GL_FLOAT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, 16, 17, 0, GL_RGB, GL_FLOAT, 0, 0);
+ DoTexImage2DConvertInternalFormat(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 16,
+ 17,
+ 0,
+ GL_RGBA,
+ GL_FLOAT,
+ 0,
+ 0,
+ GL_RGBA32F_ARB);
+ DoTexImage2DConvertInternalFormat(GL_TEXTURE_2D,
+ 0,
+ GL_RGB,
+ 16,
+ 17,
+ 0,
+ GL_RGB,
+ GL_FLOAT,
+ 0,
+ 0,
+ GL_RGB32F_ARB);
+ DoTexImage2DConvertInternalFormat(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE,
+ 16,
+ 17,
+ 0,
+ GL_LUMINANCE,
+ GL_FLOAT,
+ 0,
+ 0,
+ GL_LUMINANCE32F_ARB);
+ DoTexImage2DConvertInternalFormat(GL_TEXTURE_2D,
+ 0,
+ GL_ALPHA,
+ 16,
+ 17,
+ 0,
+ GL_ALPHA,
+ GL_FLOAT,
+ 0,
+ 0,
+ GL_ALPHA32F_ARB);
+ DoTexImage2DConvertInternalFormat(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ 16,
+ 17,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ GL_FLOAT,
+ 0,
+ 0,
+ GL_LUMINANCE_ALPHA32F_ARB);
+}
+
+class GLES2DecoderCompressedFormatsTest : public GLES2DecoderManualInitTest {
+ public:
+ GLES2DecoderCompressedFormatsTest() {}
+
+ static bool ValueInArray(GLint value, GLint* array, GLint count) {
+ for (GLint ii = 0; ii < count; ++ii) {
+ if (array[ii] == value) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void CheckFormats(const char* extension, const GLenum* formats, int count) {
+ InitState init;
+ init.extensions = extension;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ GetIntegerv cmd;
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0).RetiresOnSaturation();
+ cmd.Init(GL_NUM_COMPRESSED_TEXTURE_FORMATS,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ GLint num_formats = result->GetData()[0];
+ EXPECT_EQ(count, num_formats);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ result->size = 0;
+ cmd.Init(GL_COMPRESSED_TEXTURE_FORMATS,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(num_formats, result->GetNumResults());
+
+ for (int i = 0; i < count; ++i) {
+ EXPECT_TRUE(
+ ValueInArray(formats[i], result->GetData(), result->GetNumResults()));
+ }
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderCompressedFormatsTest,
+ ::testing::Bool());
+
+TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsS3TC) {
+ const GLenum formats[] = {
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT};
+ CheckFormats("GL_EXT_texture_compression_s3tc", formats, 4);
+}
+
+TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsATC) {
+ const GLenum formats[] = {GL_ATC_RGB_AMD, GL_ATC_RGBA_EXPLICIT_ALPHA_AMD,
+ GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD};
+ CheckFormats("GL_AMD_compressed_ATC_texture", formats, 3);
+}
+
+TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsPVRTC) {
+ const GLenum formats[] = {
+ GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG, GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG,
+ GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG, GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG};
+ CheckFormats("GL_IMG_texture_compression_pvrtc", formats, 4);
+}
+
+TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsETC1) {
+ const GLenum formats[] = {GL_ETC1_RGB8_OES};
+ CheckFormats("GL_OES_compressed_ETC1_RGB8_texture", formats, 1);
+}
+
+TEST_P(GLES2DecoderManualInitTest, GetNoCompressedTextureFormats) {
+ InitState init;
+ init.gl_version = "3.0";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ typedef GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ GetIntegerv cmd;
+ result->size = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0).RetiresOnSaturation();
+ cmd.Init(GL_NUM_COMPRESSED_TEXTURE_FORMATS,
+ shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ GLint num_formats = result->GetData()[0];
+ EXPECT_EQ(0, num_formats);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ result->size = 0;
+ cmd.Init(
+ GL_COMPRESSED_TEXTURE_FORMATS, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(num_formats, result->GetNumResults());
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+// TODO(gman): Complete this test.
+// TEST_P(GLES2DecoderTest, CompressedTexImage2DGLError) {
+// }
+
+// TODO(gman): CompressedTexImage2D
+
+// TODO(gman): CompressedTexImage2DImmediate
+
+// TODO(gman): CompressedTexSubImage2DImmediate
+
+// TODO(gman): TexImage2D
+
+// TODO(gman): TexImage2DImmediate
+
+// TODO(gman): TexSubImage2DImmediate
+
+} // namespace gles2
+} // namespace gpu
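The CheckFormats helper above mirrors the two-step query a client performs against plain GLES2: first ask for the count, then fetch the list and look for the expected enums. A minimal client-side sketch (illustrative only, not part of this patch; assumes a current GL context):

  #include <vector>
  #include <GLES2/gl2.h>

  // Returns true if |wanted| appears in the driver's compressed-format list.
  bool SupportsCompressedFormat(GLenum wanted) {
    GLint count = 0;
    glGetIntegerv(GL_NUM_COMPRESSED_TEXTURE_FORMATS, &count);
    if (count <= 0)
      return false;
    std::vector<GLint> formats(count);
    glGetIntegerv(GL_COMPRESSED_TEXTURE_FORMATS, &formats[0]);
    for (GLint i = 0; i < count; ++i) {
      if (static_cast<GLenum>(formats[i]) == wanted)
        return true;
    }
    return false;
  }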
diff --git a/gpu/command_buffer/service/gles2_cmd_validation.cc b/gpu/command_buffer/service/gles2_cmd_validation.cc
new file mode 100644
index 0000000..8d4fd71
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_validation.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Contains various validation functions for the GLES2 service.
+
+#include "base/basictypes.h"
+#define GLES2_GPU_SERVICE 1
+#include "gpu/command_buffer/service/gles2_cmd_validation.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+
+namespace gpu {
+namespace gles2 {
+
+#include "gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
+
+
diff --git a/gpu/command_buffer/service/gles2_cmd_validation.h b/gpu/command_buffer/service/gles2_cmd_validation.h
new file mode 100644
index 0000000..22ee2da
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_validation.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Contains various validation functions for the GLES2 service.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_H_
+
+#include <algorithm>
+#include <vector>
+#define GLES2_GPU_SERVICE 1
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+
+namespace gpu {
+namespace gles2 {
+
+// ValueValidator returns true if a value is valid.
+template <typename T>
+class ValueValidator {
+ public:
+ ValueValidator() {}
+
+ ValueValidator(const T* valid_values, int num_values) {
+ for (int ii = 0; ii < num_values; ++ii) {
+ AddValue(valid_values[ii]);
+ }
+ }
+
+ void AddValue(const T value) {
+ if (!IsValid(value)) {
+ valid_values_.push_back(value);
+ }
+ }
+
+ bool IsValid(const T value) const {
+ return std::find(valid_values_.begin(), valid_values_.end(), value) !=
+ valid_values_.end();
+ }
+
+ const std::vector<T>& GetValues() const {
+ return valid_values_;
+ }
+
+ private:
+ std::vector<T> valid_values_;
+};
+
+struct Validators {
+ Validators();
+#include "gpu/command_buffer/service/gles2_cmd_validation_autogen.h"
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_H_
+
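A small usage sketch of the ValueValidator defined above (illustrative only, not part of this patch). In the generated Validators constructor further down, compressed_texture_format and shader_binary_format are default-constructed empty, which suggests their values are added with AddValue() at decoder initialization once the relevant extensions are known; GL_TEXTURE_EXTERNAL_OES below stands in for such an extension-gated enum:

  static const GLenum kTargets[] = {GL_TEXTURE_2D, GL_TEXTURE_CUBE_MAP};
  ValueValidator<GLenum> bind_target(kTargets, arraysize(kTargets));

  bind_target.IsValid(GL_TEXTURE_2D);             // true
  bind_target.IsValid(GL_RENDERBUFFER);           // false
  bind_target.AddValue(GL_TEXTURE_EXTERNAL_OES);  // now also accepted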
diff --git a/gpu/command_buffer/service/gles2_cmd_validation_autogen.h b/gpu/command_buffer/service/gles2_cmd_validation_autogen.h
new file mode 100644
index 0000000..de84037
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_validation_autogen.h
@@ -0,0 +1,75 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_AUTOGEN_H_
+
+ValueValidator<GLenum> attachment;
+ValueValidator<GLenum> backbuffer_attachment;
+ValueValidator<GLenum> blit_filter;
+ValueValidator<GLenum> buffer_parameter;
+ValueValidator<GLenum> buffer_target;
+ValueValidator<GLenum> buffer_usage;
+ValueValidator<GLenum> capability;
+ValueValidator<GLenum> cmp_function;
+ValueValidator<GLenum> compressed_texture_format;
+ValueValidator<GLenum> draw_mode;
+ValueValidator<GLenum> dst_blend_factor;
+ValueValidator<GLenum> equation;
+ValueValidator<GLenum> face_mode;
+ValueValidator<GLenum> face_type;
+ValueValidator<GLenum> frame_buffer_parameter;
+ValueValidator<GLenum> frame_buffer_target;
+ValueValidator<GLenum> g_l_state;
+ValueValidator<GLenum> get_max_index_type;
+ValueValidator<GLenum> get_tex_param_target;
+ValueValidator<GLenum> hint_mode;
+ValueValidator<GLenum> hint_target;
+ValueValidator<GLenum> image_internal_format;
+ValueValidator<GLenum> image_usage;
+ValueValidator<GLenum> index_type;
+ValueValidator<GLenum> matrix_mode;
+ValueValidator<GLenum> pixel_store;
+ValueValidator<GLint> pixel_store_alignment;
+ValueValidator<GLenum> pixel_type;
+ValueValidator<GLenum> program_parameter;
+ValueValidator<GLenum> query_object_parameter;
+ValueValidator<GLenum> query_parameter;
+ValueValidator<GLenum> query_target;
+ValueValidator<GLenum> read_pixel_format;
+ValueValidator<GLenum> read_pixel_type;
+ValueValidator<GLenum> render_buffer_format;
+ValueValidator<GLenum> render_buffer_parameter;
+ValueValidator<GLenum> render_buffer_target;
+ValueValidator<GLenum> reset_status;
+ValueValidator<GLenum> shader_binary_format;
+ValueValidator<GLenum> shader_parameter;
+ValueValidator<GLenum> shader_precision;
+ValueValidator<GLenum> shader_type;
+ValueValidator<GLenum> src_blend_factor;
+ValueValidator<GLenum> stencil_op;
+ValueValidator<GLenum> string_type;
+ValueValidator<GLenum> texture_bind_target;
+ValueValidator<GLenum> texture_format;
+ValueValidator<GLenum> texture_internal_format;
+ValueValidator<GLenum> texture_internal_format_storage;
+ValueValidator<GLenum> texture_mag_filter_mode;
+ValueValidator<GLenum> texture_min_filter_mode;
+ValueValidator<GLenum> texture_parameter;
+ValueValidator<GLenum> texture_pool;
+ValueValidator<GLenum> texture_target;
+ValueValidator<GLenum> texture_usage;
+ValueValidator<GLenum> texture_wrap_mode;
+ValueValidator<GLint> vertex_attrib_size;
+ValueValidator<GLenum> vertex_attrib_type;
+ValueValidator<GLenum> vertex_attribute;
+ValueValidator<GLenum> vertex_pointer;
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h b/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
new file mode 100644
index 0000000..790b9b3
--- /dev/null
+++ b/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
@@ -0,0 +1,629 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_IMPLEMENTATION_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_IMPLEMENTATION_AUTOGEN_H_
+
+static const GLenum valid_attachment_table[] = {
+ GL_COLOR_ATTACHMENT0,
+ GL_DEPTH_ATTACHMENT,
+ GL_STENCIL_ATTACHMENT,
+};
+
+static const GLenum valid_backbuffer_attachment_table[] = {
+ GL_COLOR_EXT,
+ GL_DEPTH_EXT,
+ GL_STENCIL_EXT,
+};
+
+static const GLenum valid_blit_filter_table[] = {
+ GL_NEAREST,
+ GL_LINEAR,
+};
+
+static const GLenum valid_buffer_parameter_table[] = {
+ GL_BUFFER_SIZE,
+ GL_BUFFER_USAGE,
+};
+
+static const GLenum valid_buffer_target_table[] = {
+ GL_ARRAY_BUFFER,
+ GL_ELEMENT_ARRAY_BUFFER,
+};
+
+static const GLenum valid_buffer_usage_table[] = {
+ GL_STREAM_DRAW,
+ GL_STATIC_DRAW,
+ GL_DYNAMIC_DRAW,
+};
+
+static const GLenum valid_capability_table[] = {
+ GL_BLEND,
+ GL_CULL_FACE,
+ GL_DEPTH_TEST,
+ GL_DITHER,
+ GL_POLYGON_OFFSET_FILL,
+ GL_SAMPLE_ALPHA_TO_COVERAGE,
+ GL_SAMPLE_COVERAGE,
+ GL_SCISSOR_TEST,
+ GL_STENCIL_TEST,
+};
+
+static const GLenum valid_cmp_function_table[] = {
+ GL_NEVER,
+ GL_LESS,
+ GL_EQUAL,
+ GL_LEQUAL,
+ GL_GREATER,
+ GL_NOTEQUAL,
+ GL_GEQUAL,
+ GL_ALWAYS,
+};
+
+static const GLenum valid_draw_mode_table[] = {
+ GL_POINTS,
+ GL_LINE_STRIP,
+ GL_LINE_LOOP,
+ GL_LINES,
+ GL_TRIANGLE_STRIP,
+ GL_TRIANGLE_FAN,
+ GL_TRIANGLES,
+};
+
+static const GLenum valid_dst_blend_factor_table[] = {
+ GL_ZERO,
+ GL_ONE,
+ GL_SRC_COLOR,
+ GL_ONE_MINUS_SRC_COLOR,
+ GL_DST_COLOR,
+ GL_ONE_MINUS_DST_COLOR,
+ GL_SRC_ALPHA,
+ GL_ONE_MINUS_SRC_ALPHA,
+ GL_DST_ALPHA,
+ GL_ONE_MINUS_DST_ALPHA,
+ GL_CONSTANT_COLOR,
+ GL_ONE_MINUS_CONSTANT_COLOR,
+ GL_CONSTANT_ALPHA,
+ GL_ONE_MINUS_CONSTANT_ALPHA,
+};
+
+static const GLenum valid_equation_table[] = {
+ GL_FUNC_ADD,
+ GL_FUNC_SUBTRACT,
+ GL_FUNC_REVERSE_SUBTRACT,
+};
+
+static const GLenum valid_face_mode_table[] = {
+ GL_CW,
+ GL_CCW,
+};
+
+static const GLenum valid_face_type_table[] = {
+ GL_FRONT,
+ GL_BACK,
+ GL_FRONT_AND_BACK,
+};
+
+static const GLenum valid_frame_buffer_parameter_table[] = {
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL,
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE,
+};
+
+static const GLenum valid_frame_buffer_target_table[] = {
+ GL_FRAMEBUFFER,
+};
+
+static const GLenum valid_g_l_state_table[] = {
+ GL_ACTIVE_TEXTURE,
+ GL_ALIASED_LINE_WIDTH_RANGE,
+ GL_ALIASED_POINT_SIZE_RANGE,
+ GL_ALPHA_BITS,
+ GL_ARRAY_BUFFER_BINDING,
+ GL_BLUE_BITS,
+ GL_COMPRESSED_TEXTURE_FORMATS,
+ GL_CURRENT_PROGRAM,
+ GL_DEPTH_BITS,
+ GL_DEPTH_RANGE,
+ GL_ELEMENT_ARRAY_BUFFER_BINDING,
+ GL_FRAMEBUFFER_BINDING,
+ GL_GENERATE_MIPMAP_HINT,
+ GL_GREEN_BITS,
+ GL_IMPLEMENTATION_COLOR_READ_FORMAT,
+ GL_IMPLEMENTATION_COLOR_READ_TYPE,
+ GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS,
+ GL_MAX_CUBE_MAP_TEXTURE_SIZE,
+ GL_MAX_FRAGMENT_UNIFORM_VECTORS,
+ GL_MAX_RENDERBUFFER_SIZE,
+ GL_MAX_TEXTURE_IMAGE_UNITS,
+ GL_MAX_TEXTURE_SIZE,
+ GL_MAX_VARYING_VECTORS,
+ GL_MAX_VERTEX_ATTRIBS,
+ GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS,
+ GL_MAX_VERTEX_UNIFORM_VECTORS,
+ GL_MAX_VIEWPORT_DIMS,
+ GL_NUM_COMPRESSED_TEXTURE_FORMATS,
+ GL_NUM_SHADER_BINARY_FORMATS,
+ GL_PACK_ALIGNMENT,
+ GL_RED_BITS,
+ GL_RENDERBUFFER_BINDING,
+ GL_SAMPLE_BUFFERS,
+ GL_SAMPLE_COVERAGE_INVERT,
+ GL_SAMPLE_COVERAGE_VALUE,
+ GL_SAMPLES,
+ GL_SCISSOR_BOX,
+ GL_SHADER_BINARY_FORMATS,
+ GL_SHADER_COMPILER,
+ GL_SUBPIXEL_BITS,
+ GL_STENCIL_BITS,
+ GL_TEXTURE_BINDING_2D,
+ GL_TEXTURE_BINDING_CUBE_MAP,
+ GL_UNPACK_ALIGNMENT,
+ GL_UNPACK_FLIP_Y_CHROMIUM,
+ GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM,
+ GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM,
+ GL_BIND_GENERATES_RESOURCE_CHROMIUM,
+ GL_VERTEX_ARRAY_BINDING_OES,
+ GL_VIEWPORT,
+ GL_BLEND_COLOR,
+ GL_BLEND_EQUATION_RGB,
+ GL_BLEND_EQUATION_ALPHA,
+ GL_BLEND_SRC_RGB,
+ GL_BLEND_DST_RGB,
+ GL_BLEND_SRC_ALPHA,
+ GL_BLEND_DST_ALPHA,
+ GL_COLOR_CLEAR_VALUE,
+ GL_DEPTH_CLEAR_VALUE,
+ GL_STENCIL_CLEAR_VALUE,
+ GL_COLOR_WRITEMASK,
+ GL_CULL_FACE_MODE,
+ GL_DEPTH_FUNC,
+ GL_DEPTH_WRITEMASK,
+ GL_FRONT_FACE,
+ GL_LINE_WIDTH,
+ GL_POLYGON_OFFSET_FACTOR,
+ GL_POLYGON_OFFSET_UNITS,
+ GL_STENCIL_FUNC,
+ GL_STENCIL_REF,
+ GL_STENCIL_VALUE_MASK,
+ GL_STENCIL_BACK_FUNC,
+ GL_STENCIL_BACK_REF,
+ GL_STENCIL_BACK_VALUE_MASK,
+ GL_STENCIL_WRITEMASK,
+ GL_STENCIL_BACK_WRITEMASK,
+ GL_STENCIL_FAIL,
+ GL_STENCIL_PASS_DEPTH_FAIL,
+ GL_STENCIL_PASS_DEPTH_PASS,
+ GL_STENCIL_BACK_FAIL,
+ GL_STENCIL_BACK_PASS_DEPTH_FAIL,
+ GL_STENCIL_BACK_PASS_DEPTH_PASS,
+ GL_BLEND,
+ GL_CULL_FACE,
+ GL_DEPTH_TEST,
+ GL_DITHER,
+ GL_POLYGON_OFFSET_FILL,
+ GL_SAMPLE_ALPHA_TO_COVERAGE,
+ GL_SAMPLE_COVERAGE,
+ GL_SCISSOR_TEST,
+ GL_STENCIL_TEST,
+};
+
+static const GLenum valid_get_max_index_type_table[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT,
+ GL_UNSIGNED_INT,
+};
+
+static const GLenum valid_get_tex_param_target_table[] = {
+ GL_TEXTURE_2D,
+ GL_TEXTURE_CUBE_MAP,
+};
+
+static const GLenum valid_hint_mode_table[] = {
+ GL_FASTEST,
+ GL_NICEST,
+ GL_DONT_CARE,
+};
+
+static const GLenum valid_hint_target_table[] = {
+ GL_GENERATE_MIPMAP_HINT,
+};
+
+static const GLenum valid_image_internal_format_table[] = {
+ GL_RGB,
+ GL_RGBA,
+};
+
+static const GLenum valid_image_usage_table[] = {
+ GL_MAP_CHROMIUM,
+ GL_SCANOUT_CHROMIUM,
+};
+
+static const GLenum valid_index_type_table[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT,
+};
+
+static const GLenum valid_matrix_mode_table[] = {
+ GL_PATH_PROJECTION_CHROMIUM,
+ GL_PATH_MODELVIEW_CHROMIUM,
+};
+
+static const GLenum valid_pixel_store_table[] = {
+ GL_PACK_ALIGNMENT,
+ GL_UNPACK_ALIGNMENT,
+ GL_UNPACK_FLIP_Y_CHROMIUM,
+ GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM,
+ GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM,
+};
+
+static const GLint valid_pixel_store_alignment_table[] = {
+ 1,
+ 2,
+ 4,
+ 8,
+};
+
+static const GLenum valid_pixel_type_table[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_5_6_5,
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_UNSIGNED_SHORT_5_5_5_1,
+};
+
+static const GLenum valid_program_parameter_table[] = {
+ GL_DELETE_STATUS,
+ GL_LINK_STATUS,
+ GL_VALIDATE_STATUS,
+ GL_INFO_LOG_LENGTH,
+ GL_ATTACHED_SHADERS,
+ GL_ACTIVE_ATTRIBUTES,
+ GL_ACTIVE_ATTRIBUTE_MAX_LENGTH,
+ GL_ACTIVE_UNIFORMS,
+ GL_ACTIVE_UNIFORM_MAX_LENGTH,
+};
+
+static const GLenum valid_query_object_parameter_table[] = {
+ GL_QUERY_RESULT_EXT,
+ GL_QUERY_RESULT_AVAILABLE_EXT,
+};
+
+static const GLenum valid_query_parameter_table[] = {
+ GL_CURRENT_QUERY_EXT,
+};
+
+static const GLenum valid_query_target_table[] = {
+ GL_ANY_SAMPLES_PASSED_EXT,
+ GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT,
+ GL_COMMANDS_ISSUED_CHROMIUM,
+ GL_LATENCY_QUERY_CHROMIUM,
+ GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM,
+ GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM,
+ GL_COMMANDS_COMPLETED_CHROMIUM,
+};
+
+static const GLenum valid_read_pixel_format_table[] = {
+ GL_ALPHA,
+ GL_RGB,
+ GL_RGBA,
+};
+
+static const GLenum valid_read_pixel_type_table[] = {
+ GL_UNSIGNED_BYTE,
+ GL_UNSIGNED_SHORT_5_6_5,
+ GL_UNSIGNED_SHORT_4_4_4_4,
+ GL_UNSIGNED_SHORT_5_5_5_1,
+};
+
+static const GLenum valid_render_buffer_format_table[] = {
+ GL_RGBA4,
+ GL_RGB565,
+ GL_RGB5_A1,
+ GL_DEPTH_COMPONENT16,
+ GL_STENCIL_INDEX8,
+};
+
+static const GLenum valid_render_buffer_parameter_table[] = {
+ GL_RENDERBUFFER_RED_SIZE,
+ GL_RENDERBUFFER_GREEN_SIZE,
+ GL_RENDERBUFFER_BLUE_SIZE,
+ GL_RENDERBUFFER_ALPHA_SIZE,
+ GL_RENDERBUFFER_DEPTH_SIZE,
+ GL_RENDERBUFFER_STENCIL_SIZE,
+ GL_RENDERBUFFER_WIDTH,
+ GL_RENDERBUFFER_HEIGHT,
+ GL_RENDERBUFFER_INTERNAL_FORMAT,
+};
+
+static const GLenum valid_render_buffer_target_table[] = {
+ GL_RENDERBUFFER,
+};
+
+static const GLenum valid_reset_status_table[] = {
+ GL_GUILTY_CONTEXT_RESET_ARB,
+ GL_INNOCENT_CONTEXT_RESET_ARB,
+ GL_UNKNOWN_CONTEXT_RESET_ARB,
+};
+
+static const GLenum valid_shader_parameter_table[] = {
+ GL_SHADER_TYPE,
+ GL_DELETE_STATUS,
+ GL_COMPILE_STATUS,
+ GL_INFO_LOG_LENGTH,
+ GL_SHADER_SOURCE_LENGTH,
+ GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE,
+};
+
+static const GLenum valid_shader_precision_table[] = {
+ GL_LOW_FLOAT,
+ GL_MEDIUM_FLOAT,
+ GL_HIGH_FLOAT,
+ GL_LOW_INT,
+ GL_MEDIUM_INT,
+ GL_HIGH_INT,
+};
+
+static const GLenum valid_shader_type_table[] = {
+ GL_VERTEX_SHADER,
+ GL_FRAGMENT_SHADER,
+};
+
+static const GLenum valid_src_blend_factor_table[] = {
+ GL_ZERO,
+ GL_ONE,
+ GL_SRC_COLOR,
+ GL_ONE_MINUS_SRC_COLOR,
+ GL_DST_COLOR,
+ GL_ONE_MINUS_DST_COLOR,
+ GL_SRC_ALPHA,
+ GL_ONE_MINUS_SRC_ALPHA,
+ GL_DST_ALPHA,
+ GL_ONE_MINUS_DST_ALPHA,
+ GL_CONSTANT_COLOR,
+ GL_ONE_MINUS_CONSTANT_COLOR,
+ GL_CONSTANT_ALPHA,
+ GL_ONE_MINUS_CONSTANT_ALPHA,
+ GL_SRC_ALPHA_SATURATE,
+};
+
+static const GLenum valid_stencil_op_table[] = {
+ GL_KEEP,
+ GL_ZERO,
+ GL_REPLACE,
+ GL_INCR,
+ GL_INCR_WRAP,
+ GL_DECR,
+ GL_DECR_WRAP,
+ GL_INVERT,
+};
+
+static const GLenum valid_string_type_table[] = {
+ GL_VENDOR,
+ GL_RENDERER,
+ GL_VERSION,
+ GL_SHADING_LANGUAGE_VERSION,
+ GL_EXTENSIONS,
+};
+
+static const GLenum valid_texture_bind_target_table[] = {
+ GL_TEXTURE_2D,
+ GL_TEXTURE_CUBE_MAP,
+};
+
+static const GLenum valid_texture_format_table[] = {
+ GL_ALPHA,
+ GL_LUMINANCE,
+ GL_LUMINANCE_ALPHA,
+ GL_RGB,
+ GL_RGBA,
+};
+
+static const GLenum valid_texture_internal_format_table[] = {
+ GL_ALPHA,
+ GL_LUMINANCE,
+ GL_LUMINANCE_ALPHA,
+ GL_RGB,
+ GL_RGBA,
+};
+
+static const GLenum valid_texture_internal_format_storage_table[] = {
+ GL_RGB565,
+ GL_RGBA4,
+ GL_RGB5_A1,
+ GL_ALPHA8_EXT,
+ GL_LUMINANCE8_EXT,
+ GL_LUMINANCE8_ALPHA8_EXT,
+ GL_RGB8_OES,
+ GL_RGBA8_OES,
+};
+
+static const GLenum valid_texture_mag_filter_mode_table[] = {
+ GL_NEAREST,
+ GL_LINEAR,
+};
+
+static const GLenum valid_texture_min_filter_mode_table[] = {
+ GL_NEAREST,
+ GL_LINEAR,
+ GL_NEAREST_MIPMAP_NEAREST,
+ GL_LINEAR_MIPMAP_NEAREST,
+ GL_NEAREST_MIPMAP_LINEAR,
+ GL_LINEAR_MIPMAP_LINEAR,
+};
+
+static const GLenum valid_texture_parameter_table[] = {
+ GL_TEXTURE_MAG_FILTER,
+ GL_TEXTURE_MIN_FILTER,
+ GL_TEXTURE_POOL_CHROMIUM,
+ GL_TEXTURE_WRAP_S,
+ GL_TEXTURE_WRAP_T,
+};
+
+static const GLenum valid_texture_pool_table[] = {
+ GL_TEXTURE_POOL_MANAGED_CHROMIUM,
+ GL_TEXTURE_POOL_UNMANAGED_CHROMIUM,
+};
+
+static const GLenum valid_texture_target_table[] = {
+ GL_TEXTURE_2D,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+};
+
+static const GLenum valid_texture_usage_table[] = {
+ GL_NONE,
+ GL_FRAMEBUFFER_ATTACHMENT_ANGLE,
+};
+
+static const GLenum valid_texture_wrap_mode_table[] = {
+ GL_CLAMP_TO_EDGE,
+ GL_MIRRORED_REPEAT,
+ GL_REPEAT,
+};
+
+static const GLint valid_vertex_attrib_size_table[] = {
+ 1,
+ 2,
+ 3,
+ 4,
+};
+
+static const GLenum valid_vertex_attrib_type_table[] = {
+ GL_BYTE,
+ GL_UNSIGNED_BYTE,
+ GL_SHORT,
+ GL_UNSIGNED_SHORT,
+ GL_FLOAT,
+};
+
+static const GLenum valid_vertex_attribute_table[] = {
+ GL_VERTEX_ATTRIB_ARRAY_NORMALIZED,
+ GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING,
+ GL_VERTEX_ATTRIB_ARRAY_ENABLED,
+ GL_VERTEX_ATTRIB_ARRAY_SIZE,
+ GL_VERTEX_ATTRIB_ARRAY_STRIDE,
+ GL_VERTEX_ATTRIB_ARRAY_TYPE,
+ GL_CURRENT_VERTEX_ATTRIB,
+};
+
+static const GLenum valid_vertex_pointer_table[] = {
+ GL_VERTEX_ATTRIB_ARRAY_POINTER,
+};
+
+Validators::Validators()
+ : attachment(valid_attachment_table, arraysize(valid_attachment_table)),
+ backbuffer_attachment(valid_backbuffer_attachment_table,
+ arraysize(valid_backbuffer_attachment_table)),
+ blit_filter(valid_blit_filter_table, arraysize(valid_blit_filter_table)),
+ buffer_parameter(valid_buffer_parameter_table,
+ arraysize(valid_buffer_parameter_table)),
+ buffer_target(valid_buffer_target_table,
+ arraysize(valid_buffer_target_table)),
+ buffer_usage(valid_buffer_usage_table,
+ arraysize(valid_buffer_usage_table)),
+ capability(valid_capability_table, arraysize(valid_capability_table)),
+ cmp_function(valid_cmp_function_table,
+ arraysize(valid_cmp_function_table)),
+ compressed_texture_format(),
+ draw_mode(valid_draw_mode_table, arraysize(valid_draw_mode_table)),
+ dst_blend_factor(valid_dst_blend_factor_table,
+ arraysize(valid_dst_blend_factor_table)),
+ equation(valid_equation_table, arraysize(valid_equation_table)),
+ face_mode(valid_face_mode_table, arraysize(valid_face_mode_table)),
+ face_type(valid_face_type_table, arraysize(valid_face_type_table)),
+ frame_buffer_parameter(valid_frame_buffer_parameter_table,
+ arraysize(valid_frame_buffer_parameter_table)),
+ frame_buffer_target(valid_frame_buffer_target_table,
+ arraysize(valid_frame_buffer_target_table)),
+ g_l_state(valid_g_l_state_table, arraysize(valid_g_l_state_table)),
+ get_max_index_type(valid_get_max_index_type_table,
+ arraysize(valid_get_max_index_type_table)),
+ get_tex_param_target(valid_get_tex_param_target_table,
+ arraysize(valid_get_tex_param_target_table)),
+ hint_mode(valid_hint_mode_table, arraysize(valid_hint_mode_table)),
+ hint_target(valid_hint_target_table, arraysize(valid_hint_target_table)),
+ image_internal_format(valid_image_internal_format_table,
+ arraysize(valid_image_internal_format_table)),
+ image_usage(valid_image_usage_table, arraysize(valid_image_usage_table)),
+ index_type(valid_index_type_table, arraysize(valid_index_type_table)),
+ matrix_mode(valid_matrix_mode_table, arraysize(valid_matrix_mode_table)),
+ pixel_store(valid_pixel_store_table, arraysize(valid_pixel_store_table)),
+ pixel_store_alignment(valid_pixel_store_alignment_table,
+ arraysize(valid_pixel_store_alignment_table)),
+ pixel_type(valid_pixel_type_table, arraysize(valid_pixel_type_table)),
+ program_parameter(valid_program_parameter_table,
+ arraysize(valid_program_parameter_table)),
+ query_object_parameter(valid_query_object_parameter_table,
+ arraysize(valid_query_object_parameter_table)),
+ query_parameter(valid_query_parameter_table,
+ arraysize(valid_query_parameter_table)),
+ query_target(valid_query_target_table,
+ arraysize(valid_query_target_table)),
+ read_pixel_format(valid_read_pixel_format_table,
+ arraysize(valid_read_pixel_format_table)),
+ read_pixel_type(valid_read_pixel_type_table,
+ arraysize(valid_read_pixel_type_table)),
+ render_buffer_format(valid_render_buffer_format_table,
+ arraysize(valid_render_buffer_format_table)),
+ render_buffer_parameter(valid_render_buffer_parameter_table,
+ arraysize(valid_render_buffer_parameter_table)),
+ render_buffer_target(valid_render_buffer_target_table,
+ arraysize(valid_render_buffer_target_table)),
+ reset_status(valid_reset_status_table,
+ arraysize(valid_reset_status_table)),
+ shader_binary_format(),
+ shader_parameter(valid_shader_parameter_table,
+ arraysize(valid_shader_parameter_table)),
+ shader_precision(valid_shader_precision_table,
+ arraysize(valid_shader_precision_table)),
+ shader_type(valid_shader_type_table, arraysize(valid_shader_type_table)),
+ src_blend_factor(valid_src_blend_factor_table,
+ arraysize(valid_src_blend_factor_table)),
+ stencil_op(valid_stencil_op_table, arraysize(valid_stencil_op_table)),
+ string_type(valid_string_type_table, arraysize(valid_string_type_table)),
+ texture_bind_target(valid_texture_bind_target_table,
+ arraysize(valid_texture_bind_target_table)),
+ texture_format(valid_texture_format_table,
+ arraysize(valid_texture_format_table)),
+ texture_internal_format(valid_texture_internal_format_table,
+ arraysize(valid_texture_internal_format_table)),
+ texture_internal_format_storage(
+ valid_texture_internal_format_storage_table,
+ arraysize(valid_texture_internal_format_storage_table)),
+ texture_mag_filter_mode(valid_texture_mag_filter_mode_table,
+ arraysize(valid_texture_mag_filter_mode_table)),
+ texture_min_filter_mode(valid_texture_min_filter_mode_table,
+ arraysize(valid_texture_min_filter_mode_table)),
+ texture_parameter(valid_texture_parameter_table,
+ arraysize(valid_texture_parameter_table)),
+ texture_pool(valid_texture_pool_table,
+ arraysize(valid_texture_pool_table)),
+ texture_target(valid_texture_target_table,
+ arraysize(valid_texture_target_table)),
+ texture_usage(valid_texture_usage_table,
+ arraysize(valid_texture_usage_table)),
+ texture_wrap_mode(valid_texture_wrap_mode_table,
+ arraysize(valid_texture_wrap_mode_table)),
+ vertex_attrib_size(valid_vertex_attrib_size_table,
+ arraysize(valid_vertex_attrib_size_table)),
+ vertex_attrib_type(valid_vertex_attrib_type_table,
+ arraysize(valid_vertex_attrib_type_table)),
+ vertex_attribute(valid_vertex_attribute_table,
+ arraysize(valid_vertex_attribute_table)),
+ vertex_pointer(valid_vertex_pointer_table,
+ arraysize(valid_vertex_pointer_table)) {
+}
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_IMPLEMENTATION_AUTOGEN_H_
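The tables above populate the Validators members declared in gles2_cmd_validation_autogen.h, which the service consults before forwarding enums to the driver. A hedged sketch of such a check (the function name and setup are illustrative, not the decoder's actual code):

  // Reject an invalid enum before it reaches the driver; on failure the real
  // decoder would record GL_INVALID_ENUM and skip the GL call.
  bool IsValidTexBindTarget(const gpu::gles2::Validators* validators,
                            GLenum target) {
    return validators->texture_bind_target.IsValid(target);
  }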
diff --git a/gpu/command_buffer/service/gpu_scheduler.cc b/gpu/command_buffer/service/gpu_scheduler.cc
new file mode 100644
index 0000000..015d808
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_scheduler.cc
@@ -0,0 +1,310 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/debug/trace_event.h"
+#include "base/message_loop/message_loop.h"
+#include "base/time/time.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_switches.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_version.h"
+#endif
+
+using ::base::SharedMemory;
+
+namespace gpu {
+
+const int64 kUnscheduleFenceTimeOutDelay = 10000;
+
+#if defined(OS_WIN)
+const int64 kRescheduleTimeOutDelay = 1000;
+#endif
+
+GpuScheduler::GpuScheduler(CommandBufferServiceBase* command_buffer,
+ AsyncAPIInterface* handler,
+ gles2::GLES2Decoder* decoder)
+ : command_buffer_(command_buffer),
+ handler_(handler),
+ decoder_(decoder),
+ unscheduled_count_(0),
+ rescheduled_count_(0),
+ was_preempted_(false),
+ reschedule_task_factory_(this) {}
+
+GpuScheduler::~GpuScheduler() {
+}
+
+void GpuScheduler::PutChanged() {
+ TRACE_EVENT1(
+ "gpu", "GpuScheduler:PutChanged",
+ "decoder", decoder_ ? decoder_->GetLogger()->GetLogPrefix() : "None");
+
+ CommandBuffer::State state = command_buffer_->GetLastState();
+
+ // If there is no parser, exit.
+ if (!parser_.get()) {
+ DCHECK_EQ(state.get_offset, state.put_offset);
+ return;
+ }
+
+ parser_->set_put(state.put_offset);
+ if (state.error != error::kNoError)
+ return;
+
+ // Check that the GPU has passed all fences.
+ if (!PollUnscheduleFences())
+ return;
+
+ // One of the unschedule fence tasks might have unscheduled us.
+ if (!IsScheduled())
+ return;
+
+ base::TimeTicks begin_time(base::TimeTicks::HighResNow());
+ error::Error error = error::kNoError;
+ if (decoder_)
+ decoder_->BeginDecoding();
+ while (!parser_->IsEmpty()) {
+ if (IsPreempted())
+ break;
+
+ DCHECK(IsScheduled());
+ DCHECK(unschedule_fences_.empty());
+
+ error = parser_->ProcessCommands(CommandParser::kParseCommandsSlice);
+
+ if (error == error::kDeferCommandUntilLater) {
+ DCHECK_GT(unscheduled_count_, 0);
+ break;
+ }
+
+ // TODO(piman): various classes duplicate various pieces of state, leading
+ // to needlessly complex update logic. It should be possible to simply
+ // share the state across all of them.
+ command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
+
+ if (error::IsError(error)) {
+ command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+ command_buffer_->SetParseError(error);
+ break;
+ }
+
+ if (!command_processed_callback_.is_null())
+ command_processed_callback_.Run();
+
+ if (unscheduled_count_ > 0)
+ break;
+ }
+
+ if (decoder_) {
+ if (!error::IsError(error) && decoder_->WasContextLost()) {
+ command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+ command_buffer_->SetParseError(error::kLostContext);
+ }
+ decoder_->EndDecoding();
+ decoder_->AddProcessingCommandsTime(
+ base::TimeTicks::HighResNow() - begin_time);
+ }
+}
+
+void GpuScheduler::SetScheduled(bool scheduled) {
+ TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this,
+ "new unscheduled_count_",
+               unscheduled_count_ + (scheduled ? -1 : 1));
+ if (scheduled) {
+ // If the scheduler was rescheduled after a timeout, ignore the subsequent
+ // calls to SetScheduled when they eventually arrive until they are all
+ // accounted for.
+ if (rescheduled_count_ > 0) {
+ --rescheduled_count_;
+ return;
+ } else {
+ --unscheduled_count_;
+ }
+
+ DCHECK_GE(unscheduled_count_, 0);
+
+ if (unscheduled_count_ == 0) {
+ TRACE_EVENT_ASYNC_END1("gpu", "ProcessingSwap", this,
+ "GpuScheduler", this);
+ // When the scheduler transitions from the unscheduled to the scheduled
+ // state, cancel the task that would reschedule it after a timeout.
+ reschedule_task_factory_.InvalidateWeakPtrs();
+
+ if (!scheduling_changed_callback_.is_null())
+ scheduling_changed_callback_.Run(true);
+ }
+ } else {
+ ++unscheduled_count_;
+ if (unscheduled_count_ == 1) {
+ TRACE_EVENT_ASYNC_BEGIN1("gpu", "ProcessingSwap", this,
+ "GpuScheduler", this);
+#if defined(OS_WIN)
+ if (base::win::GetVersion() < base::win::VERSION_VISTA) {
+ // When the scheduler transitions from scheduled to unscheduled, post a
+        // delayed task that will force it back into a scheduled state after
+ // a timeout. This should only be necessary on pre-Vista.
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&GpuScheduler::RescheduleTimeOut,
+ reschedule_task_factory_.GetWeakPtr()),
+ base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay));
+ }
+#endif
+ if (!scheduling_changed_callback_.is_null())
+ scheduling_changed_callback_.Run(false);
+ }
+ }
+}
+
+bool GpuScheduler::IsScheduled() {
+ return unscheduled_count_ == 0;
+}
+
+bool GpuScheduler::HasMoreWork() {
+ return !unschedule_fences_.empty() ||
+ (decoder_ && decoder_->ProcessPendingQueries()) ||
+ HasMoreIdleWork();
+}
+
+void GpuScheduler::SetSchedulingChangedCallback(
+ const SchedulingChangedCallback& callback) {
+ scheduling_changed_callback_ = callback;
+}
+
+scoped_refptr<Buffer> GpuScheduler::GetSharedMemoryBuffer(int32 shm_id) {
+ return command_buffer_->GetTransferBuffer(shm_id);
+}
+
+void GpuScheduler::set_token(int32 token) {
+ command_buffer_->SetToken(token);
+}
+
+bool GpuScheduler::SetGetBuffer(int32 transfer_buffer_id) {
+ scoped_refptr<Buffer> ring_buffer =
+ command_buffer_->GetTransferBuffer(transfer_buffer_id);
+ if (!ring_buffer.get()) {
+ return false;
+ }
+
+ if (!parser_.get()) {
+ parser_.reset(new CommandParser(handler_));
+ }
+
+ parser_->SetBuffer(
+ ring_buffer->memory(), ring_buffer->size(), 0, ring_buffer->size());
+
+ SetGetOffset(0);
+ return true;
+}
+
+bool GpuScheduler::SetGetOffset(int32 offset) {
+ if (parser_->set_get(offset)) {
+ command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
+ return true;
+ }
+ return false;
+}
+
+int32 GpuScheduler::GetGetOffset() {
+ return parser_->get();
+}
+
+void GpuScheduler::SetCommandProcessedCallback(
+ const base::Closure& callback) {
+ command_processed_callback_ = callback;
+}
+
+void GpuScheduler::DeferToFence(base::Closure task) {
+ unschedule_fences_.push(make_linked_ptr(
+ new UnscheduleFence(gfx::GLFence::Create(), task)));
+ SetScheduled(false);
+}
+
+bool GpuScheduler::PollUnscheduleFences() {
+ if (unschedule_fences_.empty())
+ return true;
+
+ if (unschedule_fences_.front()->fence.get()) {
+ base::Time now = base::Time::Now();
+ base::TimeDelta timeout =
+ base::TimeDelta::FromMilliseconds(kUnscheduleFenceTimeOutDelay);
+
+ while (!unschedule_fences_.empty()) {
+ const UnscheduleFence& fence = *unschedule_fences_.front();
+ if (fence.fence->HasCompleted() ||
+ now - fence.issue_time > timeout) {
+ unschedule_fences_.front()->task.Run();
+ unschedule_fences_.pop();
+ SetScheduled(true);
+ } else {
+ return false;
+ }
+ }
+ } else {
+ glFinish();
+
+ while (!unschedule_fences_.empty()) {
+ unschedule_fences_.front()->task.Run();
+ unschedule_fences_.pop();
+ SetScheduled(true);
+ }
+ }
+
+ return true;
+}
+
+bool GpuScheduler::IsPreempted() {
+ if (!preemption_flag_.get())
+ return false;
+
+ if (!was_preempted_ && preemption_flag_->IsSet()) {
+ TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 1);
+ was_preempted_ = true;
+ } else if (was_preempted_ && !preemption_flag_->IsSet()) {
+ TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 0);
+ was_preempted_ = false;
+ }
+
+ return preemption_flag_->IsSet();
+}
+
+bool GpuScheduler::HasMoreIdleWork() {
+ return (decoder_ && decoder_->HasMoreIdleWork());
+}
+
+void GpuScheduler::PerformIdleWork() {
+ if (!decoder_)
+ return;
+ decoder_->PerformIdleWork();
+}
+
+void GpuScheduler::RescheduleTimeOut() {
+ int new_count = unscheduled_count_ + rescheduled_count_;
+
+ rescheduled_count_ = 0;
+
+ while (unscheduled_count_)
+ SetScheduled(true);
+
+ rescheduled_count_ = new_count;
+}
+
+GpuScheduler::UnscheduleFence::UnscheduleFence(gfx::GLFence* fence_,
+ base::Closure task_)
+ : fence(fence_),
+ issue_time(base::Time::Now()),
+ task(task_) {
+}
+
+GpuScheduler::UnscheduleFence::~UnscheduleFence() {
+}
+
+} // namespace gpu
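SetScheduled() above is reference counted: every SetScheduled(false) must eventually be matched by a SetScheduled(true) before PutChanged() processes commands again. A standalone model of just that bookkeeping (illustrative; it ignores the reschedule timeout and the callbacks):

  class ScheduleCount {
   public:
    ScheduleCount() : unscheduled_count_(0) {}
    void SetScheduled(bool scheduled) {
      unscheduled_count_ += scheduled ? -1 : 1;
    }
    bool IsScheduled() const { return unscheduled_count_ == 0; }

   private:
    int unscheduled_count_;
  };

  // Two unschedules need two reschedules:
  //   ScheduleCount c;
  //   c.SetScheduled(false); c.SetScheduled(false);  // IsScheduled() -> false
  //   c.SetScheduled(true);                          // still false
  //   c.SetScheduled(true);                          // true again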
diff --git a/gpu/command_buffer/service/gpu_scheduler.h b/gpu/command_buffer/service/gpu_scheduler.h
new file mode 100644
index 0000000..0390632
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_scheduler.h
@@ -0,0 +1,168 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_H_
+
+#include <queue>
+
+#include "base/atomic_ref_count.h"
+#include "base/atomicops.h"
+#include "base/callback.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/shared_memory.h"
+#include "base/memory/weak_ptr.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/cmd_parser.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/gpu_export.h"
+
+namespace gfx {
+class GLFence;
+}
+
+namespace gpu {
+
+class PreemptionFlag
+ : public base::RefCountedThreadSafe<PreemptionFlag> {
+ public:
+ PreemptionFlag() : flag_(0) {}
+
+ bool IsSet() { return !base::AtomicRefCountIsZero(&flag_); }
+ void Set() { base::AtomicRefCountInc(&flag_); }
+ void Reset() { base::subtle::NoBarrier_Store(&flag_, 0); }
+
+ private:
+ base::AtomicRefCount flag_;
+
+ ~PreemptionFlag() {}
+
+ friend class base::RefCountedThreadSafe<PreemptionFlag>;
+};
+
+// This class schedules commands that have been flushed. They are received via
+// a command buffer and forwarded to a command parser. TODO(apatrick): This
+// class should not know about the decoder. Do not add additional dependencies
+// on it.
+class GPU_EXPORT GpuScheduler
+ : NON_EXPORTED_BASE(public CommandBufferEngine),
+ public base::SupportsWeakPtr<GpuScheduler> {
+ public:
+ GpuScheduler(CommandBufferServiceBase* command_buffer,
+ AsyncAPIInterface* handler,
+ gles2::GLES2Decoder* decoder);
+
+ virtual ~GpuScheduler();
+
+ void PutChanged();
+
+ void SetPreemptByFlag(scoped_refptr<PreemptionFlag> flag) {
+ preemption_flag_ = flag;
+ }
+
+ // Sets whether commands should be processed by this scheduler. Setting to
+ // false unschedules. Setting to true reschedules. Whether or not the
+ // scheduler is currently scheduled is "reference counted". Every call with
+ // false must eventually be paired by a call with true.
+ void SetScheduled(bool is_scheduled);
+
+ // Returns whether the scheduler is currently able to process more commands.
+ bool IsScheduled();
+
+ // Returns whether the scheduler needs to be polled again in the future.
+ bool HasMoreWork();
+
+ typedef base::Callback<void(bool /* scheduled */)> SchedulingChangedCallback;
+
+  // Sets a callback that is invoked just before the scheduler is rescheduled
+  // or descheduled. The callback is copied and retained.
+ void SetSchedulingChangedCallback(const SchedulingChangedCallback& callback);
+
+ // Implementation of CommandBufferEngine.
+ virtual scoped_refptr<Buffer> GetSharedMemoryBuffer(int32 shm_id) OVERRIDE;
+ virtual void set_token(int32 token) OVERRIDE;
+ virtual bool SetGetBuffer(int32 transfer_buffer_id) OVERRIDE;
+ virtual bool SetGetOffset(int32 offset) OVERRIDE;
+ virtual int32 GetGetOffset() OVERRIDE;
+
+ void SetCommandProcessedCallback(const base::Closure& callback);
+
+ void DeferToFence(base::Closure task);
+
+ // Polls the fences, invoking callbacks that were waiting to be triggered
+ // by them and returns whether all fences were complete.
+ bool PollUnscheduleFences();
+
+ bool HasMoreIdleWork();
+ void PerformIdleWork();
+
+ CommandParser* parser() const {
+ return parser_.get();
+ }
+
+ bool IsPreempted();
+
+ private:
+ // Artificially reschedule if the scheduler is still unscheduled after a
+ // timeout.
+ void RescheduleTimeOut();
+
+ // The GpuScheduler holds a weak reference to the CommandBuffer. The
+ // CommandBuffer owns the GpuScheduler and holds a strong reference to it
+ // through the ProcessCommands callback.
+ CommandBufferServiceBase* command_buffer_;
+
+ // The parser uses this to execute commands.
+ AsyncAPIInterface* handler_;
+
+ // Does not own decoder. TODO(apatrick): The GpuScheduler shouldn't need a
+  // pointer to the decoder; it is only used to initialize the CommandParser,
+  // which could be an argument to the constructor, and to determine the
+  // reason for context loss.
+ gles2::GLES2Decoder* decoder_;
+
+ // TODO(apatrick): The GpuScheduler currently creates and owns the parser.
+ // This should be an argument to the constructor.
+ scoped_ptr<CommandParser> parser_;
+
+ // Greater than zero if this is waiting to be rescheduled before continuing.
+ int unscheduled_count_;
+
+ // The number of times this scheduler has been artificially rescheduled on
+ // account of a timeout.
+ int rescheduled_count_;
+
+ // The GpuScheduler will unschedule itself in the event that further GL calls
+ // are issued to it before all these fences have been crossed by the GPU.
+ struct UnscheduleFence {
+ UnscheduleFence(gfx::GLFence* fence, base::Closure task);
+ ~UnscheduleFence();
+
+ scoped_ptr<gfx::GLFence> fence;
+ base::Time issue_time;
+ base::Closure task;
+ };
+ std::queue<linked_ptr<UnscheduleFence> > unschedule_fences_;
+
+ SchedulingChangedCallback scheduling_changed_callback_;
+ base::Closure descheduled_callback_;
+ base::Closure command_processed_callback_;
+
+ // If non-NULL and |preemption_flag_->IsSet()|, exit PutChanged early.
+ scoped_refptr<PreemptionFlag> preemption_flag_;
+ bool was_preempted_;
+
+ // A factory for outstanding rescheduling tasks that is invalidated whenever
+ // the scheduler is rescheduled.
+ base::WeakPtrFactory<GpuScheduler> reschedule_task_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuScheduler);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_H_
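A hedged sketch of how an embedder drives the scheduler (command_buffer, decoder and ring_buffer_id are assumed to be set up elsewhere; the call sequence follows GpuSchedulerTest below, which passes the decoder as both handler and decoder):

  gpu::GpuScheduler scheduler(command_buffer, decoder, decoder);
  scheduler.SetGetBuffer(ring_buffer_id);  // install the shared-memory ring
  scheduler.PutChanged();                  // process commands up to put_offset
  if (scheduler.HasMoreIdleWork())
    scheduler.PerformIdleWork();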
diff --git a/gpu/command_buffer/service/gpu_scheduler_mock.h b/gpu/command_buffer/service/gpu_scheduler_mock.h
new file mode 100644
index 0000000..ed308e0
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_scheduler_mock.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_MOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_MOCK_H_
+
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+
+class MockGpuScheduler : public GpuScheduler {
+ public:
+  MockGpuScheduler(CommandBufferServiceBase* command_buffer,
+                   AsyncAPIInterface* handler,
+                   gles2::GLES2Decoder* decoder)
+      : GpuScheduler(command_buffer, handler, decoder) {
+  }
+
+  MOCK_METHOD1(GetSharedMemoryBuffer, scoped_refptr<Buffer>(int32 shm_id));
+ MOCK_METHOD1(set_token, void(int32 token));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockGpuScheduler);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_MOCK_H_
diff --git a/gpu/command_buffer/service/gpu_scheduler_unittest.cc b/gpu/command_buffer/service/gpu_scheduler_unittest.cc
new file mode 100644
index 0000000..c658d2b
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_scheduler_unittest.cc
@@ -0,0 +1,221 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/common/command_buffer_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+using testing::_;
+using testing::DoAll;
+using testing::Invoke;
+using testing::NiceMock;
+using testing::Return;
+using testing::SetArgumentPointee;
+using testing::StrictMock;
+
+namespace gpu {
+
+const size_t kRingBufferSize = 1024;
+const size_t kRingBufferEntries = kRingBufferSize / sizeof(CommandBufferEntry);
+
+class GpuSchedulerTest : public testing::Test {
+ protected:
+ static const int32 kTransferBufferId = 123;
+
+ virtual void SetUp() {
+ scoped_ptr<base::SharedMemory> shared_memory(new ::base::SharedMemory);
+ shared_memory->CreateAndMapAnonymous(kRingBufferSize);
+ buffer_ = static_cast<int32*>(shared_memory->memory());
+ shared_memory_buffer_ =
+ MakeBufferFromSharedMemory(shared_memory.Pass(), kRingBufferSize);
+ memset(buffer_, 0, kRingBufferSize);
+
+ command_buffer_.reset(new MockCommandBuffer);
+
+ CommandBuffer::State default_state;
+ default_state.num_entries = kRingBufferEntries;
+ ON_CALL(*command_buffer_.get(), GetLastState())
+ .WillByDefault(Return(default_state));
+
+ decoder_.reset(new gles2::MockGLES2Decoder());
+ // Install FakeDoCommands handler so we can use individual DoCommand()
+ // expectations.
+ EXPECT_CALL(*decoder_, DoCommands(_, _, _, _)).WillRepeatedly(
+ Invoke(decoder_.get(), &gles2::MockGLES2Decoder::FakeDoCommands));
+
+ scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
+ decoder_.get(),
+ decoder_.get()));
+ EXPECT_CALL(*command_buffer_, GetTransferBuffer(kTransferBufferId))
+ .WillOnce(Return(shared_memory_buffer_));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(0));
+ EXPECT_TRUE(scheduler_->SetGetBuffer(kTransferBufferId));
+ }
+
+ virtual void TearDown() {
+    // Run any tasks posted by the GPU scheduler so that unexpected ones
+    // trigger mock failures.
+ base::MessageLoop::current()->RunUntilIdle();
+ }
+
+ error::Error GetError() {
+ return command_buffer_->GetLastState().error;
+ }
+
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool autorelease_pool_;
+#endif
+ base::MessageLoop message_loop;
+ scoped_ptr<MockCommandBuffer> command_buffer_;
+ scoped_refptr<Buffer> shared_memory_buffer_;
+ int32* buffer_;
+ scoped_ptr<gles2::MockGLES2Decoder> decoder_;
+ scoped_ptr<GpuScheduler> scheduler_;
+};
+
+TEST_F(GpuSchedulerTest, SchedulerDoesNothingIfRingBufferIsEmpty) {
+ CommandBuffer::State state;
+
+ state.put_offset = 0;
+ EXPECT_CALL(*command_buffer_, GetLastState())
+ .WillRepeatedly(Return(state));
+
+ EXPECT_CALL(*command_buffer_, SetParseError(_))
+ .Times(0);
+
+ scheduler_->PutChanged();
+}
+
+TEST_F(GpuSchedulerTest, GetSetBuffer) {
+ CommandBuffer::State state;
+
+ // Set the get offset to something not 0.
+ EXPECT_CALL(*command_buffer_, SetGetOffset(2));
+ scheduler_->SetGetOffset(2);
+ EXPECT_EQ(2, scheduler_->GetGetOffset());
+
+ // Set the buffer.
+ EXPECT_CALL(*command_buffer_, GetTransferBuffer(kTransferBufferId))
+ .WillOnce(Return(shared_memory_buffer_));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(0));
+ EXPECT_TRUE(scheduler_->SetGetBuffer(kTransferBufferId));
+
+ // Check the get offset was reset.
+ EXPECT_EQ(0, scheduler_->GetGetOffset());
+}
+
+TEST_F(GpuSchedulerTest, ProcessesOneCommand) {
+ CommandHeader* header = reinterpret_cast<CommandHeader*>(&buffer_[0]);
+ header[0].command = 7;
+ header[0].size = 2;
+ buffer_[1] = 123;
+
+ CommandBuffer::State state;
+
+ state.put_offset = 2;
+ EXPECT_CALL(*command_buffer_, GetLastState())
+ .WillRepeatedly(Return(state));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(2));
+
+ EXPECT_CALL(*decoder_, DoCommand(7, 1, &buffer_[0]))
+ .WillOnce(Return(error::kNoError));
+
+ EXPECT_CALL(*command_buffer_, SetParseError(_))
+ .Times(0);
+
+ scheduler_->PutChanged();
+}
+
+TEST_F(GpuSchedulerTest, ProcessesTwoCommands) {
+ CommandHeader* header = reinterpret_cast<CommandHeader*>(&buffer_[0]);
+ header[0].command = 7;
+ header[0].size = 2;
+ buffer_[1] = 123;
+ header[2].command = 8;
+ header[2].size = 1;
+
+ CommandBuffer::State state;
+
+ state.put_offset = 3;
+ EXPECT_CALL(*command_buffer_, GetLastState())
+ .WillRepeatedly(Return(state));
+
+ EXPECT_CALL(*decoder_, DoCommand(7, 1, &buffer_[0]))
+ .WillOnce(Return(error::kNoError));
+
+ EXPECT_CALL(*decoder_, DoCommand(8, 0, &buffer_[2]))
+ .WillOnce(Return(error::kNoError));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(3));
+
+ scheduler_->PutChanged();
+}
+
+TEST_F(GpuSchedulerTest, SetsErrorCodeOnCommandBuffer) {
+ CommandHeader* header = reinterpret_cast<CommandHeader*>(&buffer_[0]);
+ header[0].command = 7;
+ header[0].size = 1;
+
+ CommandBuffer::State state;
+
+ state.put_offset = 1;
+ EXPECT_CALL(*command_buffer_, GetLastState())
+ .WillRepeatedly(Return(state));
+
+ EXPECT_CALL(*decoder_, DoCommand(7, 0, &buffer_[0]))
+ .WillOnce(Return(
+ error::kUnknownCommand));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(1));
+
+ EXPECT_CALL(*command_buffer_, SetContextLostReason(_));
+ EXPECT_CALL(*decoder_, GetContextLostReason())
+ .WillOnce(Return(error::kUnknown));
+ EXPECT_CALL(*command_buffer_,
+ SetParseError(error::kUnknownCommand));
+
+ scheduler_->PutChanged();
+}
+
+TEST_F(GpuSchedulerTest, ProcessCommandsDoesNothingAfterError) {
+ CommandBuffer::State state;
+ state.error = error::kGenericError;
+
+ EXPECT_CALL(*command_buffer_, GetLastState())
+ .WillRepeatedly(Return(state));
+
+ scheduler_->PutChanged();
+}
+
+TEST_F(GpuSchedulerTest, CanGetAddressOfSharedMemory) {
+ EXPECT_CALL(*command_buffer_.get(), GetTransferBuffer(7))
+ .WillOnce(Return(shared_memory_buffer_));
+
+ EXPECT_EQ(&buffer_[0], scheduler_->GetSharedMemoryBuffer(7)->memory());
+}
+
+ACTION_P2(SetPointee, address, value) {
+ *address = value;
+}
+
+TEST_F(GpuSchedulerTest, CanGetSizeOfSharedMemory) {
+ EXPECT_CALL(*command_buffer_.get(), GetTransferBuffer(7))
+ .WillOnce(Return(shared_memory_buffer_));
+
+ EXPECT_EQ(kRingBufferSize, scheduler_->GetSharedMemoryBuffer(7)->size());
+}
+
+TEST_F(GpuSchedulerTest, SetTokenForwardsToCommandBuffer) {
+ EXPECT_CALL(*command_buffer_, SetToken(7));
+ scheduler_->set_token(7);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gpu_service_test.cc b/gpu/command_buffer/service/gpu_service_test.cc
new file mode 100644
index 0000000..a7c9db1
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_service_test.cc
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gpu_service_test.h"
+
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_context_stub_with_extensions.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface.h"
+
+namespace gpu {
+namespace gles2 {
+
+GpuServiceTest::GpuServiceTest() : ran_setup_(false), ran_teardown_(false) {
+}
+
+GpuServiceTest::~GpuServiceTest() {
+ DCHECK(ran_teardown_);
+}
+
+void GpuServiceTest::SetUpWithGLVersion(const char* gl_version,
+ const char* gl_extensions) {
+ testing::Test::SetUp();
+
+ gfx::SetGLGetProcAddressProc(gfx::MockGLInterface::GetGLProcAddress);
+ gfx::GLSurface::InitializeOneOffWithMockBindingsForTests();
+ gl_.reset(new ::testing::StrictMock< ::gfx::MockGLInterface>());
+ ::gfx::MockGLInterface::SetGLInterface(gl_.get());
+
+ context_ = new gfx::GLContextStubWithExtensions;
+ context_->AddExtensionsString(gl_extensions);
+ context_->SetGLVersionString(gl_version);
+ gfx::GLSurface::InitializeDynamicMockBindingsForTests(context_.get());
+ ran_setup_ = true;
+}
+
+void GpuServiceTest::SetUp() {
+ SetUpWithGLVersion("2.0", NULL);
+}
+
+void GpuServiceTest::TearDown() {
+ DCHECK(ran_setup_);
+ ::gfx::MockGLInterface::SetGLInterface(NULL);
+ gl_.reset();
+ gfx::ClearGLBindings();
+ ran_teardown_ = true;
+
+ testing::Test::TearDown();
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gpu_service_test.h b/gpu/command_buffer/service/gpu_service_test.h
new file mode 100644
index 0000000..c467c14
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_service_test.h
@@ -0,0 +1,43 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_SERVICE_TEST_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_SERVICE_TEST_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+namespace gfx {
+class GLContextStubWithExtensions;
+}
+
+namespace gpu {
+namespace gles2 {
+
+// Base class for tests that need mock GL bindings.
+class GpuServiceTest : public testing::Test {
+ public:
+ GpuServiceTest();
+ virtual ~GpuServiceTest();
+
+ protected:
+ void SetUpWithGLVersion(const char* gl_version, const char* gl_extensions);
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ scoped_ptr< ::testing::StrictMock< ::gfx::MockGLInterface> > gl_;
+
+ private:
+ bool ran_setup_;
+ bool ran_teardown_;
+ scoped_refptr<gfx::GLContextStubWithExtensions> context_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif  // GPU_COMMAND_BUFFER_SERVICE_GPU_SERVICE_TEST_H_
diff --git a/gpu/command_buffer/service/gpu_state_tracer.cc b/gpu/command_buffer/service/gpu_state_tracer.cc
new file mode 100644
index 0000000..6eb5007
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_state_tracer.cc
@@ -0,0 +1,132 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gpu_state_tracer.h"
+
+#include "base/base64.h"
+#include "base/debug/trace_event.h"
+#include "context_state.h"
+#include "ui/gfx/codec/png_codec.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+namespace {
+
+const int kBytesPerPixel = 4;
+
+class Snapshot : public base::debug::ConvertableToTraceFormat {
+ public:
+ static scoped_refptr<Snapshot> Create(const ContextState* state);
+
+ // Save a screenshot of the currently bound framebuffer.
+ bool SaveScreenshot(const gfx::Size& size);
+
+ // base::debug::ConvertableToTraceFormat implementation.
+ virtual void AppendAsTraceFormat(std::string* out) const OVERRIDE;
+
+ private:
+ explicit Snapshot(const ContextState* state);
+ virtual ~Snapshot() {}
+
+ const ContextState* state_;
+
+ std::vector<unsigned char> screenshot_pixels_;
+ gfx::Size screenshot_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(Snapshot);
+};
+
+} // namespace
+
+Snapshot::Snapshot(const ContextState* state) : state_(state) {}
+
+scoped_refptr<Snapshot> Snapshot::Create(const ContextState* state) {
+ return scoped_refptr<Snapshot>(new Snapshot(state));
+}
+
+bool Snapshot::SaveScreenshot(const gfx::Size& size) {
+ screenshot_size_ = size;
+ screenshot_pixels_.resize(screenshot_size_.width() *
+ screenshot_size_.height() * kBytesPerPixel);
+
+ glPixelStorei(GL_PACK_ALIGNMENT, kBytesPerPixel);
+ glReadPixels(0,
+ 0,
+ screenshot_size_.width(),
+ screenshot_size_.height(),
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ &screenshot_pixels_[0]);
+ glPixelStorei(GL_PACK_ALIGNMENT, state_->pack_alignment);
+
+ // Flip the screenshot vertically.
+ int bytes_per_row = screenshot_size_.width() * kBytesPerPixel;
+ for (int y = 0; y < screenshot_size_.height() / 2; y++) {
+ for (int x = 0; x < bytes_per_row; x++) {
+ std::swap(screenshot_pixels_[y * bytes_per_row + x],
+ screenshot_pixels_
+ [(screenshot_size_.height() - y - 1) * bytes_per_row + x]);
+ }
+ }
+ return true;
+}
+
+void Snapshot::AppendAsTraceFormat(std::string* out) const {
+ *out += "{";
+ if (screenshot_pixels_.size()) {
+ std::vector<unsigned char> png_data;
+ int bytes_per_row = screenshot_size_.width() * kBytesPerPixel;
+ bool png_ok = gfx::PNGCodec::Encode(&screenshot_pixels_[0],
+ gfx::PNGCodec::FORMAT_RGBA,
+ screenshot_size_,
+ bytes_per_row,
+ false,
+ std::vector<gfx::PNGCodec::Comment>(),
+ &png_data);
+ DCHECK(png_ok);
+
+ base::StringPiece base64_input(reinterpret_cast<const char*>(&png_data[0]),
+ png_data.size());
+ std::string base64_output;
+  base::Base64Encode(base64_input, &base64_output);
+
+ *out += "\"screenshot\":\"" + base64_output + "\"";
+ }
+ *out += "}";
+}
+
+scoped_ptr<GPUStateTracer> GPUStateTracer::Create(const ContextState* state) {
+ return scoped_ptr<GPUStateTracer>(new GPUStateTracer(state));
+}
+
+GPUStateTracer::GPUStateTracer(const ContextState* state) : state_(state) {
+ TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("gpu.debug"), "gpu::State", state_);
+}
+
+GPUStateTracer::~GPUStateTracer() {
+ TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("gpu.debug"), "gpu::State", state_);
+}
+
+void GPUStateTracer::TakeSnapshotWithCurrentFramebuffer(const gfx::Size& size) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.debug"),
+ "GPUStateTracer::TakeSnapshotWithCurrentFramebuffer");
+
+ scoped_refptr<Snapshot> snapshot(Snapshot::Create(state_));
+
+ // Only save a screenshot for now.
+ if (!snapshot->SaveScreenshot(size))
+ return;
+
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("gpu.debug"),
+ "gpu::State",
+ state_,
+ scoped_refptr<base::debug::ConvertableToTraceFormat>(snapshot));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gpu_state_tracer.h b/gpu/command_buffer/service/gpu_state_tracer.h
new file mode 100644
index 0000000..38998f3
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_state_tracer.h
@@ -0,0 +1,39 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_STATE_TRACER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_STATE_TRACER_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+
+namespace gfx {
+class Size;
+}
+
+namespace gpu {
+namespace gles2 {
+
+struct ContextState;
+
+// Saves GPU state such as framebuffer contents while tracing.
+class GPUStateTracer {
+ public:
+ static scoped_ptr<GPUStateTracer> Create(const ContextState* state);
+ ~GPUStateTracer();
+
+  // Take a state snapshot with a screenshot of the currently bound
+  // framebuffer.
+ void TakeSnapshotWithCurrentFramebuffer(const gfx::Size& size);
+
+ private:
+ explicit GPUStateTracer(const ContextState* state);
+
+ const ContextState* state_;
+ DISALLOW_COPY_AND_ASSIGN(GPUStateTracer);
+};
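+
+// Example usage (illustrative sketch; |state| and |size| are assumed to be
+// provided by the owning decoder):
+//
+//   scoped_ptr<GPUStateTracer> tracer(GPUStateTracer::Create(&state));
+//   // ...bind the framebuffer of interest, then while tracing...
+//   tracer->TakeSnapshotWithCurrentFramebuffer(size);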
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GPU_STATE_TRACER_H_
diff --git a/gpu/command_buffer/service/gpu_switches.cc b/gpu/command_buffer/service/gpu_switches.cc
new file mode 100644
index 0000000..0491c41
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_switches.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "base/basictypes.h"
+
+namespace switches {
+
+// Always return success when compiling a shader. Linking will still fail.
+const char kCompileShaderAlwaysSucceeds[] = "compile-shader-always-succeeds";
+
+// Disable the GL error log limit.
+const char kDisableGLErrorLimit[] = "disable-gl-error-limit";
+
+// Disable the GLSL translator.
+const char kDisableGLSLTranslator[] = "disable-glsl-translator";
+
+// Disable workarounds for various GPU driver bugs.
+const char kDisableGpuDriverBugWorkarounds[] =
+ "disable-gpu-driver-bug-workarounds";
+
+// Turn off user-defined name hashing in shaders.
+const char kDisableShaderNameHashing[] = "disable-shader-name-hashing";
+
+// Turn on logging of GPU commands.
+const char kEnableGPUCommandLogging[] = "enable-gpu-command-logging";
+
+// Turn on calling glGetError after every command.
+const char kEnableGPUDebugging[] = "enable-gpu-debugging";
+
+// Enable GPU service logging. Note: This is the same switch as the one in
+// gl_switches.cc. It's defined here again to avoid dependencies between
+// DLLs.
+const char kEnableGPUServiceLoggingGPU[] = "enable-gpu-service-logging";
+
+// Turn off GPU program caching.
+const char kDisableGpuProgramCache[] = "disable-gpu-program-cache";
+
+// Enforce GL minimums.
+const char kEnforceGLMinimums[] = "enforce-gl-minimums";
+
+// Sets the total amount of memory that may be allocated for GPU resources.
+const char kForceGpuMemAvailableMb[] = "force-gpu-mem-available-mb";
+
+// Pass a set of GpuDriverBugWorkaroundType ids, separated by ','.
+const char kGpuDriverBugWorkarounds[] = "gpu-driver-bug-workarounds";
+
+// Sets the maximum size of the in-memory GPU program cache, in KB.
+const char kGpuProgramCacheSizeKb[] = "gpu-program-cache-size-kb";
+
+// Disables the GPU shader on disk cache.
+const char kDisableGpuShaderDiskCache[] = "disable-gpu-shader-disk-cache";
+
+// Allows async texture uploads (off main thread) via GL context sharing.
+const char kEnableShareGroupAsyncTextureUpload[] =
+ "enable-share-group-async-texture-upload";
+
+const char* kGpuSwitches[] = {
+ kCompileShaderAlwaysSucceeds,
+ kDisableGLErrorLimit,
+ kDisableGLSLTranslator,
+ kDisableGpuDriverBugWorkarounds,
+ kDisableShaderNameHashing,
+ kEnableGPUCommandLogging,
+ kEnableGPUDebugging,
+ kEnableGPUServiceLoggingGPU,
+ kDisableGpuProgramCache,
+ kEnforceGLMinimums,
+ kForceGpuMemAvailableMb,
+ kGpuDriverBugWorkarounds,
+ kGpuProgramCacheSizeKb,
+ kDisableGpuShaderDiskCache,
+ kEnableShareGroupAsyncTextureUpload,
+};
+
+const int kNumGpuSwitches = arraysize(kGpuSwitches);
+
+} // namespace switches
diff --git a/gpu/command_buffer/service/gpu_switches.h b/gpu/command_buffer/service/gpu_switches.h
new file mode 100644
index 0000000..d582b7a
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_switches.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the command-line switches used by gpu/command_buffer/service/.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_SWITCHES_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_SWITCHES_H_
+
+#include "gpu/gpu_export.h"
+
+namespace switches {
+
+GPU_EXPORT extern const char kCompileShaderAlwaysSucceeds[];
+GPU_EXPORT extern const char kDisableGLErrorLimit[];
+GPU_EXPORT extern const char kDisableGLSLTranslator[];
+GPU_EXPORT extern const char kDisableGpuDriverBugWorkarounds[];
+GPU_EXPORT extern const char kDisableShaderNameHashing[];
+GPU_EXPORT extern const char kEnableGPUCommandLogging[];
+GPU_EXPORT extern const char kEnableGPUDebugging[];
+GPU_EXPORT extern const char kEnableGPUServiceLoggingGPU[];
+GPU_EXPORT extern const char kDisableGpuProgramCache[];
+GPU_EXPORT extern const char kEnforceGLMinimums[];
+GPU_EXPORT extern const char kForceGpuMemAvailableMb[];
+GPU_EXPORT extern const char kGpuDriverBugWorkarounds[];
+GPU_EXPORT extern const char kGpuProgramCacheSizeKb[];
+GPU_EXPORT extern const char kDisableGpuShaderDiskCache[];
+GPU_EXPORT extern const char kEnableShareGroupAsyncTextureUpload[];
+
+GPU_EXPORT extern const char* kGpuSwitches[];
+GPU_EXPORT extern const int kNumGpuSwitches;
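+
+// Example (illustrative sketch, assuming base/command_line.h): these switches
+// are normally queried from the process command line, e.g.
+//
+//   const CommandLine* cl = CommandLine::ForCurrentProcess();
+//   if (cl->HasSwitch(switches::kEnforceGLMinimums)) {
+//     // Clamp reported limits to the GLES2 minimums.
+//   }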
+
+} // namespace switches
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GPU_SWITCHES_H_
+
diff --git a/gpu/command_buffer/service/gpu_tracer.cc b/gpu/command_buffer/service/gpu_tracer.cc
new file mode 100644
index 0000000..024e4b6
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_tracer.cc
@@ -0,0 +1,407 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gpu_tracer.h"
+
+#include <deque>
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/strings/string_util.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+
+namespace gpu {
+namespace gles2 {
+
+static const unsigned int kProcessInterval = 16;
+static TraceOutputter* g_outputter_thread = NULL;
+
+TraceMarker::TraceMarker(const std::string& name)
+ : name_(name),
+ trace_(NULL) {
+}
+
+TraceMarker::~TraceMarker() {
+}
+
+scoped_refptr<TraceOutputter> TraceOutputter::Create(const std::string& name) {
+ if (!g_outputter_thread) {
+ g_outputter_thread = new TraceOutputter(name);
+ }
+ return g_outputter_thread;
+}
+
+TraceOutputter::TraceOutputter(const std::string& name)
+ : named_thread_(name.c_str()), local_trace_id_(0) {
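+  // Start and immediately stop the thread: it never runs any tasks, it only
+  // has to exist so that its thread id can be used to attribute the trace
+  // events emitted by Trace() below.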
+ named_thread_.Start();
+ named_thread_.Stop();
+}
+
+TraceOutputter::~TraceOutputter() { g_outputter_thread = NULL; }
+
+void TraceOutputter::Trace(const std::string& name,
+ int64 start_time,
+ int64 end_time) {
+ TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.device"),
+ name.c_str(),
+ local_trace_id_,
+ named_thread_.thread_id(),
+ start_time);
+ TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.device"),
+ name.c_str(),
+ local_trace_id_,
+ named_thread_.thread_id(),
+ end_time);
+ ++local_trace_id_;
+}
+
+GPUTrace::GPUTrace(scoped_refptr<Outputter> outputter,
+ const std::string& name,
+ int64 offset,
+ GpuTracerType tracer_type)
+ : name_(name),
+ outputter_(outputter),
+ offset_(offset),
+ start_time_(0),
+ end_time_(0),
+ tracer_type_(tracer_type),
+ end_requested_(false) {
+ memset(queries_, 0, sizeof(queries_));
+ switch (tracer_type_) {
+ case kTracerTypeARBTimer:
+ case kTracerTypeDisjointTimer:
+ glGenQueriesARB(2, queries_);
+ break;
+
+ default:
+ tracer_type_ = kTracerTypeInvalid;
+ }
+}
+
+GPUTrace::~GPUTrace() {
+ switch (tracer_type_) {
+ case kTracerTypeInvalid:
+ break;
+
+ case kTracerTypeARBTimer:
+ case kTracerTypeDisjointTimer:
+ glDeleteQueriesARB(2, queries_);
+ break;
+ }
+}
+
+void GPUTrace::Start() {
+ TRACE_EVENT_COPY_ASYNC_BEGIN0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.service"), name().c_str(), this);
+
+ switch (tracer_type_) {
+ case kTracerTypeInvalid:
+ break;
+
+ case kTracerTypeDisjointTimer:
+ // For the disjoint timer, GPU idle time does not seem to increment the
+ // internal counter. We must calculate the offset before any query. The
+ // good news is any device that supports disjoint timer will also support
+ // glGetInteger64v, so we can query it directly unlike the ARBTimer case.
+ // The "offset_" variable will always be 0 during normal use cases, only
+ // under the unit tests will it be set to specific test values.
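+      // A sketch of the arithmetic: GL timestamps are in nanoseconds while
+      // TimeTicks are in microseconds, so
+      //   offset_us = cpu_now_us - gl_now_ns / 1000,
+      // and a later GL timestamp maps to gl_ns / 1000 + offset_ on the CPU
+      // trace timeline (see Process()).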
+ if (offset_ == 0) {
+ GLint64 gl_now = 0;
+ glGetInteger64v(GL_TIMESTAMP, &gl_now);
+ offset_ = base::TimeTicks::NowFromSystemTraceTime().ToInternalValue() -
+ gl_now / base::Time::kNanosecondsPerMicrosecond;
+ }
+      // Intentionally fall through to the kTracerTypeARBTimer case.
+ case kTracerTypeARBTimer:
+ // GL_TIMESTAMP and GL_TIMESTAMP_EXT both have the same value.
+ glQueryCounter(queries_[0], GL_TIMESTAMP);
+ break;
+ }
+}
+
+void GPUTrace::End() {
+ end_requested_ = true;
+ switch (tracer_type_) {
+ case kTracerTypeInvalid:
+ break;
+
+ case kTracerTypeARBTimer:
+ case kTracerTypeDisjointTimer:
+ // GL_TIMESTAMP and GL_TIMESTAMP_EXT both have the same value.
+ glQueryCounter(queries_[1], GL_TIMESTAMP);
+ break;
+ }
+
+ TRACE_EVENT_COPY_ASYNC_END0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.service"), name().c_str(), this);
+}
+
+bool GPUTrace::IsAvailable() {
+ if (tracer_type_ != kTracerTypeInvalid) {
+ if (!end_requested_)
+ return false;
+
+ GLint done = 0;
+ glGetQueryObjectiv(queries_[1], GL_QUERY_RESULT_AVAILABLE, &done);
+ return !!done;
+ }
+
+ return true;
+}
+
+void GPUTrace::Process() {
+ if (tracer_type_ == kTracerTypeInvalid)
+ return;
+
+ DCHECK(IsAvailable());
+
+ GLuint64 begin_stamp = 0;
+ GLuint64 end_stamp = 0;
+
+ // TODO(dsinclair): It's possible for the timer to wrap during the start/end.
+  // We need to detect if the end is less than the start and correct for the
+ // wrapping.
+ glGetQueryObjectui64v(queries_[0], GL_QUERY_RESULT, &begin_stamp);
+ glGetQueryObjectui64v(queries_[1], GL_QUERY_RESULT, &end_stamp);
+
+ start_time_ = (begin_stamp / base::Time::kNanosecondsPerMicrosecond) +
+ offset_;
+ end_time_ = (end_stamp / base::Time::kNanosecondsPerMicrosecond) + offset_;
+ outputter_->Trace(name(), start_time_, end_time_);
+}
+
+GPUTracer::GPUTracer(gles2::GLES2Decoder* decoder)
+ : gpu_trace_srv_category(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("gpu.service"))),
+ gpu_trace_dev_category(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("gpu.device"))),
+ decoder_(decoder),
+ timer_offset_(0),
+ last_tracer_source_(kTraceGroupInvalid),
+ tracer_type_(kTracerTypeInvalid),
+ gpu_timing_synced_(false),
+ gpu_executing_(false),
+ process_posted_(false) {
+ if (gfx::g_driver_gl.ext.b_GL_EXT_disjoint_timer_query) {
+ tracer_type_ = kTracerTypeDisjointTimer;
+ outputter_ = TraceOutputter::Create("GL_EXT_disjoint_timer_query");
+ } else if (gfx::g_driver_gl.ext.b_GL_ARB_timer_query) {
+ tracer_type_ = kTracerTypeARBTimer;
+ outputter_ = TraceOutputter::Create("GL_ARB_timer_query");
+ }
+}
+
+GPUTracer::~GPUTracer() {
+}
+
+bool GPUTracer::BeginDecoding() {
+ if (gpu_executing_)
+ return false;
+
+ CalculateTimerOffset();
+ gpu_executing_ = true;
+
+ if (IsTracing()) {
+ // Reset disjoint bit for the disjoint timer.
+ if (tracer_type_ == kTracerTypeDisjointTimer) {
+ GLint disjoint_value = 0;
+ glGetIntegerv(GL_GPU_DISJOINT_EXT, &disjoint_value);
+ }
+
+ // Begin a Trace for all active markers
+ for (int n = 0; n < NUM_TRACER_SOURCES; n++) {
+ for (size_t i = 0; i < markers_[n].size(); i++) {
+ markers_[n][i].trace_ = CreateTrace(markers_[n][i].name_);
+ markers_[n][i].trace_->Start();
+ }
+ }
+ }
+ return true;
+}
+
+bool GPUTracer::EndDecoding() {
+ if (!gpu_executing_)
+ return false;
+
+ // End Trace for all active markers
+ if (IsTracing()) {
+ for (int n = 0; n < NUM_TRACER_SOURCES; n++) {
+ for (size_t i = 0; i < markers_[n].size(); i++) {
+ if (markers_[n][i].trace_.get()) {
+ markers_[n][i].trace_->End();
+ if (markers_[n][i].trace_->IsEnabled())
+ traces_.push_back(markers_[n][i].trace_);
+ markers_[n][i].trace_ = 0;
+ }
+ }
+ }
+ IssueProcessTask();
+ }
+
+ gpu_executing_ = false;
+
+ // NOTE(vmiura): glFlush() here can help give better trace results,
+ // but it distorts the normal device behavior.
+ return true;
+}
+
+bool GPUTracer::Begin(const std::string& name, GpuTracerSource source) {
+ if (!gpu_executing_)
+ return false;
+
+ DCHECK(source >= 0 && source < NUM_TRACER_SOURCES);
+
+ // Push new marker from given 'source'
+ last_tracer_source_ = source;
+ markers_[source].push_back(TraceMarker(name));
+
+ // Create trace
+ if (IsTracing()) {
+ scoped_refptr<GPUTrace> trace = CreateTrace(name);
+ trace->Start();
+ markers_[source].back().trace_ = trace;
+ }
+
+ return true;
+}
+
+bool GPUTracer::End(GpuTracerSource source) {
+ if (!gpu_executing_)
+ return false;
+
+ DCHECK(source >= 0 && source < NUM_TRACER_SOURCES);
+
+ // Pop last marker with matching 'source'
+ if (!markers_[source].empty()) {
+ if (IsTracing()) {
+ scoped_refptr<GPUTrace> trace = markers_[source].back().trace_;
+ if (trace.get()) {
+ trace->End();
+ if (trace->IsEnabled())
+ traces_.push_back(trace);
+ IssueProcessTask();
+ }
+ }
+
+ markers_[source].pop_back();
+ return true;
+ }
+ return false;
+}
+
+bool GPUTracer::IsTracing() {
+ return (*gpu_trace_srv_category != 0) || (*gpu_trace_dev_category != 0);
+}
+
+const std::string& GPUTracer::CurrentName() const {
+ if (last_tracer_source_ >= 0 &&
+ last_tracer_source_ < NUM_TRACER_SOURCES &&
+ !markers_[last_tracer_source_].empty()) {
+ return markers_[last_tracer_source_].back().name_;
+ }
+ return base::EmptyString();
+}
+
+scoped_refptr<GPUTrace> GPUTracer::CreateTrace(const std::string& name) {
+ GpuTracerType tracer_type = *gpu_trace_dev_category ? tracer_type_ :
+ kTracerTypeInvalid;
+
+ return new GPUTrace(outputter_, name, timer_offset_, tracer_type);
+}
+
+void GPUTracer::Process() {
+ process_posted_ = false;
+ ProcessTraces();
+ IssueProcessTask();
+}
+
+void GPUTracer::ProcessTraces() {
+ if (tracer_type_ == kTracerTypeInvalid) {
+ traces_.clear();
+ return;
+ }
+
+ TRACE_EVENT0("gpu", "GPUTracer::ProcessTraces");
+
+ // Make owning decoder's GL context current
+ if (!decoder_->MakeCurrent()) {
+ // Skip subsequent GL calls if MakeCurrent fails
+ traces_.clear();
+ return;
+ }
+
+ // Check if disjoint operation has occurred, discard ongoing traces if so.
+ if (tracer_type_ == kTracerTypeDisjointTimer) {
+ GLint disjoint_value = 0;
+ glGetIntegerv(GL_GPU_DISJOINT_EXT, &disjoint_value);
+ if (disjoint_value)
+ traces_.clear();
+ }
+
+ while (!traces_.empty() && traces_.front()->IsAvailable()) {
+ traces_.front()->Process();
+ traces_.pop_front();
+ }
+
+  // Clear pending traces if there were any errors.
+ GLenum err = glGetError();
+ if (err != GL_NO_ERROR)
+ traces_.clear();
+}
+
+void GPUTracer::CalculateTimerOffset() {
+ if (tracer_type_ != kTracerTypeInvalid) {
+ if (*gpu_trace_dev_category == '\0') {
+ // If GPU device category is off, invalidate timing sync.
+ gpu_timing_synced_ = false;
+ return;
+ } else if (tracer_type_ == kTracerTypeDisjointTimer) {
+ // Disjoint timers offsets should be calculated before every query.
+ gpu_timing_synced_ = true;
+ timer_offset_ = 0;
+ }
+
+ if (gpu_timing_synced_)
+ return;
+
+ TRACE_EVENT0("gpu", "GPUTracer::CalculateTimerOffset");
+
+ // NOTE(vmiura): It would be better to use glGetInteger64v, however
+ // it's not available everywhere.
+ GLuint64 gl_now = 0;
+ GLuint query;
+
+ glGenQueriesARB(1, &query);
+
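+    // The two glFinish() calls bracket the timestamp query: the first drains
+    // pending work so the query is not delayed, the second ensures the query
+    // has completed before the CPU time is sampled just below, so the two
+    // clocks are read as close together as practical.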
+ glFinish();
+ glQueryCounter(query, GL_TIMESTAMP);
+ glFinish();
+
+ glGetQueryObjectui64v(query, GL_QUERY_RESULT, &gl_now);
+ glDeleteQueriesARB(1, &query);
+
+ base::TimeTicks system_now = base::TimeTicks::NowFromSystemTraceTime();
+
+ gl_now /= base::Time::kNanosecondsPerMicrosecond;
+ timer_offset_ = system_now.ToInternalValue() - gl_now;
+ gpu_timing_synced_ = true;
+ }
+}
+
+void GPUTracer::IssueProcessTask() {
+ if (traces_.empty() || process_posted_)
+ return;
+
+ process_posted_ = true;
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&GPUTracer::Process, base::AsWeakPtr(this)),
+ base::TimeDelta::FromMilliseconds(kProcessInterval));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/gpu_tracer.h b/gpu/command_buffer/service/gpu_tracer.h
new file mode 100644
index 0000000..63e5646
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_tracer.h
@@ -0,0 +1,173 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the GPUTrace class.
+#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_TRACER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_GPU_TRACER_H_
+
+#include <deque>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/gpu_export.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+
+class Outputter;
+class GPUTrace;
+
+// Id used to keep trace namespaces separate
+enum GpuTracerSource {
+ kTraceGroupInvalid = -1,
+
+ kTraceGroupMarker = 0,
+ kTraceCHROMIUM = 1,
+ kTraceDecoder = 2,
+
+ NUM_TRACER_SOURCES
+};
+
+enum GpuTracerType {
+ kTracerTypeInvalid = -1,
+
+ kTracerTypeARBTimer,
+ kTracerTypeDisjointTimer
+};
+
+// Marker structure for a Trace.
+struct TraceMarker {
+ TraceMarker(const std::string& name);
+ ~TraceMarker();
+
+ std::string name_;
+ scoped_refptr<GPUTrace> trace_;
+};
+
+// Traces GPU Commands.
+class GPUTracer : public base::SupportsWeakPtr<GPUTracer> {
+ public:
+ explicit GPUTracer(gles2::GLES2Decoder* decoder);
+ ~GPUTracer();
+
+ // Scheduled processing in decoder begins.
+ bool BeginDecoding();
+
+ // Scheduled processing in decoder ends.
+ bool EndDecoding();
+
+ // Begin a trace marker.
+ bool Begin(const std::string& name, GpuTracerSource source);
+
+ // End the last started trace marker.
+ bool End(GpuTracerSource source);
+
+ bool IsTracing();
+
+ // Retrieve the name of the current open trace.
+ // Returns empty string if no current open trace.
+ const std::string& CurrentName() const;
+
+ private:
+ // Trace Processing.
+ scoped_refptr<GPUTrace> CreateTrace(const std::string& name);
+ void Process();
+ void ProcessTraces();
+
+ void CalculateTimerOffset();
+ void IssueProcessTask();
+
+ scoped_refptr<Outputter> outputter_;
+ std::vector<TraceMarker> markers_[NUM_TRACER_SOURCES];
+ std::deque<scoped_refptr<GPUTrace> > traces_;
+
+ const unsigned char* gpu_trace_srv_category;
+ const unsigned char* gpu_trace_dev_category;
+ gles2::GLES2Decoder* decoder_;
+
+ int64 timer_offset_;
+ GpuTracerSource last_tracer_source_;
+
+ GpuTracerType tracer_type_;
+ bool gpu_timing_synced_;
+ bool gpu_executing_;
+ bool process_posted_;
+
+ DISALLOW_COPY_AND_ASSIGN(GPUTracer);
+};
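+
+// Example usage (illustrative sketch; |tracer| is assumed to be owned by the
+// decoder, which drives these calls while executing commands):
+//
+//   tracer.BeginDecoding();
+//   tracer.Begin("MyMarker", kTraceCHROMIUM);
+//   // ...issue GL commands...
+//   tracer.End(kTraceCHROMIUM);
+//   tracer.EndDecoding();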
+
+class Outputter : public base::RefCounted<Outputter> {
+ public:
+ virtual void Trace(const std::string& name,
+ int64 start_time,
+ int64 end_time) = 0;
+
+ protected:
+ virtual ~Outputter() {}
+ friend class base::RefCounted<Outputter>;
+};
+
+class TraceOutputter : public Outputter {
+ public:
+ static scoped_refptr<TraceOutputter> Create(const std::string& name);
+ virtual void Trace(const std::string& name,
+ int64 start_time,
+ int64 end_time) OVERRIDE;
+
+ protected:
+ friend class base::RefCounted<Outputter>;
+ explicit TraceOutputter(const std::string& name);
+ virtual ~TraceOutputter();
+
+ base::Thread named_thread_;
+ uint64 local_trace_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceOutputter);
+};
+
+class GPU_EXPORT GPUTrace
+ : public base::RefCounted<GPUTrace> {
+ public:
+ GPUTrace(scoped_refptr<Outputter> outputter,
+ const std::string& name,
+ int64 offset,
+ GpuTracerType tracer_type);
+
+ bool IsEnabled() { return tracer_type_ != kTracerTypeInvalid; }
+ const std::string& name() { return name_; }
+
+ void Start();
+ void End();
+ bool IsAvailable();
+ void Process();
+
+ private:
+ ~GPUTrace();
+
+ void Output();
+
+ friend class base::RefCounted<GPUTrace>;
+
+ std::string name_;
+ scoped_refptr<Outputter> outputter_;
+
+ int64 offset_;
+ int64 start_time_;
+ int64 end_time_;
+ GpuTracerType tracer_type_;
+ bool end_requested_;
+
+ GLuint queries_[2];
+
+ DISALLOW_COPY_AND_ASSIGN(GPUTrace);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_GPU_TRACER_H_
diff --git a/gpu/command_buffer/service/gpu_tracer_unittest.cc b/gpu/command_buffer/service/gpu_tracer_unittest.cc
new file mode 100644
index 0000000..fe91f70
--- /dev/null
+++ b/gpu/command_buffer/service/gpu_tracer_unittest.cc
@@ -0,0 +1,237 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <map>
+#include <set>
+
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+namespace gpu {
+namespace gles2 {
+
+using ::testing::InvokeWithoutArgs;
+using ::testing::Return;
+using ::testing::ReturnRef;
+using ::testing::ReturnPointee;
+using ::testing::NotNull;
+using ::testing::ElementsAreArray;
+using ::testing::ElementsAre;
+using ::testing::SetArrayArgument;
+using ::testing::AtLeast;
+using ::testing::SetArgPointee;
+using ::testing::Pointee;
+using ::testing::Unused;
+using ::testing::Invoke;
+using ::testing::_;
+
+class MockOutputter : public Outputter {
+ public:
+ MockOutputter() {}
+ MOCK_METHOD3(Trace,
+ void(const std::string& name, int64 start_time, int64 end_time));
+
+ protected:
+ ~MockOutputter() {}
+};
+
+class GlFakeQueries {
+ public:
+ GlFakeQueries() {}
+
+ void Reset() {
+ current_time_ = 0;
+ next_query_id_ = 23;
+ alloced_queries_.clear();
+ query_timestamp_.clear();
+ }
+
+ void SetCurrentGLTime(GLint64 current_time) { current_time_ = current_time; }
+
+ void GenQueriesARB(GLsizei n, GLuint* ids) {
+ for (GLsizei i = 0; i < n; i++) {
+ ids[i] = next_query_id_++;
+ alloced_queries_.insert(ids[i]);
+ }
+ }
+
+ void DeleteQueriesARB(GLsizei n, const GLuint* ids) {
+ for (GLsizei i = 0; i < n; i++) {
+ alloced_queries_.erase(ids[i]);
+ query_timestamp_.erase(ids[i]);
+ }
+ }
+
+ void GetQueryObjectiv(GLuint id, GLenum pname, GLint* params) {
+ switch (pname) {
+ case GL_QUERY_RESULT_AVAILABLE: {
+ std::map<GLuint, GLint64>::iterator it = query_timestamp_.find(id);
+ if (it != query_timestamp_.end() && it->second <= current_time_)
+ *params = 1;
+ else
+ *params = 0;
+ break;
+ }
+ default:
+ ASSERT_TRUE(false);
+ }
+ }
+
+ void QueryCounter(GLuint id, GLenum target) {
+ switch (target) {
+ case GL_TIMESTAMP:
+ ASSERT_TRUE(alloced_queries_.find(id) != alloced_queries_.end());
+ query_timestamp_[id] = current_time_;
+ break;
+ default:
+ ASSERT_TRUE(false);
+ }
+ }
+
+ void GetQueryObjectui64v(GLuint id, GLenum pname, GLuint64* params) {
+ switch (pname) {
+ case GL_QUERY_RESULT:
+ ASSERT_TRUE(query_timestamp_.find(id) != query_timestamp_.end());
+ *params = query_timestamp_.find(id)->second;
+ break;
+ default:
+ ASSERT_TRUE(false);
+ }
+ }
+
+ protected:
+ GLint64 current_time_;
+ GLuint next_query_id_;
+ std::set<GLuint> alloced_queries_;
+ std::map<GLuint, GLint64> query_timestamp_;
+};
+
+class BaseGpuTracerTest : public GpuServiceTest {
+ public:
+ BaseGpuTracerTest() {}
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ void DoTraceTest() {
+ MockOutputter* outputter = new MockOutputter();
+ scoped_refptr<Outputter> outputter_ref = outputter;
+
+ SetupTimerQueryMocks();
+
+ // Expected results
+ const std::string trace_name("trace_test");
+ const int64 offset_time = 3231;
+ const GLint64 start_timestamp = 7 * base::Time::kNanosecondsPerMicrosecond;
+ const GLint64 end_timestamp = 32 * base::Time::kNanosecondsPerMicrosecond;
+ const int64 expect_start_time =
+ (start_timestamp / base::Time::kNanosecondsPerMicrosecond) +
+ offset_time;
+ const int64 expect_end_time =
+ (end_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time;
+
+ // Expected Outputter::Trace call
+ EXPECT_CALL(*outputter,
+ Trace(trace_name, expect_start_time, expect_end_time));
+
+ scoped_refptr<GPUTrace> trace =
+ new GPUTrace(outputter_ref, trace_name, offset_time,
+ GetTracerType());
+
+ gl_fake_queries_.SetCurrentGLTime(start_timestamp);
+ trace->Start();
+
+ // Shouldn't be available before End() call
+ gl_fake_queries_.SetCurrentGLTime(end_timestamp);
+ EXPECT_FALSE(trace->IsAvailable());
+
+ trace->End();
+
+ // Shouldn't be available until the queries complete
+ gl_fake_queries_.SetCurrentGLTime(end_timestamp -
+ base::Time::kNanosecondsPerMicrosecond);
+ EXPECT_FALSE(trace->IsAvailable());
+
+ // Now it should be available
+ gl_fake_queries_.SetCurrentGLTime(end_timestamp);
+ EXPECT_TRUE(trace->IsAvailable());
+
+    // Process() should output the expected Trace results to MockOutputter.
+ trace->Process();
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ gl_fake_queries_.Reset();
+ }
+
+ virtual void TearDown() {
+ gl_.reset();
+ gl_fake_queries_.Reset();
+ GpuServiceTest::TearDown();
+ }
+
+ virtual void SetupTimerQueryMocks() {
+ // Delegate query APIs used by GPUTrace to a GlFakeQueries
+ EXPECT_CALL(*gl_, GenQueriesARB(_, NotNull())).Times(AtLeast(1)).WillOnce(
+ Invoke(&gl_fake_queries_, &GlFakeQueries::GenQueriesARB));
+
+ EXPECT_CALL(*gl_, GetQueryObjectiv(_, GL_QUERY_RESULT_AVAILABLE, NotNull()))
+ .Times(AtLeast(2))
+ .WillRepeatedly(
+ Invoke(&gl_fake_queries_, &GlFakeQueries::GetQueryObjectiv));
+
+ EXPECT_CALL(*gl_, QueryCounter(_, GL_TIMESTAMP))
+ .Times(AtLeast(2))
+ .WillRepeatedly(
+ Invoke(&gl_fake_queries_, &GlFakeQueries::QueryCounter));
+
+ EXPECT_CALL(*gl_, GetQueryObjectui64v(_, GL_QUERY_RESULT, NotNull()))
+ .Times(AtLeast(2))
+ .WillRepeatedly(
+ Invoke(&gl_fake_queries_, &GlFakeQueries::GetQueryObjectui64v));
+
+ EXPECT_CALL(*gl_, DeleteQueriesARB(2, NotNull()))
+ .Times(AtLeast(1))
+ .WillRepeatedly(
+ Invoke(&gl_fake_queries_, &GlFakeQueries::DeleteQueriesARB));
+ }
+
+ virtual GpuTracerType GetTracerType() = 0;
+
+ GlFakeQueries gl_fake_queries_;
+};
+
+class GpuARBTimerTracerTest : public BaseGpuTracerTest {
+ protected:
+ virtual GpuTracerType GetTracerType() OVERRIDE {
+ return kTracerTypeARBTimer;
+ }
+};
+
+class GpuDisjointTimerTracerTest : public BaseGpuTracerTest {
+ protected:
+ virtual GpuTracerType GetTracerType() OVERRIDE {
+ return kTracerTypeDisjointTimer;
+ }
+};
+
+TEST_F(GpuARBTimerTracerTest, GPUTrace) {
+ // Test basic timer query functionality
+ {
+ DoTraceTest();
+ }
+}
+
+TEST_F(GpuDisjointTimerTracerTest, GPUTrace) {
+ // Test basic timer query functionality
+ {
+ DoTraceTest();
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/id_manager.cc b/gpu/command_buffer/service/id_manager.cc
new file mode 100644
index 0000000..be60d7b
--- /dev/null
+++ b/gpu/command_buffer/service/id_manager.cc
@@ -0,0 +1,57 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/id_manager.h"
+#include "base/logging.h"
+
+namespace gpu {
+namespace gles2 {
+
+IdManager::IdManager() {}
+
+IdManager::~IdManager() {}
+
+bool IdManager::AddMapping(GLuint client_id, GLuint service_id) {
+ std::pair<MapType::iterator, bool> result = id_map_.insert(
+ std::make_pair(client_id, service_id));
+ return result.second;
+}
+
+bool IdManager::RemoveMapping(GLuint client_id, GLuint service_id) {
+ MapType::iterator iter = id_map_.find(client_id);
+ if (iter != id_map_.end() && iter->second == service_id) {
+ id_map_.erase(iter);
+ return true;
+ }
+ return false;
+}
+
+bool IdManager::GetServiceId(GLuint client_id, GLuint* service_id) {
+ DCHECK(service_id);
+ MapType::iterator iter = id_map_.find(client_id);
+ if (iter != id_map_.end()) {
+ *service_id = iter->second;
+ return true;
+ }
+ return false;
+}
+
+bool IdManager::GetClientId(GLuint service_id, GLuint* client_id) {
+ DCHECK(client_id);
+ MapType::iterator end(id_map_.end());
+ for (MapType::iterator iter(id_map_.begin());
+ iter != end;
+ ++iter) {
+ if (iter->second == service_id) {
+ *client_id = iter->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/id_manager.h b/gpu/command_buffer/service/id_manager.h
new file mode 100644
index 0000000..0bc0674
--- /dev/null
+++ b/gpu/command_buffer/service/id_manager.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_ID_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_ID_MANAGER_H_
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This class maps one set of ids to another.
+//
+// NOTE: To support shared resources an instance of this class will
+// need to be shared by multiple GLES2Decoders.
+class GPU_EXPORT IdManager {
+ public:
+ IdManager();
+ ~IdManager();
+
+  // Maps a client_id to a service_id. Returns false if the client_id or
+ // service_id are already mapped to something else.
+ bool AddMapping(GLuint client_id, GLuint service_id);
+
+  // Unmaps a pair of ids. Returns false if the pair was not previously
+  // mapped.
+ bool RemoveMapping(GLuint client_id, GLuint service_id);
+
+ // Gets the corresponding service_id for the given client_id.
+ // Returns false if there is no corresponding service_id.
+ bool GetServiceId(GLuint client_id, GLuint* service_id);
+
+ // Gets the corresponding client_id for the given service_id.
+ // Returns false if there is no corresponding client_id.
+ bool GetClientId(GLuint service_id, GLuint* client_id);
+
+ private:
+ typedef base::hash_map<GLuint, GLuint> MapType;
+ MapType id_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(IdManager);
+};
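+
+// Example usage (illustrative sketch; |client_id| and |service_id| are
+// assumed to come from the caller):
+//
+//   IdManager ids;
+//   ids.AddMapping(client_id, service_id);
+//   GLuint mapped = 0;
+//   if (ids.GetServiceId(client_id, &mapped)) {
+//     // ...use |mapped| as the underlying GL id...
+//   }
+//   ids.RemoveMapping(client_id, service_id);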
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_ID_MANAGER_H_
+
diff --git a/gpu/command_buffer/service/id_manager_unittest.cc b/gpu/command_buffer/service/id_manager_unittest.cc
new file mode 100644
index 0000000..015a442
--- /dev/null
+++ b/gpu/command_buffer/service/id_manager_unittest.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/id_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace gles2 {
+
+class IdManagerTest : public testing::Test {
+ public:
+ IdManagerTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ }
+
+ virtual void TearDown() {
+ }
+
+ IdManager manager_;
+};
+
+TEST_F(IdManagerTest, Basic) {
+ const GLuint kClientId1 = 1;
+ const GLuint kClientId2 = 2;
+ const GLuint kServiceId1 = 201;
+ const GLuint kServiceId2 = 202;
+ // Check we can add an id
+ EXPECT_TRUE(manager_.AddMapping(kClientId1, kServiceId1));
+ // Check we can get that mapping
+ GLuint service_id = 0;
+ EXPECT_TRUE(manager_.GetServiceId(kClientId1, &service_id));
+ EXPECT_EQ(kServiceId1, service_id);
+ GLuint client_id = 0;
+ EXPECT_TRUE(manager_.GetClientId(kServiceId1, &client_id));
+ EXPECT_EQ(kClientId1, client_id);
+ // Check that it fails if we get a non-existent id.
+ service_id = 0;
+ client_id = 0;
+ EXPECT_FALSE(manager_.GetServiceId(kClientId2, &service_id));
+ EXPECT_FALSE(manager_.GetClientId(kServiceId2, &client_id));
+ EXPECT_EQ(0u, service_id);
+ EXPECT_EQ(0u, client_id);
+ // Check we can add a second id.
+ EXPECT_TRUE(manager_.AddMapping(kClientId2, kServiceId2));
+ // Check we can get that mapping
+ service_id = 0;
+ EXPECT_TRUE(manager_.GetServiceId(kClientId1, &service_id));
+ EXPECT_EQ(kServiceId1, service_id);
+ EXPECT_TRUE(manager_.GetServiceId(kClientId2, &service_id));
+ EXPECT_EQ(kServiceId2, service_id);
+ client_id = 0;
+ EXPECT_TRUE(manager_.GetClientId(kServiceId1, &client_id));
+ EXPECT_EQ(kClientId1, client_id);
+ EXPECT_TRUE(manager_.GetClientId(kServiceId2, &client_id));
+ EXPECT_EQ(kClientId2, client_id);
+ // Check if we remove an id we can no longer get it.
+ EXPECT_TRUE(manager_.RemoveMapping(kClientId1, kServiceId1));
+ EXPECT_FALSE(manager_.GetServiceId(kClientId1, &service_id));
+ EXPECT_FALSE(manager_.GetClientId(kServiceId1, &client_id));
+ // Check we get an error if we try to remove a non-existent id.
+ EXPECT_FALSE(manager_.RemoveMapping(kClientId1, kServiceId1));
+ EXPECT_FALSE(manager_.RemoveMapping(kClientId2, kServiceId1));
+ EXPECT_FALSE(manager_.RemoveMapping(kClientId1, kServiceId2));
+ // Check we get an error if we try to map an existing id.
+ EXPECT_FALSE(manager_.AddMapping(kClientId2, kServiceId2));
+ EXPECT_FALSE(manager_.AddMapping(kClientId2, kServiceId1));
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/image_manager.cc b/gpu/command_buffer/service/image_manager.cc
new file mode 100644
index 0000000..46438c7
--- /dev/null
+++ b/gpu/command_buffer/service/image_manager.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/image_manager.h"
+
+#include "base/logging.h"
+#include "ui/gl/gl_image.h"
+
+namespace gpu {
+namespace gles2 {
+
+ImageManager::ImageManager() {
+}
+
+ImageManager::~ImageManager() {
+}
+
+void ImageManager::Destroy(bool have_context) {
+ for (GLImageMap::const_iterator iter = images_.begin(); iter != images_.end();
+ ++iter)
+ iter->second.get()->Destroy(have_context);
+ images_.clear();
+}
+
+void ImageManager::AddImage(gfx::GLImage* image, int32 service_id) {
+ DCHECK(images_.find(service_id) == images_.end());
+ images_[service_id] = image;
+}
+
+void ImageManager::RemoveImage(int32 service_id) {
+ GLImageMap::iterator iter = images_.find(service_id);
+ DCHECK(iter != images_.end());
+ iter->second.get()->Destroy(true);
+ images_.erase(iter);
+}
+
+gfx::GLImage* ImageManager::LookupImage(int32 service_id) {
+ GLImageMap::const_iterator iter = images_.find(service_id);
+ if (iter != images_.end())
+ return iter->second.get();
+
+ return NULL;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/image_manager.h b/gpu/command_buffer/service/image_manager.h
new file mode 100644
index 0000000..0a440f9
--- /dev/null
+++ b/gpu/command_buffer/service/image_manager.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_IMAGE_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_IMAGE_MANAGER_H_
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/gpu_export.h"
+
+namespace gfx {
+class GLImage;
+}
+
+namespace gpu {
+namespace gles2 {
+
+// This class keeps track of the images and their state.
+class GPU_EXPORT ImageManager {
+ public:
+ ImageManager();
+ ~ImageManager();
+
+ void Destroy(bool have_context);
+ void AddImage(gfx::GLImage* image, int32 service_id);
+ void RemoveImage(int32 service_id);
+ gfx::GLImage* LookupImage(int32 service_id);
+
+ private:
+ typedef base::hash_map<int32, scoped_refptr<gfx::GLImage> > GLImageMap;
+ GLImageMap images_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImageManager);
+};
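+
+// Example usage (illustrative sketch; |image| and |image_id| are assumed to
+// be supplied by the caller):
+//
+//   ImageManager manager;
+//   manager.AddImage(image, image_id);
+//   if (gfx::GLImage* img = manager.LookupImage(image_id)) {
+//     // ...bind or copy from |img|...
+//   }
+//   manager.RemoveImage(image_id);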
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_IMAGE_MANAGER_H_
diff --git a/gpu/command_buffer/service/in_process_command_buffer.cc b/gpu/command_buffer/service/in_process_command_buffer.cc
new file mode 100644
index 0000000..45199a7
--- /dev/null
+++ b/gpu/command_buffer/service/in_process_command_buffer.cc
@@ -0,0 +1,778 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/in_process_command_buffer.h"
+
+#include <queue>
+#include <set>
+#include <utility>
+
+#include <GLES2/gl2.h>
+#ifndef GL_GLEXT_PROTOTYPES
+#define GL_GLEXT_PROTOTYPES 1
+#endif
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/sequence_checker.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/threading/thread.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/gl_context_virtual.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "ui/gfx/size.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image.h"
+#include "ui/gl/gl_share_group.h"
+
+#if defined(OS_ANDROID)
+#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
+#include "ui/gl/android/surface_texture.h"
+#endif
+
+namespace gpu {
+
+namespace {
+
+template <typename T>
+static void RunTaskWithResult(base::Callback<T(void)> task,
+ T* result,
+ base::WaitableEvent* completion) {
+ *result = task.Run();
+ completion->Signal();
+}
+
+class GpuInProcessThread
+ : public base::Thread,
+ public InProcessCommandBuffer::Service,
+ public base::RefCountedThreadSafe<GpuInProcessThread> {
+ public:
+ GpuInProcessThread();
+
+ virtual void AddRef() const OVERRIDE {
+ base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
+ }
+ virtual void Release() const OVERRIDE {
+ base::RefCountedThreadSafe<GpuInProcessThread>::Release();
+ }
+
+ virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
+ virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
+ virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }
+ virtual scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
+ OVERRIDE;
+
+ private:
+ virtual ~GpuInProcessThread();
+ friend class base::RefCountedThreadSafe<GpuInProcessThread>;
+
+ scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
+ DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
+};
+
+GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
+ Start();
+}
+
+GpuInProcessThread::~GpuInProcessThread() {
+ Stop();
+}
+
+void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
+ message_loop()->PostTask(FROM_HERE, task);
+}
+
+void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
+ message_loop()->PostDelayedTask(
+ FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
+}
+
+scoped_refptr<gles2::ShaderTranslatorCache>
+GpuInProcessThread::shader_translator_cache() {
+ if (!shader_translator_cache_.get())
+ shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
+ return shader_translator_cache_;
+}
+
+base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ =
+ LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<base::Lock> default_thread_clients_lock_ =
+ LAZY_INSTANCE_INITIALIZER;
+
+class ScopedEvent {
+ public:
+ ScopedEvent(base::WaitableEvent* event) : event_(event) {}
+ ~ScopedEvent() { event_->Signal(); }
+
+ private:
+ base::WaitableEvent* event_;
+};
+
+class SyncPointManager {
+ public:
+ SyncPointManager();
+ ~SyncPointManager();
+
+ uint32 GenerateSyncPoint();
+ void RetireSyncPoint(uint32 sync_point);
+
+ bool IsSyncPointPassed(uint32 sync_point);
+ void WaitSyncPoint(uint32 sync_point);
+
+ private:
+ // This lock protects access to pending_sync_points_ and next_sync_point_ and
+ // is used with the ConditionVariable to signal when a sync point is retired.
+ base::Lock lock_;
+ std::set<uint32> pending_sync_points_;
+ uint32 next_sync_point_;
+ base::ConditionVariable cond_var_;
+};
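+
+// Illustrative flow (sketch): sync points are generated on the client thread
+// and retired on the GPU thread once the preceding work has been scheduled.
+//
+//   uint32 sp = manager.GenerateSyncPoint();  // client thread
+//   // ...queue GPU work, then on the GPU thread...
+//   manager.RetireSyncPoint(sp);
+//   manager.WaitSyncPoint(sp);                // any waiter now returns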
+
+SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_) {}
+
+SyncPointManager::~SyncPointManager() {
+ DCHECK_EQ(pending_sync_points_.size(), 0U);
+}
+
+uint32 SyncPointManager::GenerateSyncPoint() {
+ base::AutoLock lock(lock_);
+ uint32 sync_point = next_sync_point_++;
+ DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
+ pending_sync_points_.insert(sync_point);
+ return sync_point;
+}
+
+void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
+ base::AutoLock lock(lock_);
+ DCHECK(pending_sync_points_.count(sync_point));
+ pending_sync_points_.erase(sync_point);
+ cond_var_.Broadcast();
+}
+
+bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
+ base::AutoLock lock(lock_);
+ return pending_sync_points_.count(sync_point) == 0;
+}
+
+void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
+ base::AutoLock lock(lock_);
+ while (pending_sync_points_.count(sync_point)) {
+ cond_var_.Wait();
+ }
+}
+
+base::LazyInstance<SyncPointManager> g_sync_point_manager =
+ LAZY_INSTANCE_INITIALIZER;
+
+bool WaitSyncPoint(uint32 sync_point) {
+ g_sync_point_manager.Get().WaitSyncPoint(sync_point);
+ return true;
+}
+
+} // anonymous namespace
+
+InProcessCommandBuffer::Service::Service() {}
+
+InProcessCommandBuffer::Service::~Service() {}
+
+scoped_refptr<gles2::MailboxManager>
+InProcessCommandBuffer::Service::mailbox_manager() {
+ if (!mailbox_manager_.get())
+ mailbox_manager_ = new gles2::MailboxManager();
+ return mailbox_manager_;
+}
+
+scoped_refptr<InProcessCommandBuffer::Service>
+InProcessCommandBuffer::GetDefaultService() {
+ base::AutoLock lock(default_thread_clients_lock_.Get());
+ scoped_refptr<Service> service;
+ if (!default_thread_clients_.Get().empty()) {
+ InProcessCommandBuffer* other = *default_thread_clients_.Get().begin();
+ service = other->service_;
+ DCHECK(service.get());
+ } else {
+ service = new GpuInProcessThread;
+ }
+ return service;
+}
+
+InProcessCommandBuffer::InProcessCommandBuffer(
+ const scoped_refptr<Service>& service)
+ : context_lost_(false),
+ idle_work_pending_(false),
+ last_put_offset_(-1),
+ flush_event_(false, false),
+ service_(service.get() ? service : GetDefaultService()),
+ gpu_thread_weak_ptr_factory_(this) {
+ if (!service.get()) {
+ base::AutoLock lock(default_thread_clients_lock_.Get());
+ default_thread_clients_.Get().insert(this);
+ }
+}
+
+InProcessCommandBuffer::~InProcessCommandBuffer() {
+ Destroy();
+ base::AutoLock lock(default_thread_clients_lock_.Get());
+ default_thread_clients_.Get().erase(this);
+}
+
+void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
+ CheckSequencedThread();
+ DCHECK(!surface_->IsOffscreen());
+ surface_->Resize(size);
+}
+
+bool InProcessCommandBuffer::MakeCurrent() {
+ CheckSequencedThread();
+ command_buffer_lock_.AssertAcquired();
+
+ if (!context_lost_ && decoder_->MakeCurrent())
+ return true;
+ DLOG(ERROR) << "Context lost because MakeCurrent failed.";
+ command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+ command_buffer_->SetParseError(gpu::error::kLostContext);
+ return false;
+}
+
+void InProcessCommandBuffer::PumpCommands() {
+ CheckSequencedThread();
+ command_buffer_lock_.AssertAcquired();
+
+ if (!MakeCurrent())
+ return;
+
+ gpu_scheduler_->PutChanged();
+}
+
+bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
+ CheckSequencedThread();
+ command_buffer_lock_.AssertAcquired();
+ command_buffer_->SetGetBuffer(transfer_buffer_id);
+ return true;
+}
+
+bool InProcessCommandBuffer::Initialize(
+ scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ const std::vector<int32>& attribs,
+ gfx::GpuPreference gpu_preference,
+ const base::Closure& context_lost_callback,
+ InProcessCommandBuffer* share_group) {
+ DCHECK(!share_group || service_.get() == share_group->service_.get());
+ context_lost_callback_ = WrapCallback(context_lost_callback);
+
+ if (surface.get()) {
+ // GPU thread must be the same as client thread due to GLSurface not being
+ // thread safe.
+ sequence_checker_.reset(new base::SequenceChecker);
+ surface_ = surface;
+ }
+
+ gpu::Capabilities capabilities;
+ InitializeOnGpuThreadParams params(is_offscreen,
+ window,
+ size,
+ attribs,
+ gpu_preference,
+ &capabilities,
+ share_group);
+
+ base::Callback<bool(void)> init_task =
+ base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
+ base::Unretained(this),
+ params);
+
+ base::WaitableEvent completion(true, false);
+ bool result = false;
+ QueueTask(
+ base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
+ completion.Wait();
+
+ if (result)
+ capabilities_ = capabilities;
+
+ return result;
+}
+
+bool InProcessCommandBuffer::InitializeOnGpuThread(
+ const InitializeOnGpuThreadParams& params) {
+ CheckSequencedThread();
+ gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();
+
+ DCHECK(params.size.width() >= 0 && params.size.height() >= 0);
+
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ manager->Initialize();
+
+ scoped_ptr<CommandBufferService> command_buffer(
+ new CommandBufferService(transfer_buffer_manager_.get()));
+ command_buffer->SetPutOffsetChangeCallback(base::Bind(
+ &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
+ command_buffer->SetParseErrorCallback(base::Bind(
+ &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));
+
+ if (!command_buffer->Initialize()) {
+ LOG(ERROR) << "Could not initialize command buffer.";
+ DestroyOnGpuThread();
+ return false;
+ }
+
+ gl_share_group_ = params.context_group
+ ? params.context_group->gl_share_group_.get()
+ : new gfx::GLShareGroup;
+
+#if defined(OS_ANDROID)
+ stream_texture_manager_.reset(new StreamTextureManagerInProcess);
+#endif
+
+ bool bind_generates_resource = false;
+ decoder_.reset(gles2::GLES2Decoder::Create(
+ params.context_group
+ ? params.context_group->decoder_->GetContextGroup()
+ : new gles2::ContextGroup(service_->mailbox_manager(),
+ NULL,
+ service_->shader_translator_cache(),
+ NULL,
+ bind_generates_resource)));
+
+ gpu_scheduler_.reset(
+ new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
+ command_buffer->SetGetBufferChangeCallback(base::Bind(
+ &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
+ command_buffer_ = command_buffer.Pass();
+
+ decoder_->set_engine(gpu_scheduler_.get());
+
+ if (!surface_.get()) {
+ if (params.is_offscreen)
+ surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
+ else
+ surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
+ }
+
+ if (!surface_.get()) {
+ LOG(ERROR) << "Could not create GLSurface.";
+ DestroyOnGpuThread();
+ return false;
+ }
+
+ if (service_->UseVirtualizedGLContexts()) {
+ context_ = gl_share_group_->GetSharedContext();
+ if (!context_.get()) {
+ context_ = gfx::GLContext::CreateGLContext(
+ gl_share_group_.get(), surface_.get(), params.gpu_preference);
+ gl_share_group_->SetSharedContext(context_.get());
+ }
+
+ context_ = new GLContextVirtual(
+ gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
+ if (context_->Initialize(surface_.get(), params.gpu_preference)) {
+ VLOG(1) << "Created virtual GL context.";
+ } else {
+ context_ = NULL;
+ }
+ } else {
+ context_ = gfx::GLContext::CreateGLContext(
+ gl_share_group_.get(), surface_.get(), params.gpu_preference);
+ }
+
+ if (!context_.get()) {
+ LOG(ERROR) << "Could not create GLContext.";
+ DestroyOnGpuThread();
+ return false;
+ }
+
+ if (!context_->MakeCurrent(surface_.get())) {
+ LOG(ERROR) << "Could not make context current.";
+ DestroyOnGpuThread();
+ return false;
+ }
+
+ gles2::DisallowedFeatures disallowed_features;
+ disallowed_features.gpu_memory_manager = true;
+ if (!decoder_->Initialize(surface_,
+ context_,
+ params.is_offscreen,
+ params.size,
+ disallowed_features,
+ params.attribs)) {
+ LOG(ERROR) << "Could not initialize decoder.";
+ DestroyOnGpuThread();
+ return false;
+ }
+ *params.capabilities = decoder_->GetCapabilities();
+
+ if (!params.is_offscreen) {
+ decoder_->SetResizeCallback(base::Bind(
+ &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
+ }
+ decoder_->SetWaitSyncPointCallback(base::Bind(&WaitSyncPoint));
+
+ return true;
+}
+
+void InProcessCommandBuffer::Destroy() {
+ CheckSequencedThread();
+
+ base::WaitableEvent completion(true, false);
+ bool result = false;
+ base::Callback<bool(void)> destroy_task = base::Bind(
+ &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
+ QueueTask(
+ base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
+ completion.Wait();
+}
+
+bool InProcessCommandBuffer::DestroyOnGpuThread() {
+ CheckSequencedThread();
+ gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
+ command_buffer_.reset();
+ // Clean up GL resources if possible.
+ bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
+ if (decoder_) {
+ decoder_->Destroy(have_context);
+ decoder_.reset();
+ }
+ context_ = NULL;
+ surface_ = NULL;
+ gl_share_group_ = NULL;
+#if defined(OS_ANDROID)
+ stream_texture_manager_.reset();
+#endif
+
+ return true;
+}
+
+void InProcessCommandBuffer::CheckSequencedThread() {
+ DCHECK(!sequence_checker_ ||
+ sequence_checker_->CalledOnValidSequencedThread());
+}
+
+void InProcessCommandBuffer::OnContextLost() {
+ CheckSequencedThread();
+ if (!context_lost_callback_.is_null()) {
+ context_lost_callback_.Run();
+ context_lost_callback_.Reset();
+ }
+
+ context_lost_ = true;
+}
+
+CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
+ CheckSequencedThread();
+ base::AutoLock lock(state_after_last_flush_lock_);
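+  // The unsigned subtraction treats the 32-bit generation counter as a
+  // circular sequence number, so the check stays correct across wraparound:
+  // last_state_ is only replaced by a state that is not older than it.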
+ if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
+ last_state_ = state_after_last_flush_;
+ return last_state_;
+}
+
+CommandBuffer::State InProcessCommandBuffer::GetLastState() {
+ CheckSequencedThread();
+ return last_state_;
+}
+
+int32 InProcessCommandBuffer::GetLastToken() {
+ CheckSequencedThread();
+ GetStateFast();
+ return last_state_.token;
+}
+
+void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
+ CheckSequencedThread();
+ ScopedEvent handle_flush(&flush_event_);
+ base::AutoLock lock(command_buffer_lock_);
+ command_buffer_->Flush(put_offset);
+ {
+ // Update state before signaling the flush event.
+ base::AutoLock lock(state_after_last_flush_lock_);
+ state_after_last_flush_ = command_buffer_->GetLastState();
+ }
+ DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
+ (error::IsError(state_after_last_flush_.error) && context_lost_));
+
+ // If we've processed all pending commands but still have pending queries,
+ // pump idle work until the query is passed.
+ if (put_offset == state_after_last_flush_.get_offset &&
+ gpu_scheduler_->HasMoreWork()) {
+ ScheduleIdleWorkOnGpuThread();
+ }
+}
+
+void InProcessCommandBuffer::PerformIdleWork() {
+ CheckSequencedThread();
+ idle_work_pending_ = false;
+ base::AutoLock lock(command_buffer_lock_);
+ if (MakeCurrent() && gpu_scheduler_->HasMoreWork()) {
+ gpu_scheduler_->PerformIdleWork();
+ ScheduleIdleWorkOnGpuThread();
+ }
+}
+
+void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
+ CheckSequencedThread();
+ if (idle_work_pending_)
+ return;
+ idle_work_pending_ = true;
+ service_->ScheduleIdleWork(
+ base::Bind(&InProcessCommandBuffer::PerformIdleWork,
+ gpu_thread_weak_ptr_));
+}
+
+void InProcessCommandBuffer::Flush(int32 put_offset) {
+ CheckSequencedThread();
+ if (last_state_.error != gpu::error::kNoError)
+ return;
+
+ if (last_put_offset_ == put_offset)
+ return;
+
+ last_put_offset_ = put_offset;
+ base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
+ gpu_thread_weak_ptr_,
+ put_offset);
+ QueueTask(task);
+}
+
+void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
+ CheckSequencedThread();
+ while (!InRange(start, end, GetLastToken()) &&
+ last_state_.error == gpu::error::kNoError)
+ flush_event_.Wait();
+}
+
+void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
+ CheckSequencedThread();
+
+ GetStateFast();
+ while (!InRange(start, end, last_state_.get_offset) &&
+ last_state_.error == gpu::error::kNoError) {
+ flush_event_.Wait();
+ GetStateFast();
+ }
+}
+
+void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
+ CheckSequencedThread();
+ if (last_state_.error != gpu::error::kNoError)
+ return;
+
+ {
+ base::AutoLock lock(command_buffer_lock_);
+ command_buffer_->SetGetBuffer(shm_id);
+ last_put_offset_ = 0;
+ }
+ {
+ base::AutoLock lock(state_after_last_flush_lock_);
+ state_after_last_flush_ = command_buffer_->GetLastState();
+ }
+}
+
+scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
+ int32* id) {
+ CheckSequencedThread();
+ base::AutoLock lock(command_buffer_lock_);
+ return command_buffer_->CreateTransferBuffer(size, id);
+}
+
+void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
+ CheckSequencedThread();
+ base::Closure task =
+ base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
+ base::Unretained(this),
+ id);
+
+ QueueTask(task);
+}
+
+void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id) {
+ base::AutoLock lock(command_buffer_lock_);
+ command_buffer_->DestroyTransferBuffer(id);
+}
+
+gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
+ return capabilities_;
+}
+
+gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
+ size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage,
+ int32* id) {
+ NOTREACHED();
+ return NULL;
+}
+
+void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
+ NOTREACHED();
+}
+
+uint32 InProcessCommandBuffer::InsertSyncPoint() {
+ uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
+ QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
+ base::Unretained(this),
+ sync_point));
+ return sync_point;
+}
+
+uint32 InProcessCommandBuffer::InsertFutureSyncPoint() {
+ return g_sync_point_manager.Get().GenerateSyncPoint();
+}
+
+void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point) {
+ QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
+ base::Unretained(this),
+ sync_point));
+}
+
+void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
+ gles2::MailboxManager* mailbox_manager =
+ decoder_->GetContextGroup()->mailbox_manager();
+ if (mailbox_manager->UsesSync()) {
+ bool make_current_success = false;
+ {
+ base::AutoLock lock(command_buffer_lock_);
+ make_current_success = MakeCurrent();
+ }
+ if (make_current_success)
+ mailbox_manager->PushTextureUpdates();
+ }
+ g_sync_point_manager.Get().RetireSyncPoint(sync_point);
+}
+
+void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
+ const base::Closure& callback) {
+ CheckSequencedThread();
+ QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
+ base::Unretained(this),
+ sync_point,
+ WrapCallback(callback)));
+}
+
+void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
+ unsigned sync_point,
+ const base::Closure& callback) {
+ if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
+ callback.Run();
+ } else {
+ service_->ScheduleIdleWork(
+ base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
+ gpu_thread_weak_ptr_,
+ sync_point,
+ callback));
+ }
+}
+
+void InProcessCommandBuffer::SignalQuery(unsigned query_id,
+ const base::Closure& callback) {
+ CheckSequencedThread();
+ QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
+ base::Unretained(this),
+ query_id,
+ WrapCallback(callback)));
+}
+
+void InProcessCommandBuffer::SignalQueryOnGpuThread(
+ unsigned query_id,
+ const base::Closure& callback) {
+ gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
+ DCHECK(query_manager_);
+
+ gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
+ if (!query)
+ callback.Run();
+ else
+ query->AddCallback(callback);
+}
+
+void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}
+
+uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
+ base::WaitableEvent completion(true, false);
+ uint32 stream_id = 0;
+ base::Callback<uint32(void)> task =
+ base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
+ base::Unretained(this),
+ texture_id);
+ QueueTask(
+ base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
+ completion.Wait();
+ return stream_id;
+}
+
+uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
+ uint32 client_texture_id) {
+#if defined(OS_ANDROID)
+ return stream_texture_manager_->CreateStreamTexture(
+ client_texture_id, decoder_->GetContextGroup()->texture_manager());
+#else
+ return 0;
+#endif
+}
+
+gpu::error::Error InProcessCommandBuffer::GetLastError() {
+ CheckSequencedThread();
+ return last_state_.error;
+}
+
+bool InProcessCommandBuffer::Initialize() {
+ NOTREACHED();
+ return false;
+}
+
+namespace {
+
+void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Closure& callback) {
+ if (!loop->BelongsToCurrentThread()) {
+ loop->PostTask(FROM_HERE, callback);
+ } else {
+ callback.Run();
+ }
+}
+
+void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
+ DCHECK(callback.get());
+ callback->Run();
+}
+
+} // anonymous namespace
+
+base::Closure InProcessCommandBuffer::WrapCallback(
+ const base::Closure& callback) {
+ // Make sure the callback gets deleted on the target thread by passing
+ // ownership.
+ scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
+ base::Closure callback_on_client_thread =
+ base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
+ base::Closure wrapped_callback =
+ base::Bind(&PostCallback, base::MessageLoopProxy::current(),
+ callback_on_client_thread);
+ return wrapped_callback;
+}
+
+#if defined(OS_ANDROID)
+scoped_refptr<gfx::SurfaceTexture>
+InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
+ DCHECK(stream_texture_manager_);
+ return stream_texture_manager_->GetSurfaceTexture(stream_id);
+}
+#endif
+
+} // namespace gpu
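An illustrative sketch, not part of this patch: how a client of the interface implemented above might flush commands and block on a token. The helper function, its parameters, and the token value are hypothetical; only Flush(), WaitForTokenInRange(), and GetLastError() come from the code itself.

// Hypothetical client-side helper; assumes |command_buffer| has already been
// initialized on this thread and |expected_token| was written into the
// command stream by the client.
void FlushAndWaitExample(gpu::InProcessCommandBuffer* command_buffer,
                         int32 put_offset,
                         int32 expected_token) {
  // FlushOnGpuThread runs on the service thread and updates
  // state_after_last_flush_ before signaling flush_event_.
  command_buffer->Flush(put_offset);

  // Returns once the token is in range or the context has been lost.
  command_buffer->WaitForTokenInRange(expected_token, expected_token);

  if (command_buffer->GetLastError() != gpu::error::kNoError) {
    // Context lost; the context_lost_callback_ passed to Initialize() has
    // already been invoked.
  }
}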
diff --git a/gpu/command_buffer/service/in_process_command_buffer.h b/gpu/command_buffer/service/in_process_command_buffer.h
new file mode 100644
index 0000000..b650725
--- /dev/null
+++ b/gpu/command_buffer/service/in_process_command_buffer.h
@@ -0,0 +1,235 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_IN_PROCESS_COMMAND_BUFFER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_IN_PROCESS_COMMAND_BUFFER_H_
+
+#include <map>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/containers/scoped_ptr_hash_map.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/gpu_export.h"
+#include "ui/gfx/native_widget_types.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/gpu_preference.h"
+
+namespace base {
+class SequenceChecker;
+}
+
+namespace gfx {
+class GLContext;
+class GLShareGroup;
+class GLSurface;
+class Size;
+}
+
+#if defined(OS_ANDROID)
+namespace gfx {
+class SurfaceTexture;
+}
+namespace gpu {
+class StreamTextureManagerInProcess;
+}
+#endif
+
+namespace gpu {
+
+namespace gles2 {
+class GLES2Decoder;
+class MailboxManager;
+class ShaderTranslatorCache;
+}
+
+class CommandBufferServiceBase;
+class GpuScheduler;
+class TransferBufferManagerInterface;
+
+// This class provides a thread-safe interface to the global GPU service (for
+// example, the GPU thread) when running in single-process mode. However, the
+// behavior of accessing one context (i.e. one instance of this class) from
+// different client threads is undefined.
+class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
+ public GpuControl {
+ public:
+ class Service;
+ explicit InProcessCommandBuffer(const scoped_refptr<Service>& service);
+ virtual ~InProcessCommandBuffer();
+
+ // If |surface| is not NULL, use it directly; in this case, the command
+ // buffer gpu thread must be the same as the client thread. Otherwise create
+ // a new GLSurface.
+ bool Initialize(scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ const std::vector<int32>& attribs,
+ gfx::GpuPreference gpu_preference,
+ const base::Closure& context_lost_callback,
+ InProcessCommandBuffer* share_group);
+ void Destroy();
+
+ // CommandBuffer implementation:
+ virtual bool Initialize() OVERRIDE;
+ virtual State GetLastState() OVERRIDE;
+ virtual int32 GetLastToken() OVERRIDE;
+ virtual void Flush(int32 put_offset) OVERRIDE;
+ virtual void WaitForTokenInRange(int32 start, int32 end) OVERRIDE;
+ virtual void WaitForGetOffsetInRange(int32 start, int32 end) OVERRIDE;
+ virtual void SetGetBuffer(int32 shm_id) OVERRIDE;
+ virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ int32* id) OVERRIDE;
+ virtual void DestroyTransferBuffer(int32 id) OVERRIDE;
+ virtual gpu::error::Error GetLastError() OVERRIDE;
+
+ // GpuControl implementation:
+ virtual gpu::Capabilities GetCapabilities() OVERRIDE;
+ virtual gfx::GpuMemoryBuffer* CreateGpuMemoryBuffer(size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage,
+ int32* id) OVERRIDE;
+ virtual void DestroyGpuMemoryBuffer(int32 id) OVERRIDE;
+ virtual uint32 InsertSyncPoint() OVERRIDE;
+ virtual uint32 InsertFutureSyncPoint() OVERRIDE;
+ virtual void RetireSyncPoint(uint32 sync_point) OVERRIDE;
+ virtual void SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) OVERRIDE;
+ virtual void SignalQuery(uint32 query_id,
+ const base::Closure& callback) OVERRIDE;
+ virtual void SetSurfaceVisible(bool visible) OVERRIDE;
+ virtual uint32 CreateStreamTexture(uint32 texture_id) OVERRIDE;
+
+ // The serializer interface to the GPU service (i.e. thread).
+ class Service {
+ public:
+ Service();
+ virtual ~Service();
+
+ virtual void AddRef() const = 0;
+ virtual void Release() const = 0;
+
+ // Queues a task to run as soon as possible.
+ virtual void ScheduleTask(const base::Closure& task) = 0;
+
+    // Schedules |task| to run at an appropriate time for performing idle
+    // work.
+ virtual void ScheduleIdleWork(const base::Closure& task) = 0;
+
+ virtual bool UseVirtualizedGLContexts() = 0;
+ virtual scoped_refptr<gles2::ShaderTranslatorCache>
+ shader_translator_cache() = 0;
+ scoped_refptr<gles2::MailboxManager> mailbox_manager();
+
+ private:
+ scoped_refptr<gles2::MailboxManager> mailbox_manager_;
+ };
+
+#if defined(OS_ANDROID)
+ scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture(
+ uint32 stream_id);
+#endif
+
+ private:
+ struct InitializeOnGpuThreadParams {
+ bool is_offscreen;
+ gfx::AcceleratedWidget window;
+ const gfx::Size& size;
+ const std::vector<int32>& attribs;
+ gfx::GpuPreference gpu_preference;
+    gpu::Capabilities* capabilities;  // Output.
+ InProcessCommandBuffer* context_group;
+
+ InitializeOnGpuThreadParams(bool is_offscreen,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ const std::vector<int32>& attribs,
+ gfx::GpuPreference gpu_preference,
+ gpu::Capabilities* capabilities,
+ InProcessCommandBuffer* share_group)
+ : is_offscreen(is_offscreen),
+ window(window),
+ size(size),
+ attribs(attribs),
+ gpu_preference(gpu_preference),
+ capabilities(capabilities),
+ context_group(share_group) {}
+ };
+
+ bool InitializeOnGpuThread(const InitializeOnGpuThreadParams& params);
+ bool DestroyOnGpuThread();
+ void FlushOnGpuThread(int32 put_offset);
+ void ScheduleIdleWorkOnGpuThread();
+ uint32 CreateStreamTextureOnGpuThread(uint32 client_texture_id);
+ bool MakeCurrent();
+ base::Closure WrapCallback(const base::Closure& callback);
+ State GetStateFast();
+ void QueueTask(const base::Closure& task) { service_->ScheduleTask(task); }
+ void CheckSequencedThread();
+ void RetireSyncPointOnGpuThread(uint32 sync_point);
+ void SignalSyncPointOnGpuThread(uint32 sync_point,
+ const base::Closure& callback);
+ void SignalQueryOnGpuThread(unsigned query_id, const base::Closure& callback);
+ void DestroyTransferBufferOnGpuThread(int32 id);
+
+ // Callbacks:
+ void OnContextLost();
+ void OnResizeView(gfx::Size size, float scale_factor);
+ bool GetBufferChanged(int32 transfer_buffer_id);
+ void PumpCommands();
+ void PerformIdleWork();
+
+ static scoped_refptr<Service> GetDefaultService();
+
+ // Members accessed on the gpu thread (possibly with the exception of
+ // creation):
+ bool context_lost_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_ptr<gles2::GLES2Decoder> decoder_;
+ scoped_refptr<gfx::GLContext> context_;
+ scoped_refptr<gfx::GLSurface> surface_;
+ base::Closure context_lost_callback_;
+ bool idle_work_pending_; // Used to throttle PerformIdleWork.
+
+ // Members accessed on the client thread:
+ State last_state_;
+ int32 last_put_offset_;
+ gpu::Capabilities capabilities_;
+
+ // Accessed on both threads:
+ scoped_ptr<CommandBufferServiceBase> command_buffer_;
+ base::Lock command_buffer_lock_;
+ base::WaitableEvent flush_event_;
+ scoped_refptr<Service> service_;
+ State state_after_last_flush_;
+ base::Lock state_after_last_flush_lock_;
+ scoped_refptr<gfx::GLShareGroup> gl_share_group_;
+
+#if defined(OS_ANDROID)
+ scoped_ptr<StreamTextureManagerInProcess> stream_texture_manager_;
+#endif
+
+  // Only used with explicit scheduling, when the gpu thread is the same as
+  // the client thread.
+ scoped_ptr<base::SequenceChecker> sequence_checker_;
+
+ base::WeakPtr<InProcessCommandBuffer> gpu_thread_weak_ptr_;
+ base::WeakPtrFactory<InProcessCommandBuffer> gpu_thread_weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(InProcessCommandBuffer);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_IN_PROCESS_COMMAND_BUFFER_H_
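A minimal sketch of a Service implementation, assuming only the interface declared in this header; the class name, the use of base::MessageLoopProxy, and the stubbed-out translator cache are illustrative rather than how Chromium's real in-process service behaves.

class LoopBackedService
    : public gpu::InProcessCommandBuffer::Service,
      public base::RefCountedThreadSafe<LoopBackedService> {
 public:
  LoopBackedService() : loop_(base::MessageLoopProxy::current()) {}

  virtual void AddRef() const OVERRIDE {
    base::RefCountedThreadSafe<LoopBackedService>::AddRef();
  }
  virtual void Release() const OVERRIDE {
    base::RefCountedThreadSafe<LoopBackedService>::Release();
  }
  virtual void ScheduleTask(const base::Closure& task) OVERRIDE {
    loop_->PostTask(FROM_HERE, task);
  }
  virtual void ScheduleIdleWork(const base::Closure& task) OVERRIDE {
    // Crude sketch: treat idle work like any other task. A real service
    // would defer it until the loop is otherwise idle.
    loop_->PostTask(FROM_HERE, task);
  }
  virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }
  virtual scoped_refptr<gpu::gles2::ShaderTranslatorCache>
      shader_translator_cache() OVERRIDE {
    return NULL;  // Sketch only; a real service owns a translator cache.
  }

 private:
  friend class base::RefCountedThreadSafe<LoopBackedService>;
  virtual ~LoopBackedService() {}

  scoped_refptr<base::MessageLoopProxy> loop_;
};

A client could then pass scoped_refptr<InProcessCommandBuffer::Service>(new LoopBackedService) to the InProcessCommandBuffer constructor.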
diff --git a/gpu/command_buffer/service/logger.cc b/gpu/command_buffer/service/logger.cc
new file mode 100644
index 0000000..1fd2933
--- /dev/null
+++ b/gpu/command_buffer/service/logger.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/logger.h"
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/debug_marker_manager.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+
+namespace gpu {
+namespace gles2 {
+
+Logger::Logger(const DebugMarkerManager* debug_marker_manager)
+ : debug_marker_manager_(debug_marker_manager),
+ log_message_count_(0),
+ log_synthesized_gl_errors_(true) {
+ Logger* this_temp = this;
+ this_in_hex_ = std::string("GroupMarkerNotSet(crbug.com/242999)!:") +
+ base::HexEncode(&this_temp, sizeof(this_temp));
+}
+
+Logger::~Logger() {}
+
+void Logger::LogMessage(
+ const char* filename, int line, const std::string& msg) {
+ if (log_message_count_ < kMaxLogMessages ||
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableGLErrorLimit)) {
+ std::string prefixed_msg(std::string("[") + GetLogPrefix() + "]" + msg);
+ ++log_message_count_;
+ // LOG this unless logging is turned off as any chromium code that
+ // generates these errors probably has a bug.
+ if (log_synthesized_gl_errors_) {
+ ::logging::LogMessage(
+ filename, line, ::logging::LOG_ERROR).stream() << prefixed_msg;
+ }
+ if (!msg_callback_.is_null()) {
+ msg_callback_.Run(0, prefixed_msg);
+ }
+ } else {
+ if (log_message_count_ == kMaxLogMessages) {
+ ++log_message_count_;
+ LOG(ERROR)
+ << "Too many GL errors, not reporting any more for this context."
+ << " use --disable-gl-error-limit to see all errors.";
+ }
+ }
+}
+
+const std::string& Logger::GetLogPrefix() const {
+ const std::string& prefix(debug_marker_manager_->GetMarker());
+ return prefix.empty() ? this_in_hex_ : prefix;
+}
+
+void Logger::SetMsgCallback(const MsgCallback& callback) {
+ msg_callback_ = callback;
+}
+
+} // namespace gles2
+} // namespace gpu
+
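For orientation, a hedged usage sketch (not part of the diff): a caller that holds a DebugMarkerManager reporting a synthesized GL error through Logger; the message text is made up.

void LogSyntheticErrorExample(
    const gpu::gles2::DebugMarkerManager* debug_marker_manager) {
  gpu::gles2::Logger logger(debug_marker_manager);

  // The message is prefixed with the current group marker (or the hex'd
  // pointer fallback) and rate-limited after kMaxLogMessages.
  logger.LogMessage(__FILE__, __LINE__,
                    "GL ERROR : glTexImage2D: <- error from previous command");
}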
diff --git a/gpu/command_buffer/service/logger.h b/gpu/command_buffer/service/logger.h
new file mode 100644
index 0000000..4691443
--- /dev/null
+++ b/gpu/command_buffer/service/logger.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the Logger class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_LOGGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_LOGGER_H_
+
+#include <string>
+
+#include "base/callback.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+typedef base::Callback<void(int32 id, const std::string& msg)> MsgCallback;
+
+class DebugMarkerManager;
+
+class GPU_EXPORT Logger {
+ public:
+ static const int kMaxLogMessages = 256;
+
+ explicit Logger(const DebugMarkerManager* debug_marker_manager);
+ ~Logger();
+
+ void LogMessage(const char* filename, int line, const std::string& msg);
+ const std::string& GetLogPrefix() const;
+
+  // Defaults to true. Set to false for the gpu_unittests, which explicitly
+  // check that errors are generated and therefore don't need the numerous
+  // messages. Otherwise, chromium code that generates these errors likely
+  // has a bug.
+ void set_log_synthesized_gl_errors(bool enabled) {
+ log_synthesized_gl_errors_ = enabled;
+ }
+
+ void SetMsgCallback(const MsgCallback& callback);
+
+ private:
+ // Uses the current marker to add information to logs.
+ const DebugMarkerManager* debug_marker_manager_;
+ std::string this_in_hex_;
+
+ int log_message_count_;
+ bool log_synthesized_gl_errors_;
+
+ MsgCallback msg_callback_;
+ DISALLOW_COPY_AND_ASSIGN(Logger);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_LOGGER_H_
+
diff --git a/gpu/command_buffer/service/mailbox_manager.cc b/gpu/command_buffer/service/mailbox_manager.cc
new file mode 100644
index 0000000..e6962df
--- /dev/null
+++ b/gpu/command_buffer/service/mailbox_manager.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/mailbox_manager.h"
+
+#include <algorithm>
+
+#include "crypto/random.h"
+#include "gpu/command_buffer/service/mailbox_synchronizer.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+MailboxManager::MailboxManager()
+ : mailbox_to_textures_(std::ptr_fun(&MailboxManager::TargetNameLess)),
+ sync_(MailboxSynchronizer::GetInstance()) {
+}
+
+MailboxManager::~MailboxManager() {
+ DCHECK(mailbox_to_textures_.empty());
+ DCHECK(textures_to_mailboxes_.empty());
+}
+
+Texture* MailboxManager::ConsumeTexture(unsigned target,
+ const Mailbox& mailbox) {
+ TargetName target_name(target, mailbox);
+ MailboxToTextureMap::iterator it =
+ mailbox_to_textures_.find(target_name);
+ if (it != mailbox_to_textures_.end())
+ return it->second->first;
+
+ if (sync_) {
+ // See if it's visible in another mailbox manager, and if so make it visible
+ // here too.
+ Texture* texture = sync_->CreateTextureFromMailbox(target, mailbox);
+ if (texture) {
+ InsertTexture(target_name, texture);
+ DCHECK_EQ(0U, texture->refs_.size());
+ }
+ return texture;
+ }
+
+ return NULL;
+}
+
+void MailboxManager::ProduceTexture(unsigned target,
+ const Mailbox& mailbox,
+ Texture* texture) {
+ TargetName target_name(target, mailbox);
+ MailboxToTextureMap::iterator it = mailbox_to_textures_.find(target_name);
+ if (it != mailbox_to_textures_.end()) {
+ if (it->second->first == texture)
+ return;
+ TextureToMailboxMap::iterator texture_it = it->second;
+ mailbox_to_textures_.erase(it);
+ textures_to_mailboxes_.erase(texture_it);
+ }
+ InsertTexture(target_name, texture);
+}
+
+void MailboxManager::InsertTexture(TargetName target_name, Texture* texture) {
+ texture->SetMailboxManager(this);
+ TextureToMailboxMap::iterator texture_it =
+ textures_to_mailboxes_.insert(std::make_pair(texture, target_name));
+ mailbox_to_textures_.insert(std::make_pair(target_name, texture_it));
+ DCHECK_EQ(mailbox_to_textures_.size(), textures_to_mailboxes_.size());
+}
+
+void MailboxManager::TextureDeleted(Texture* texture) {
+ std::pair<TextureToMailboxMap::iterator,
+ TextureToMailboxMap::iterator> range =
+ textures_to_mailboxes_.equal_range(texture);
+ for (TextureToMailboxMap::iterator it = range.first;
+ it != range.second; ++it) {
+ size_t count = mailbox_to_textures_.erase(it->second);
+ DCHECK(count == 1);
+ }
+ textures_to_mailboxes_.erase(range.first, range.second);
+ DCHECK_EQ(mailbox_to_textures_.size(), textures_to_mailboxes_.size());
+
+ if (sync_)
+ sync_->TextureDeleted(texture);
+}
+
+void MailboxManager::PushTextureUpdates() {
+ if (sync_)
+ sync_->PushTextureUpdates(this);
+}
+
+void MailboxManager::PullTextureUpdates() {
+ if (sync_)
+ sync_->PullTextureUpdates(this);
+}
+
+MailboxManager::TargetName::TargetName(unsigned target, const Mailbox& mailbox)
+ : target(target),
+ mailbox(mailbox) {
+}
+
+bool MailboxManager::TargetNameLess(const MailboxManager::TargetName& lhs,
+ const MailboxManager::TargetName& rhs) {
+ if (lhs.target != rhs.target)
+ return lhs.target < rhs.target;
+ return lhs.mailbox < rhs.mailbox;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/mailbox_manager.h b/gpu/command_buffer/service/mailbox_manager.h
new file mode 100644
index 0000000..e1b36cb
--- /dev/null
+++ b/gpu/command_buffer/service/mailbox_manager.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MAILBOX_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MAILBOX_MANAGER_H_
+
+#include <functional>
+#include <map>
+
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/gpu_export.h"
+
+typedef signed char GLbyte;
+
+namespace gpu {
+namespace gles2 {
+
+class MailboxSynchronizer;
+class Texture;
+class TextureManager;
+
+// Manages resources scoped beyond the context or context group level.
+class GPU_EXPORT MailboxManager : public base::RefCounted<MailboxManager> {
+ public:
+ MailboxManager();
+
+ // Look up the texture definition from the named mailbox.
+ Texture* ConsumeTexture(unsigned target, const Mailbox& mailbox);
+
+ // Put the texture into the named mailbox.
+ void ProduceTexture(unsigned target,
+ const Mailbox& mailbox,
+ Texture* texture);
+
+ // Returns whether this manager synchronizes with other instances.
+ bool UsesSync() { return sync_ != NULL; }
+
+ // Used with the MailboxSynchronizer to push/pull texture state to/from
+ // other manager instances.
+ void PushTextureUpdates();
+ void PullTextureUpdates();
+
+  // Destroy any mailbox that references the given texture.
+ void TextureDeleted(Texture* texture);
+
+ private:
+ friend class base::RefCounted<MailboxManager>;
+ friend class MailboxSynchronizer;
+
+ ~MailboxManager();
+
+ struct TargetName {
+ TargetName(unsigned target, const Mailbox& mailbox);
+ unsigned target;
+ Mailbox mailbox;
+ };
+ void InsertTexture(TargetName target_name, Texture* texture);
+
+ static bool TargetNameLess(const TargetName& lhs, const TargetName& rhs);
+
+  // This is a bidirectional map between mailboxes and textures. There can be
+  // multiple mailboxes per texture, but only one texture per mailbox. We keep
+  // an iterator in the MailboxToTextureMap to be able to manage changes to
+  // the TextureToMailboxMap efficiently.
+ typedef std::multimap<Texture*, TargetName> TextureToMailboxMap;
+ typedef std::map<TargetName,
+ TextureToMailboxMap::iterator,
+ std::pointer_to_binary_function<const TargetName&,
+ const TargetName&,
+ bool> > MailboxToTextureMap;
+
+ MailboxToTextureMap mailbox_to_textures_;
+ TextureToMailboxMap textures_to_mailboxes_;
+
+ MailboxSynchronizer* sync_;
+
+ DISALLOW_COPY_AND_ASSIGN(MailboxManager);
+};
+}  // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MAILBOX_MANAGER_H_
+
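A compact sketch of the produce/consume round trip that the unit tests below exercise; the helper function and its arguments are hypothetical, and |texture| is assumed to outlive the call.

void ShareByMailboxExample(gpu::gles2::MailboxManager* manager,
                           gpu::gles2::Texture* texture) {
  gpu::Mailbox name = gpu::Mailbox::Generate();
  manager->ProduceTexture(GL_TEXTURE_2D, name, texture);

  // Any decoder sharing |manager| can now resolve the same Texture by name,
  // until the texture is deleted and TextureDeleted() cleans up the entry.
  gpu::gles2::Texture* same_texture =
      manager->ConsumeTexture(GL_TEXTURE_2D, name);
  DCHECK_EQ(texture, same_texture);
}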
diff --git a/gpu/command_buffer/service/mailbox_manager_unittest.cc b/gpu/command_buffer/service/mailbox_manager_unittest.cc
new file mode 100644
index 0000000..df1cd4e
--- /dev/null
+++ b/gpu/command_buffer/service/mailbox_manager_unittest.cc
@@ -0,0 +1,479 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/mailbox_manager.h"
+
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mailbox_synchronizer.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_context_stub.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+
+namespace gpu {
+namespace gles2 {
+
+using namespace ::testing;
+
+class MailboxManagerTest : public GpuServiceTest {
+ public:
+ MailboxManagerTest() : initialized_synchronizer_(false) {}
+ virtual ~MailboxManagerTest() {}
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ feature_info_ = new FeatureInfo;
+ manager_ = new MailboxManager;
+ }
+
+ virtual void SetUpWithSynchronizer() {
+ GpuServiceTest::SetUp();
+ MailboxSynchronizer::Initialize();
+ initialized_synchronizer_ = true;
+ feature_info_ = new FeatureInfo;
+ manager_ = new MailboxManager;
+ }
+
+ virtual void TearDown() {
+ if (initialized_synchronizer_)
+ MailboxSynchronizer::Terminate();
+ GpuServiceTest::TearDown();
+ }
+
+ Texture* CreateTexture() {
+ return new Texture(1);
+ }
+
+ void SetTarget(Texture* texture, GLenum target, GLuint max_level) {
+ texture->SetTarget(NULL, target, max_level);
+ }
+
+ void SetLevelInfo(
+ Texture* texture,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared) {
+ texture->SetLevelInfo(NULL,
+ target,
+ level,
+ internal_format,
+ width,
+ height,
+ depth,
+ border,
+ format,
+ type,
+ cleared);
+ }
+
+ GLenum SetParameter(Texture* texture, GLenum pname, GLint param) {
+ return texture->SetParameteri(feature_info_.get(), pname, param);
+ }
+
+ void DestroyTexture(Texture* texture) {
+ delete texture;
+ }
+
+ scoped_refptr<MailboxManager> manager_;
+
+ private:
+ bool initialized_synchronizer_;
+ scoped_refptr<FeatureInfo> feature_info_;
+
+ DISALLOW_COPY_AND_ASSIGN(MailboxManagerTest);
+};
+
+// Tests basic produce/consume behavior.
+TEST_F(MailboxManagerTest, Basic) {
+ Texture* texture = CreateTexture();
+
+ Mailbox name = Mailbox::Generate();
+ manager_->ProduceTexture(0, name, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name));
+
+ // We can consume multiple times.
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name));
+
+ // Wrong target should fail the consume.
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(1, name));
+
+ // Destroy should cleanup the mailbox.
+ DestroyTexture(texture);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name));
+}
+
+// Tests behavior with multiple produce on the same texture.
+TEST_F(MailboxManagerTest, ProduceMultipleMailbox) {
+ Texture* texture = CreateTexture();
+
+ Mailbox name1 = Mailbox::Generate();
+
+ manager_->ProduceTexture(0, name1, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name1));
+
+ // Can produce a second time with the same mailbox.
+ manager_->ProduceTexture(0, name1, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name1));
+
+ // Can produce again, with a different mailbox.
+ Mailbox name2 = Mailbox::Generate();
+ manager_->ProduceTexture(0, name2, texture);
+
+ // Still available under all mailboxes.
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name1));
+ EXPECT_EQ(texture, manager_->ConsumeTexture(0, name2));
+
+ // Destroy should cleanup all mailboxes.
+ DestroyTexture(texture);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name1));
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name2));
+}
+
+// Tests behavior with multiple produce on the same mailbox with different
+// textures.
+TEST_F(MailboxManagerTest, ProduceMultipleTexture) {
+ Texture* texture1 = CreateTexture();
+ Texture* texture2 = CreateTexture();
+
+ Mailbox name = Mailbox::Generate();
+
+ manager_->ProduceTexture(0, name, texture1);
+ EXPECT_EQ(texture1, manager_->ConsumeTexture(0, name));
+
+ // Can produce a second time with the same mailbox, but different texture.
+ manager_->ProduceTexture(0, name, texture2);
+ EXPECT_EQ(texture2, manager_->ConsumeTexture(0, name));
+
+ // Destroying the texture that's under no mailbox shouldn't have an effect.
+ DestroyTexture(texture1);
+ EXPECT_EQ(texture2, manager_->ConsumeTexture(0, name));
+
+ // Destroying the texture that's bound should clean up.
+ DestroyTexture(texture2);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name));
+}
+
+TEST_F(MailboxManagerTest, ProduceMultipleTextureMailbox) {
+ Texture* texture1 = CreateTexture();
+ Texture* texture2 = CreateTexture();
+ Mailbox name1 = Mailbox::Generate();
+ Mailbox name2 = Mailbox::Generate();
+
+ // Put texture1 on name1 and name2.
+ manager_->ProduceTexture(0, name1, texture1);
+ manager_->ProduceTexture(0, name2, texture1);
+ EXPECT_EQ(texture1, manager_->ConsumeTexture(0, name1));
+ EXPECT_EQ(texture1, manager_->ConsumeTexture(0, name2));
+
+ // Put texture2 on name2.
+ manager_->ProduceTexture(0, name2, texture2);
+ EXPECT_EQ(texture1, manager_->ConsumeTexture(0, name1));
+ EXPECT_EQ(texture2, manager_->ConsumeTexture(0, name2));
+
+ // Destroy texture1, shouldn't affect name2.
+ DestroyTexture(texture1);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name1));
+ EXPECT_EQ(texture2, manager_->ConsumeTexture(0, name2));
+
+ DestroyTexture(texture2);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(0, name2));
+}
+
+const GLsizei kMaxTextureWidth = 64;
+const GLsizei kMaxTextureHeight = 64;
+const GLsizei kMaxTextureDepth = 1;
+
+class MailboxManagerSyncTest : public MailboxManagerTest {
+ public:
+ MailboxManagerSyncTest() {}
+ virtual ~MailboxManagerSyncTest() {}
+
+ protected:
+ virtual void SetUp() {
+ MailboxManagerTest::SetUpWithSynchronizer();
+ manager2_ = new MailboxManager;
+ context_ = new gfx::GLContextStub();
+ surface_ = new gfx::GLSurfaceStub();
+ context_->MakeCurrent(surface_.get());
+ }
+
+ Texture* DefineTexture() {
+ Texture* texture = CreateTexture();
+ const GLsizei levels_needed = TextureManager::ComputeMipMapCount(
+ GL_TEXTURE_2D, kMaxTextureWidth, kMaxTextureHeight, kMaxTextureDepth);
+ SetTarget(texture, GL_TEXTURE_2D, levels_needed);
+ SetLevelInfo(texture,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ SetParameter(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ SetParameter(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ return texture;
+ }
+
+ void SetupUpdateTexParamExpectations(GLuint texture_id,
+ GLenum min,
+ GLenum mag,
+ GLenum wrap_s,
+ GLenum wrap_t) {
+ DCHECK(texture_id);
+ const GLuint kCurrentTexture = 0;
+ EXPECT_CALL(*gl_, GetIntegerv(GL_TEXTURE_BINDING_2D, _))
+ .WillOnce(SetArgPointee<1>(kCurrentTexture))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, texture_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, min))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, mag))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrap_s))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrap_t))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, Flush())
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D, kCurrentTexture))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+
+ virtual void TearDown() {
+ context_->ReleaseCurrent(NULL);
+ MailboxManagerTest::TearDown();
+ }
+
+ scoped_refptr<MailboxManager> manager2_;
+ scoped_refptr<gfx::GLContext> context_;
+ scoped_refptr<gfx::GLSurface> surface_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MailboxManagerSyncTest);
+};
+
+TEST_F(MailboxManagerSyncTest, ProduceDestroy) {
+ Texture* texture = DefineTexture();
+ Mailbox name = Mailbox::Generate();
+
+ InSequence sequence;
+ manager_->ProduceTexture(GL_TEXTURE_2D, name, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+
+ DestroyTexture(texture);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+ EXPECT_EQ(NULL, manager2_->ConsumeTexture(GL_TEXTURE_2D, name));
+}
+
+TEST_F(MailboxManagerSyncTest, ProduceSyncDestroy) {
+ InSequence sequence;
+
+ Texture* texture = DefineTexture();
+ Mailbox name = Mailbox::Generate();
+
+ manager_->ProduceTexture(GL_TEXTURE_2D, name, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+
+ // Synchronize
+ manager_->PushTextureUpdates();
+ manager2_->PullTextureUpdates();
+
+ DestroyTexture(texture);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+ EXPECT_EQ(NULL, manager2_->ConsumeTexture(GL_TEXTURE_2D, name));
+}
+
+// Duplicates a texture into a second manager instance, and then
+// makes sure a redefinition becomes visible there too.
+TEST_F(MailboxManagerSyncTest, ProduceConsumeResize) {
+ const GLuint kNewTextureId = 1234;
+ InSequence sequence;
+
+ Texture* texture = DefineTexture();
+ Mailbox name = Mailbox::Generate();
+
+ manager_->ProduceTexture(GL_TEXTURE_2D, name, texture);
+ EXPECT_EQ(texture, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+
+ // Synchronize
+ manager_->PushTextureUpdates();
+ manager2_->PullTextureUpdates();
+
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgPointee<1>(kNewTextureId));
+ SetupUpdateTexParamExpectations(
+ kNewTextureId, GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ Texture* new_texture = manager2_->ConsumeTexture(GL_TEXTURE_2D, name);
+ EXPECT_FALSE(new_texture == NULL);
+ EXPECT_NE(texture, new_texture);
+ EXPECT_EQ(kNewTextureId, new_texture->service_id());
+
+ // Resize original texture
+ SetLevelInfo(texture,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 16,
+ 32,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ // Should have been orphaned
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) == NULL);
+
+ // Synchronize again
+ manager_->PushTextureUpdates();
+ SetupUpdateTexParamExpectations(
+ kNewTextureId, GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ manager2_->PullTextureUpdates();
+ GLsizei width, height;
+ new_texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height);
+ EXPECT_EQ(16, width);
+ EXPECT_EQ(32, height);
+
+ // Should have gotten a new attachment
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 0) != NULL);
+ // Resize original texture again....
+ SetLevelInfo(texture,
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 64,
+ 64,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ // ...and immediately delete the texture which should save the changes.
+ SetupUpdateTexParamExpectations(
+ kNewTextureId, GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ DestroyTexture(texture);
+
+  // Should still be around since there is a ref from manager2.
+ EXPECT_EQ(new_texture, manager2_->ConsumeTexture(GL_TEXTURE_2D, name));
+
+ // The last change to the texture should be visible without a sync point (i.e.
+ // push).
+ manager2_->PullTextureUpdates();
+ new_texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height);
+ EXPECT_EQ(64, width);
+ EXPECT_EQ(64, height);
+
+ DestroyTexture(new_texture);
+ EXPECT_EQ(NULL, manager_->ConsumeTexture(GL_TEXTURE_2D, name));
+ EXPECT_EQ(NULL, manager2_->ConsumeTexture(GL_TEXTURE_2D, name));
+}
+
+// Makes sure changes are correctly published even when updates are
+// pushed in both directions, i.e. makes sure we don't clobber a shared
+// texture definition with an older version.
+TEST_F(MailboxManagerSyncTest, ProduceConsumeBidirectional) {
+ const GLuint kNewTextureId1 = 1234;
+ const GLuint kNewTextureId2 = 4321;
+
+ Texture* texture1 = DefineTexture();
+ Mailbox name1 = Mailbox::Generate();
+ Texture* texture2 = DefineTexture();
+ Mailbox name2 = Mailbox::Generate();
+ Texture* new_texture1 = NULL;
+ Texture* new_texture2 = NULL;
+
+ manager_->ProduceTexture(GL_TEXTURE_2D, name1, texture1);
+ manager2_->ProduceTexture(GL_TEXTURE_2D, name2, texture2);
+
+ // Make visible.
+ manager_->PushTextureUpdates();
+ manager2_->PushTextureUpdates();
+
+ // Create textures in the other manager instances for texture1 and texture2,
+ // respectively to create a real sharing scenario. Otherwise, there would
+ // never be conflicting updates/pushes.
+ {
+ InSequence sequence;
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgPointee<1>(kNewTextureId1));
+ SetupUpdateTexParamExpectations(
+ kNewTextureId1, GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ new_texture1 = manager2_->ConsumeTexture(GL_TEXTURE_2D, name1);
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgPointee<1>(kNewTextureId2));
+ SetupUpdateTexParamExpectations(
+ kNewTextureId2, GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ new_texture2 = manager_->ConsumeTexture(GL_TEXTURE_2D, name2);
+ }
+ EXPECT_EQ(kNewTextureId1, new_texture1->service_id());
+ EXPECT_EQ(kNewTextureId2, new_texture2->service_id());
+
+ // Make a change to texture1
+ DCHECK_EQ(static_cast<GLuint>(GL_LINEAR), texture1->min_filter());
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR),
+ SetParameter(texture1, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
+
+ // Make sure this does not clobber it with the previous version we pushed.
+ manager_->PullTextureUpdates();
+
+ // Make a change to texture2
+ DCHECK_EQ(static_cast<GLuint>(GL_LINEAR), texture2->mag_filter());
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR),
+ SetParameter(texture2, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
+
+ Mock::VerifyAndClearExpectations(gl_.get());
+
+ // Synchronize in both directions
+ manager_->PushTextureUpdates();
+ manager2_->PushTextureUpdates();
+ // manager1 should see the change to texture2 mag_filter being applied.
+ SetupUpdateTexParamExpectations(
+ new_texture2->service_id(), GL_LINEAR, GL_NEAREST, GL_REPEAT, GL_REPEAT);
+ manager_->PullTextureUpdates();
+ // manager2 should see the change to texture1 min_filter being applied.
+ SetupUpdateTexParamExpectations(
+ new_texture1->service_id(), GL_NEAREST, GL_LINEAR, GL_REPEAT, GL_REPEAT);
+ manager2_->PullTextureUpdates();
+
+ DestroyTexture(texture1);
+ DestroyTexture(texture2);
+ DestroyTexture(new_texture1);
+ DestroyTexture(new_texture2);
+}
+
+// TODO: different texture into same mailbox
+
+// TODO: same texture, multiple mailboxes
+
+// TODO: Produce incomplete texture
+
+// TODO: Texture::level_infos_[][].size()
+
+// TODO: unsupported targets and formats
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/mailbox_synchronizer.cc b/gpu/command_buffer/service/mailbox_synchronizer.cc
new file mode 100644
index 0000000..eac31f9
--- /dev/null
+++ b/gpu/command_buffer/service/mailbox_synchronizer.cc
@@ -0,0 +1,231 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/mailbox_synchronizer.h"
+
+#include "base/bind.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+MailboxSynchronizer* g_instance = NULL;
+
+} // anonymous namespace
+
+// static
+bool MailboxSynchronizer::Initialize() {
+ DCHECK(!g_instance);
+ DCHECK(gfx::GetGLImplementation() != gfx::kGLImplementationNone)
+ << "GL bindings not initialized";
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationMockGL:
+ break;
+ case gfx::kGLImplementationEGLGLES2:
+#if !defined(OS_MACOSX)
+ {
+ if (!gfx::g_driver_egl.ext.b_EGL_KHR_image_base ||
+ !gfx::g_driver_egl.ext.b_EGL_KHR_gl_texture_2D_image ||
+ !gfx::g_driver_gl.ext.b_GL_OES_EGL_image ||
+ !gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
+ LOG(WARNING) << "MailboxSync not supported due to missing EGL "
+ "image/fence support";
+ return false;
+ }
+ }
+ break;
+#endif
+ default:
+ NOTREACHED();
+ return false;
+ }
+ g_instance = new MailboxSynchronizer;
+ return true;
+}
+
+// static
+void MailboxSynchronizer::Terminate() {
+ DCHECK(g_instance);
+ delete g_instance;
+ g_instance = NULL;
+}
+
+// static
+MailboxSynchronizer* MailboxSynchronizer::GetInstance() {
+ return g_instance;
+}
+
+MailboxSynchronizer::TargetName::TargetName(unsigned target,
+ const Mailbox& mailbox)
+ : target(target), mailbox(mailbox) {}
+
+MailboxSynchronizer::TextureGroup::TextureGroup(
+ const TextureDefinition& definition)
+ : definition(definition) {}
+
+MailboxSynchronizer::TextureGroup::~TextureGroup() {}
+
+MailboxSynchronizer::TextureVersion::TextureVersion(
+ linked_ptr<TextureGroup> group)
+ : version(group->definition.version()), group(group) {}
+
+MailboxSynchronizer::TextureVersion::~TextureVersion() {}
+
+MailboxSynchronizer::MailboxSynchronizer() {}
+
+MailboxSynchronizer::~MailboxSynchronizer() {
+ DCHECK_EQ(0U, textures_.size());
+}
+
+void MailboxSynchronizer::ReassociateMailboxLocked(
+ const TargetName& target_name,
+ TextureGroup* group) {
+ lock_.AssertAcquired();
+ for (TextureMap::iterator it = textures_.begin(); it != textures_.end();
+ it++) {
+ std::set<TargetName>::iterator mb_it =
+ it->second.group->mailboxes.find(target_name);
+ if (it->second.group != group &&
+ mb_it != it->second.group->mailboxes.end()) {
+ it->second.group->mailboxes.erase(mb_it);
+ }
+ }
+ group->mailboxes.insert(target_name);
+}
+
+linked_ptr<MailboxSynchronizer::TextureGroup>
+MailboxSynchronizer::GetGroupForMailboxLocked(const TargetName& target_name) {
+ lock_.AssertAcquired();
+ for (TextureMap::iterator it = textures_.begin(); it != textures_.end();
+ it++) {
+ std::set<TargetName>::const_iterator mb_it =
+ it->second.group->mailboxes.find(target_name);
+ if (mb_it != it->second.group->mailboxes.end())
+ return it->second.group;
+ }
+ return make_linked_ptr<MailboxSynchronizer::TextureGroup>(NULL);
+}
+
+Texture* MailboxSynchronizer::CreateTextureFromMailbox(unsigned target,
+ const Mailbox& mailbox) {
+ base::AutoLock lock(lock_);
+ TargetName target_name(target, mailbox);
+ linked_ptr<TextureGroup> group = GetGroupForMailboxLocked(target_name);
+ if (group.get()) {
+ Texture* new_texture = group->definition.CreateTexture();
+ if (new_texture)
+ textures_.insert(std::make_pair(new_texture, TextureVersion(group)));
+ return new_texture;
+ }
+
+ return NULL;
+}
+
+void MailboxSynchronizer::TextureDeleted(Texture* texture) {
+ base::AutoLock lock(lock_);
+ TextureMap::iterator it = textures_.find(texture);
+ if (it != textures_.end()) {
+ // TODO: We could avoid the update if this was the last ref.
+ UpdateTextureLocked(it->first, it->second);
+ textures_.erase(it);
+ }
+}
+
+void MailboxSynchronizer::PushTextureUpdates(MailboxManager* manager) {
+ base::AutoLock lock(lock_);
+ for (MailboxManager::MailboxToTextureMap::const_iterator texture_it =
+ manager->mailbox_to_textures_.begin();
+ texture_it != manager->mailbox_to_textures_.end();
+ texture_it++) {
+ TargetName target_name(texture_it->first.target, texture_it->first.mailbox);
+ Texture* texture = texture_it->second->first;
+ // TODO(sievers): crbug.com/352274
+ // Should probably only fail if it already *has* mipmaps, while allowing
+ // incomplete textures here. Also reconsider how to fail otherwise.
+ bool needs_mips = texture->min_filter() != GL_NEAREST &&
+ texture->min_filter() != GL_LINEAR;
+ if (target_name.target != GL_TEXTURE_2D || needs_mips)
+ continue;
+
+ TextureMap::iterator it = textures_.find(texture);
+ if (it != textures_.end()) {
+ TextureVersion& texture_version = it->second;
+ TextureGroup* group = texture_version.group.get();
+ std::set<TargetName>::const_iterator mb_it =
+ group->mailboxes.find(target_name);
+ if (mb_it == group->mailboxes.end()) {
+ // We previously did not associate this texture with the given mailbox.
+ // Unlink other texture groups from the mailbox.
+ ReassociateMailboxLocked(target_name, group);
+ }
+ UpdateTextureLocked(texture, texture_version);
+
+ } else {
+ // Skip compositor resources/tile textures.
+ // TODO: Remove this, see crbug.com/399226.
+ if (texture->pool() == GL_TEXTURE_POOL_MANAGED_CHROMIUM)
+ continue;
+
+ linked_ptr<TextureGroup> group = make_linked_ptr(new TextureGroup(
+ TextureDefinition(target_name.target, texture, 1, NULL)));
+
+ // Unlink other textures from this mailbox in case the name is not new.
+ ReassociateMailboxLocked(target_name, group.get());
+ textures_.insert(std::make_pair(texture, TextureVersion(group)));
+ }
+ }
+}
+
+void MailboxSynchronizer::UpdateTextureLocked(Texture* texture,
+ TextureVersion& texture_version) {
+ lock_.AssertAcquired();
+ gfx::GLImage* gl_image = texture->GetLevelImage(texture->target(), 0);
+ TextureGroup* group = texture_version.group.get();
+ scoped_refptr<NativeImageBuffer> image_buffer = group->definition.image();
+
+ // Make sure we don't clobber with an older version
+ if (!group->definition.IsOlderThan(texture_version.version))
+ return;
+
+ // Also don't push redundant updates. Note that it would break the
+ // versioning.
+ if (group->definition.Matches(texture))
+ return;
+
+ if (gl_image && !image_buffer->IsClient(gl_image)) {
+ LOG(ERROR) << "MailboxSync: Incompatible attachment";
+ return;
+ }
+
+ group->definition = TextureDefinition(texture->target(),
+ texture,
+ ++texture_version.version,
+ gl_image ? image_buffer : NULL);
+}
+
+void MailboxSynchronizer::PullTextureUpdates(MailboxManager* manager) {
+ base::AutoLock lock(lock_);
+ for (MailboxManager::MailboxToTextureMap::const_iterator texture_it =
+ manager->mailbox_to_textures_.begin();
+ texture_it != manager->mailbox_to_textures_.end();
+ texture_it++) {
+ Texture* texture = texture_it->second->first;
+ TextureMap::iterator it = textures_.find(texture);
+ if (it != textures_.end()) {
+ TextureDefinition& definition = it->second.group->definition;
+ if (it->second.version == definition.version() ||
+ definition.IsOlderThan(it->second.version))
+ continue;
+ it->second.version = definition.version();
+ definition.UpdateTexture(texture);
+ }
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/mailbox_synchronizer.h b/gpu/command_buffer/service/mailbox_synchronizer.h
new file mode 100644
index 0000000..a845963
--- /dev/null
+++ b/gpu/command_buffer/service/mailbox_synchronizer.h
@@ -0,0 +1,96 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MAILBOX_SYNCHRONIZER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MAILBOX_SYNCHRONIZER_H_
+
+#include "gpu/command_buffer/common/mailbox.h"
+
+#include <map>
+#include <set>
+
+#include "base/memory/linked_ptr.h"
+#include "base/synchronization/lock.h"
+#include "gpu/command_buffer/service/texture_definition.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class MailboxManager;
+class Texture;
+
+// A thread-safe proxy that can be used to emulate texture sharing across
+// share-groups.
+class MailboxSynchronizer {
+ public:
+ ~MailboxSynchronizer();
+
+ GPU_EXPORT static bool Initialize();
+ GPU_EXPORT static void Terminate();
+ static MailboxSynchronizer* GetInstance();
+
+ // Create a texture from a globally visible mailbox.
+ Texture* CreateTextureFromMailbox(unsigned target, const Mailbox& mailbox);
+
+ void PushTextureUpdates(MailboxManager* manager);
+ void PullTextureUpdates(MailboxManager* manager);
+
+ void TextureDeleted(Texture* texture);
+
+ private:
+ MailboxSynchronizer();
+
+ struct TargetName {
+ TargetName(unsigned target, const Mailbox& mailbox);
+ bool operator<(const TargetName& rhs) const {
+ return memcmp(this, &rhs, sizeof(rhs)) < 0;
+ }
+ bool operator!=(const TargetName& rhs) const {
+ return memcmp(this, &rhs, sizeof(rhs)) != 0;
+ }
+ bool operator==(const TargetName& rhs) const {
+ return !operator!=(rhs);
+ }
+ unsigned target;
+ Mailbox mailbox;
+ };
+
+ base::Lock lock_;
+
+ struct TextureGroup {
+ explicit TextureGroup(const TextureDefinition& definition);
+ ~TextureGroup();
+
+ TextureDefinition definition;
+ std::set<TargetName> mailboxes;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TextureGroup);
+ };
+
+ struct TextureVersion {
+ explicit TextureVersion(linked_ptr<TextureGroup> group);
+ ~TextureVersion();
+
+ unsigned int version;
+ linked_ptr<TextureGroup> group;
+ };
+ typedef std::map<Texture*, TextureVersion> TextureMap;
+ TextureMap textures_;
+
+ linked_ptr<TextureGroup> GetGroupForMailboxLocked(
+ const TargetName& target_name);
+ void ReassociateMailboxLocked(
+ const TargetName& target_name,
+ TextureGroup* group);
+ void UpdateTextureLocked(Texture* texture, TextureVersion& texture_version);
+
+ DISALLOW_COPY_AND_ASSIGN(MailboxSynchronizer);
+};
+
+}  // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MAILBOX_SYNCHRONIZER_H_
+
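A hedged sketch of cross-share-group sharing through MailboxSynchronizer, mirroring the flow in MailboxManagerSyncTest above; it assumes GL bindings are initialized, a context is current, and |texture| is a complete, non-mipmapped GL_TEXTURE_2D owned by the first manager's share group.

void CrossGroupShareExample(gpu::gles2::Texture* texture) {
  // One global synchronizer, two independent mailbox managers.
  gpu::gles2::MailboxSynchronizer::Initialize();
  scoped_refptr<gpu::gles2::MailboxManager> manager_a(
      new gpu::gles2::MailboxManager);
  scoped_refptr<gpu::gles2::MailboxManager> manager_b(
      new gpu::gles2::MailboxManager);

  gpu::Mailbox name = gpu::Mailbox::Generate();
  manager_a->ProduceTexture(GL_TEXTURE_2D, name, texture);

  // Publish the definition, then pick it up on the other side. Consuming on
  // manager_b creates a new Texture backed by the shared image buffer.
  manager_a->PushTextureUpdates();
  manager_b->PullTextureUpdates();
  gpu::gles2::Texture* copy = manager_b->ConsumeTexture(GL_TEXTURE_2D, name);
  DCHECK(copy);

  // Teardown elided: all tracked textures must be deleted before
  // MailboxSynchronizer::Terminate(), whose destructor DCHECKs that none
  // remain.
}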
diff --git a/gpu/command_buffer/service/memory_program_cache.cc b/gpu/command_buffer/service/memory_program_cache.cc
new file mode 100644
index 0000000..87378aa
--- /dev/null
+++ b/gpu/command_buffer/service/memory_program_cache.cc
@@ -0,0 +1,370 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/memory_program_cache.h"
+
+#include "base/base64.h"
+#include "base/command_line.h"
+#include "base/metrics/histogram.h"
+#include "base/sha1.h"
+#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/service/disk_cache_proto.pb.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace {
+
+size_t GetCacheSizeBytes() {
+ const CommandLine* command_line = CommandLine::ForCurrentProcess();
+ if (command_line->HasSwitch(switches::kGpuProgramCacheSizeKb)) {
+ size_t size;
+ if (base::StringToSizeT(
+ command_line->GetSwitchValueNative(switches::kGpuProgramCacheSizeKb),
+ &size))
+ return size * 1024;
+ }
+ return gpu::kDefaultMaxProgramCacheMemoryBytes;
+}
+
+} // anonymous namespace
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+enum ShaderMapType {
+ ATTRIB_MAP = 0,
+ UNIFORM_MAP,
+ VARYING_MAP
+};
+
+void StoreShaderInfo(ShaderMapType type, ShaderProto *proto,
+ const ShaderTranslator::VariableMap& map) {
+ ShaderTranslator::VariableMap::const_iterator iter;
+ for (iter = map.begin(); iter != map.end(); ++iter) {
+ ShaderInfoProto* info = NULL;
+ switch (type) {
+ case UNIFORM_MAP:
+ info = proto->add_uniforms();
+ break;
+ case ATTRIB_MAP:
+ info = proto->add_attribs();
+ break;
+ case VARYING_MAP:
+ info = proto->add_varyings();
+ break;
+ default: NOTREACHED();
+ }
+
+ info->set_key(iter->first);
+ info->set_type(iter->second.type);
+ info->set_size(iter->second.size);
+ info->set_precision(iter->second.precision);
+ info->set_static_use(iter->second.static_use);
+ info->set_name(iter->second.name);
+ }
+}
+
+void RetrieveShaderInfo(const ShaderInfoProto& proto,
+ ShaderTranslator::VariableMap* map) {
+ ShaderTranslator::VariableInfo info(
+ proto.type(), proto.size(), proto.precision(),
+ proto.static_use(), proto.name());
+ (*map)[proto.key()] = info;
+}
+
+void FillShaderProto(ShaderProto* proto, const char* sha,
+ const Shader* shader) {
+ proto->set_sha(sha, gpu::gles2::ProgramCache::kHashLength);
+ StoreShaderInfo(ATTRIB_MAP, proto, shader->attrib_map());
+ StoreShaderInfo(UNIFORM_MAP, proto, shader->uniform_map());
+ StoreShaderInfo(VARYING_MAP, proto, shader->varying_map());
+}
+
+void RunShaderCallback(const ShaderCacheCallback& callback,
+ GpuProgramProto* proto,
+ std::string sha_string) {
+ std::string shader;
+ proto->SerializeToString(&shader);
+
+ std::string key;
+ base::Base64Encode(sha_string, &key);
+ callback.Run(key, shader);
+}
+
+} // namespace
+
+MemoryProgramCache::MemoryProgramCache()
+ : max_size_bytes_(GetCacheSizeBytes()),
+ curr_size_bytes_(0),
+ store_(ProgramMRUCache::NO_AUTO_EVICT) {
+}
+
+MemoryProgramCache::MemoryProgramCache(const size_t max_cache_size_bytes)
+ : max_size_bytes_(max_cache_size_bytes),
+ curr_size_bytes_(0),
+ store_(ProgramMRUCache::NO_AUTO_EVICT) {
+}
+
+MemoryProgramCache::~MemoryProgramCache() {}
+
+void MemoryProgramCache::ClearBackend() {
+ store_.Clear();
+ DCHECK_EQ(0U, curr_size_bytes_);
+}
+
+ProgramCache::ProgramLoadResult MemoryProgramCache::LoadLinkedProgram(
+ GLuint program,
+ Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) {
+ char a_sha[kHashLength];
+ char b_sha[kHashLength];
+ DCHECK(shader_a && !shader_a->signature_source().empty() &&
+ shader_b && !shader_b->signature_source().empty());
+ ComputeShaderHash(
+ shader_a->signature_source(), translator_a, a_sha);
+ ComputeShaderHash(
+ shader_b->signature_source(), translator_b, b_sha);
+
+ char sha[kHashLength];
+ ComputeProgramHash(a_sha,
+ b_sha,
+ bind_attrib_location_map,
+ sha);
+ const std::string sha_string(sha, kHashLength);
+
+ ProgramMRUCache::iterator found = store_.Get(sha_string);
+ if (found == store_.end()) {
+ return PROGRAM_LOAD_FAILURE;
+ }
+ const scoped_refptr<ProgramCacheValue> value = found->second;
+ glProgramBinary(program,
+ value->format(),
+ static_cast<const GLvoid*>(value->data()),
+ value->length());
+ GLint success = 0;
+ glGetProgramiv(program, GL_LINK_STATUS, &success);
+ if (success == GL_FALSE) {
+ return PROGRAM_LOAD_FAILURE;
+ }
+ shader_a->set_attrib_map(value->attrib_map_0());
+ shader_a->set_uniform_map(value->uniform_map_0());
+ shader_a->set_varying_map(value->varying_map_0());
+ shader_b->set_attrib_map(value->attrib_map_1());
+ shader_b->set_uniform_map(value->uniform_map_1());
+ shader_b->set_varying_map(value->varying_map_1());
+
+ if (!shader_callback.is_null() &&
+ !CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableGpuShaderDiskCache)) {
+ scoped_ptr<GpuProgramProto> proto(
+ GpuProgramProto::default_instance().New());
+ proto->set_sha(sha, kHashLength);
+ proto->set_format(value->format());
+ proto->set_program(value->data(), value->length());
+
+ FillShaderProto(proto->mutable_vertex_shader(), a_sha, shader_a);
+ FillShaderProto(proto->mutable_fragment_shader(), b_sha, shader_b);
+ RunShaderCallback(shader_callback, proto.get(), sha_string);
+ }
+
+ return PROGRAM_LOAD_SUCCESS;
+}
+
+void MemoryProgramCache::SaveLinkedProgram(
+ GLuint program,
+ const Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) {
+ GLenum format;
+ GLsizei length = 0;
+ glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH_OES, &length);
+ if (length == 0 || static_cast<unsigned int>(length) > max_size_bytes_) {
+ return;
+ }
+ scoped_ptr<char[]> binary(new char[length]);
+ glGetProgramBinary(program,
+ length,
+ NULL,
+ &format,
+ binary.get());
+ UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.ProgramBinarySizeBytes", length);
+
+ char a_sha[kHashLength];
+ char b_sha[kHashLength];
+ DCHECK(shader_a && !shader_a->signature_source().empty() &&
+ shader_b && !shader_b->signature_source().empty());
+ ComputeShaderHash(
+ shader_a->signature_source(), translator_a, a_sha);
+ ComputeShaderHash(
+ shader_b->signature_source(), translator_b, b_sha);
+
+ char sha[kHashLength];
+ ComputeProgramHash(a_sha,
+ b_sha,
+ bind_attrib_location_map,
+ sha);
+ const std::string sha_string(sha, sizeof(sha));
+
+ UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.MemorySizeBeforeKb",
+ curr_size_bytes_ / 1024);
+
+  // Replace any existing entry with the same key, then evict least recently
+  // used entries until the new program fits within the cache budget.
+ ProgramMRUCache::iterator existing = store_.Peek(sha_string);
+  if (existing != store_.end())
+ store_.Erase(existing);
+
+ while (curr_size_bytes_ + length > max_size_bytes_) {
+ DCHECK(!store_.empty());
+ store_.Erase(store_.rbegin());
+ }
+
+ if (!shader_callback.is_null() &&
+ !CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableGpuShaderDiskCache)) {
+ scoped_ptr<GpuProgramProto> proto(
+ GpuProgramProto::default_instance().New());
+ proto->set_sha(sha, kHashLength);
+ proto->set_format(format);
+ proto->set_program(binary.get(), length);
+
+ FillShaderProto(proto->mutable_vertex_shader(), a_sha, shader_a);
+ FillShaderProto(proto->mutable_fragment_shader(), b_sha, shader_b);
+ RunShaderCallback(shader_callback, proto.get(), sha_string);
+ }
+
+ store_.Put(sha_string,
+ new ProgramCacheValue(length,
+ format,
+ binary.release(),
+ sha_string,
+ a_sha,
+ shader_a->attrib_map(),
+ shader_a->uniform_map(),
+ shader_a->varying_map(),
+ b_sha,
+ shader_b->attrib_map(),
+ shader_b->uniform_map(),
+ shader_b->varying_map(),
+ this));
+
+ UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.MemorySizeAfterKb",
+ curr_size_bytes_ / 1024);
+}
+
+void MemoryProgramCache::LoadProgram(const std::string& program) {
+ scoped_ptr<GpuProgramProto> proto(GpuProgramProto::default_instance().New());
+ if (proto->ParseFromString(program)) {
+ ShaderTranslator::VariableMap vertex_attribs;
+ ShaderTranslator::VariableMap vertex_uniforms;
+ ShaderTranslator::VariableMap vertex_varyings;
+
+ for (int i = 0; i < proto->vertex_shader().attribs_size(); i++) {
+ RetrieveShaderInfo(proto->vertex_shader().attribs(i), &vertex_attribs);
+ }
+
+ for (int i = 0; i < proto->vertex_shader().uniforms_size(); i++) {
+ RetrieveShaderInfo(proto->vertex_shader().uniforms(i), &vertex_uniforms);
+ }
+
+ for (int i = 0; i < proto->vertex_shader().varyings_size(); i++) {
+ RetrieveShaderInfo(proto->vertex_shader().varyings(i), &vertex_varyings);
+ }
+
+ ShaderTranslator::VariableMap fragment_attribs;
+ ShaderTranslator::VariableMap fragment_uniforms;
+ ShaderTranslator::VariableMap fragment_varyings;
+
+ for (int i = 0; i < proto->fragment_shader().attribs_size(); i++) {
+ RetrieveShaderInfo(proto->fragment_shader().attribs(i),
+ &fragment_attribs);
+ }
+
+ for (int i = 0; i < proto->fragment_shader().uniforms_size(); i++) {
+ RetrieveShaderInfo(proto->fragment_shader().uniforms(i),
+ &fragment_uniforms);
+ }
+
+ for (int i = 0; i < proto->fragment_shader().varyings_size(); i++) {
+ RetrieveShaderInfo(proto->fragment_shader().varyings(i),
+ &fragment_varyings);
+ }
+
+ scoped_ptr<char[]> binary(new char[proto->program().length()]);
+ memcpy(binary.get(), proto->program().c_str(), proto->program().length());
+
+ store_.Put(proto->sha(),
+ new ProgramCacheValue(proto->program().length(),
+ proto->format(),
+ binary.release(),
+ proto->sha(),
+ proto->vertex_shader().sha().c_str(),
+ vertex_attribs,
+ vertex_uniforms,
+ vertex_varyings,
+ proto->fragment_shader().sha().c_str(),
+ fragment_attribs,
+ fragment_uniforms,
+ fragment_varyings,
+ this));
+
+ UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.MemorySizeAfterKb",
+ curr_size_bytes_ / 1024);
+ } else {
+ LOG(ERROR) << "Failed to parse proto file.";
+ }
+}
+
+MemoryProgramCache::ProgramCacheValue::ProgramCacheValue(
+ GLsizei length,
+ GLenum format,
+ const char* data,
+ const std::string& program_hash,
+ const char* shader_0_hash,
+ const ShaderTranslator::VariableMap& attrib_map_0,
+ const ShaderTranslator::VariableMap& uniform_map_0,
+ const ShaderTranslator::VariableMap& varying_map_0,
+ const char* shader_1_hash,
+ const ShaderTranslator::VariableMap& attrib_map_1,
+ const ShaderTranslator::VariableMap& uniform_map_1,
+ const ShaderTranslator::VariableMap& varying_map_1,
+ MemoryProgramCache* program_cache)
+ : length_(length),
+ format_(format),
+ data_(data),
+ program_hash_(program_hash),
+ shader_0_hash_(shader_0_hash, kHashLength),
+ attrib_map_0_(attrib_map_0),
+ uniform_map_0_(uniform_map_0),
+ varying_map_0_(varying_map_0),
+ shader_1_hash_(shader_1_hash, kHashLength),
+ attrib_map_1_(attrib_map_1),
+ uniform_map_1_(uniform_map_1),
+ varying_map_1_(varying_map_1),
+ program_cache_(program_cache) {
+ program_cache_->curr_size_bytes_ += length_;
+ program_cache_->LinkedProgramCacheSuccess(program_hash);
+}
+
+MemoryProgramCache::ProgramCacheValue::~ProgramCacheValue() {
+ program_cache_->curr_size_bytes_ -= length_;
+ program_cache_->Evict(program_hash_);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/memory_program_cache.h b/gpu/command_buffer/service/memory_program_cache.h
new file mode 100644
index 0000000..e72f9f5
--- /dev/null
+++ b/gpu/command_buffer/service/memory_program_cache.h
@@ -0,0 +1,148 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MEMORY_PROGRAM_CACHE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MEMORY_PROGRAM_CACHE_H_
+
+#include <map>
+#include <string>
+
+#include "base/containers/hash_tables.h"
+#include "base/containers/mru_cache.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/program_cache.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+
+namespace gpu {
+namespace gles2 {
+
+// Program cache that stores program binaries completely in memory.
+class GPU_EXPORT MemoryProgramCache : public ProgramCache {
+ public:
+ MemoryProgramCache();
+ explicit MemoryProgramCache(const size_t max_cache_size_bytes);
+ virtual ~MemoryProgramCache();
+
+ virtual ProgramLoadResult LoadLinkedProgram(
+ GLuint program,
+ Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) OVERRIDE;
+ virtual void SaveLinkedProgram(
+ GLuint program,
+ const Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) OVERRIDE;
+
+ virtual void LoadProgram(const std::string& program) OVERRIDE;
+
+ private:
+ virtual void ClearBackend() OVERRIDE;
+
+ class ProgramCacheValue : public base::RefCounted<ProgramCacheValue> {
+ public:
+ ProgramCacheValue(GLsizei length,
+ GLenum format,
+ const char* data,
+ const std::string& program_hash,
+ const char* shader_0_hash,
+ const ShaderTranslator::VariableMap& attrib_map_0,
+ const ShaderTranslator::VariableMap& uniform_map_0,
+ const ShaderTranslator::VariableMap& varying_map_0,
+ const char* shader_1_hash,
+ const ShaderTranslator::VariableMap& attrib_map_1,
+ const ShaderTranslator::VariableMap& uniform_map_1,
+ const ShaderTranslator::VariableMap& varying_map_1,
+ MemoryProgramCache* program_cache);
+
+ GLsizei length() const {
+ return length_;
+ }
+
+ GLenum format() const {
+ return format_;
+ }
+
+ const char* data() const {
+ return data_.get();
+ }
+
+ const std::string& shader_0_hash() const {
+ return shader_0_hash_;
+ }
+
+ const ShaderTranslator::VariableMap& attrib_map_0() const {
+ return attrib_map_0_;
+ }
+
+ const ShaderTranslator::VariableMap& uniform_map_0() const {
+ return uniform_map_0_;
+ }
+
+ const ShaderTranslator::VariableMap& varying_map_0() const {
+ return varying_map_0_;
+ }
+
+ const std::string& shader_1_hash() const {
+ return shader_1_hash_;
+ }
+
+ const ShaderTranslator::VariableMap& attrib_map_1() const {
+ return attrib_map_1_;
+ }
+
+ const ShaderTranslator::VariableMap& uniform_map_1() const {
+ return uniform_map_1_;
+ }
+
+ const ShaderTranslator::VariableMap& varying_map_1() const {
+ return varying_map_1_;
+ }
+
+ private:
+ friend class base::RefCounted<ProgramCacheValue>;
+
+ ~ProgramCacheValue();
+
+ const GLsizei length_;
+ const GLenum format_;
+ const scoped_ptr<const char[]> data_;
+ const std::string program_hash_;
+ const std::string shader_0_hash_;
+ const ShaderTranslator::VariableMap attrib_map_0_;
+ const ShaderTranslator::VariableMap uniform_map_0_;
+ const ShaderTranslator::VariableMap varying_map_0_;
+ const std::string shader_1_hash_;
+ const ShaderTranslator::VariableMap attrib_map_1_;
+ const ShaderTranslator::VariableMap uniform_map_1_;
+ const ShaderTranslator::VariableMap varying_map_1_;
+ MemoryProgramCache* const program_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProgramCacheValue);
+ };
+
+ friend class ProgramCacheValue;
+
+ typedef base::MRUCache<std::string,
+ scoped_refptr<ProgramCacheValue> > ProgramMRUCache;
+
+ const size_t max_size_bytes_;
+ size_t curr_size_bytes_;
+ ProgramMRUCache store_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryProgramCache);
+};
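+// Illustrative usage sketch (not part of the original file; the identifiers
+// below are hypothetical):
+//
+//   MemoryProgramCache cache(16 * 1024 * 1024);  // 16 MB budget.
+//   cache.SaveLinkedProgram(service_id, vertex_shader, vertex_translator,
+//                           fragment_shader, fragment_translator,
+//                           &bind_attrib_location_map, shader_callback);
+//   ...
+//   if (cache.LoadLinkedProgram(service_id, vertex_shader, vertex_translator,
+//                               fragment_shader, fragment_translator,
+//                               &bind_attrib_location_map, shader_callback) ==
+//       ProgramCache::PROGRAM_LOAD_SUCCESS) {
+//     // The program binary was restored; relinking can be skipped.
+//   }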
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MEMORY_PROGRAM_CACHE_H_
diff --git a/gpu/command_buffer/service/memory_program_cache_unittest.cc b/gpu/command_buffer/service/memory_program_cache_unittest.cc
new file mode 100644
index 0000000..ba18ff4
--- /dev/null
+++ b/gpu/command_buffer/service/memory_program_cache_unittest.cc
@@ -0,0 +1,636 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/memory_program_cache.h"
+
+#include "base/bind.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::ElementsAreArray;
+using ::testing::Invoke;
+using ::testing::SetArgPointee;
+using ::testing::SetArrayArgument;
+
+namespace {
+typedef gpu::gles2::ShaderTranslator::VariableMap VariableMap;
+} // anonymous namespace
+
+namespace gpu {
+namespace gles2 {
+
+class ProgramBinaryEmulator {
+ public:
+ ProgramBinaryEmulator(GLsizei length,
+ GLenum format,
+ const char* binary)
+ : length_(length),
+ format_(format),
+ binary_(binary) { }
+
+ void GetProgramBinary(GLuint program,
+ GLsizei buffer_size,
+ GLsizei* length,
+ GLenum* format,
+ GLvoid* binary) {
+ if (length) {
+ *length = length_;
+ }
+ *format = format_;
+ memcpy(binary, binary_, length_);
+ }
+
+ void ProgramBinary(GLuint program,
+ GLenum format,
+ const GLvoid* binary,
+ GLsizei length) {
+ // format and length are verified by matcher
+ EXPECT_EQ(0, memcmp(binary_, binary, length));
+ }
+
+ GLsizei length() const { return length_; }
+ GLenum format() const { return format_; }
+ const char* binary() const { return binary_; }
+
+ private:
+ GLsizei length_;
+ GLenum format_;
+ const char* binary_;
+};
+
+class MemoryProgramCacheTest : public GpuServiceTest {
+ public:
+ static const size_t kCacheSizeBytes = 1024;
+ static const GLuint kVertexShaderClientId = 90;
+ static const GLuint kVertexShaderServiceId = 100;
+ static const GLuint kFragmentShaderClientId = 91;
+ static const GLuint kFragmentShaderServiceId = 100;
+
+ MemoryProgramCacheTest()
+ : cache_(new MemoryProgramCache(kCacheSizeBytes)),
+ vertex_shader_(NULL),
+ fragment_shader_(NULL),
+ shader_cache_count_(0) { }
+ virtual ~MemoryProgramCacheTest() {
+ shader_manager_.Destroy(false);
+ }
+
+ void ShaderCacheCb(const std::string& key, const std::string& shader) {
+ shader_cache_count_++;
+ shader_cache_shader_ = shader;
+ }
+
+ int32 shader_cache_count() { return shader_cache_count_; }
+ const std::string& shader_cache_shader() { return shader_cache_shader_; }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+
+ vertex_shader_ = shader_manager_.CreateShader(kVertexShaderClientId,
+ kVertexShaderServiceId,
+ GL_VERTEX_SHADER);
+ fragment_shader_ = shader_manager_.CreateShader(
+ kFragmentShaderClientId,
+ kFragmentShaderServiceId,
+ GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(vertex_shader_ != NULL);
+ ASSERT_TRUE(fragment_shader_ != NULL);
+ typedef ShaderTranslatorInterface::VariableInfo VariableInfo;
+ typedef ShaderTranslator::VariableMap VariableMap;
+ VariableMap vertex_attrib_map;
+ VariableMap vertex_uniform_map;
+ VariableMap vertex_varying_map;
+ VariableMap fragment_attrib_map;
+ VariableMap fragment_uniform_map;
+ VariableMap fragment_varying_map;
+
+ vertex_attrib_map["a"] = VariableInfo(1, 34, SH_PRECISION_LOWP, 0, "a");
+ vertex_uniform_map["a"] = VariableInfo(0, 10, SH_PRECISION_MEDIUMP, 1, "a");
+ vertex_uniform_map["b"] = VariableInfo(2, 3114, SH_PRECISION_HIGHP, 1, "b");
+ vertex_varying_map["c"] = VariableInfo(3, 2, SH_PRECISION_HIGHP, 1, "c");
+ fragment_attrib_map["jjjbb"] =
+ VariableInfo(463, 1114, SH_PRECISION_MEDIUMP, 0, "jjjbb");
+ fragment_uniform_map["k"] =
+ VariableInfo(10, 34413, SH_PRECISION_MEDIUMP, 1, "k");
+ fragment_varying_map["c"] = VariableInfo(3, 2, SH_PRECISION_HIGHP, 1, "c");
+
+ vertex_shader_->set_source("bbbalsldkdkdkd");
+ fragment_shader_->set_source("bbbal sldkdkdkas 134 ad");
+
+ TestHelper::SetShaderStates(
+ gl_.get(), vertex_shader_, true, NULL, NULL,
+ &vertex_attrib_map, &vertex_uniform_map, &vertex_varying_map,
+ NULL);
+ TestHelper::SetShaderStates(
+ gl_.get(), fragment_shader_, true, NULL, NULL,
+ &fragment_attrib_map, &fragment_uniform_map, &fragment_varying_map,
+ NULL);
+ }
+
+ void SetExpectationsForSaveLinkedProgram(
+ const GLint program_id,
+ ProgramBinaryEmulator* emulator) const {
+ EXPECT_CALL(*gl_.get(),
+ GetProgramiv(program_id, GL_PROGRAM_BINARY_LENGTH_OES, _))
+ .WillOnce(SetArgPointee<2>(emulator->length()));
+ EXPECT_CALL(*gl_.get(),
+ GetProgramBinary(program_id, emulator->length(), _, _, _))
+ .WillOnce(Invoke(emulator, &ProgramBinaryEmulator::GetProgramBinary));
+ }
+
+ void SetExpectationsForLoadLinkedProgram(
+ const GLint program_id,
+ ProgramBinaryEmulator* emulator) const {
+ EXPECT_CALL(*gl_.get(),
+ ProgramBinary(program_id,
+ emulator->format(),
+ _,
+ emulator->length()))
+ .WillOnce(Invoke(emulator, &ProgramBinaryEmulator::ProgramBinary));
+ EXPECT_CALL(*gl_.get(),
+ GetProgramiv(program_id, GL_LINK_STATUS, _))
+ .WillOnce(SetArgPointee<2>(GL_TRUE));
+ }
+
+ void SetExpectationsForLoadLinkedProgramFailure(
+ const GLint program_id,
+ ProgramBinaryEmulator* emulator) const {
+ EXPECT_CALL(*gl_.get(),
+ ProgramBinary(program_id,
+ emulator->format(),
+ _,
+ emulator->length()))
+ .WillOnce(Invoke(emulator, &ProgramBinaryEmulator::ProgramBinary));
+ EXPECT_CALL(*gl_.get(),
+ GetProgramiv(program_id, GL_LINK_STATUS, _))
+ .WillOnce(SetArgPointee<2>(GL_FALSE));
+ }
+
+ scoped_ptr<MemoryProgramCache> cache_;
+ ShaderManager shader_manager_;
+ Shader* vertex_shader_;
+ Shader* fragment_shader_;
+ int32 shader_cache_count_;
+ std::string shader_cache_shader_;
+};
+
+TEST_F(MemoryProgramCacheTest, CacheSave) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+ EXPECT_EQ(1, shader_cache_count());
+}
+
+TEST_F(MemoryProgramCacheTest, LoadProgram) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+ EXPECT_EQ(1, shader_cache_count());
+
+ cache_->Clear();
+
+ cache_->LoadProgram(shader_cache_shader());
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+}
+
+TEST_F(MemoryProgramCacheTest, CacheLoadMatchesSave) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+ EXPECT_EQ(1, shader_cache_count());
+
+ VariableMap vertex_attrib_map = vertex_shader_->attrib_map();
+ VariableMap vertex_uniform_map = vertex_shader_->uniform_map();
+ VariableMap vertex_varying_map = vertex_shader_->varying_map();
+ VariableMap fragment_attrib_map = fragment_shader_->attrib_map();
+ VariableMap fragment_uniform_map = fragment_shader_->uniform_map();
+ VariableMap fragment_varying_map = fragment_shader_->varying_map();
+
+ vertex_shader_->set_attrib_map(VariableMap());
+ vertex_shader_->set_uniform_map(VariableMap());
+ vertex_shader_->set_varying_map(VariableMap());
+ fragment_shader_->set_attrib_map(VariableMap());
+ fragment_shader_->set_uniform_map(VariableMap());
+ fragment_shader_->set_varying_map(VariableMap());
+
+ SetExpectationsForLoadLinkedProgram(kProgramId, &emulator);
+
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_SUCCESS, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+
+  // The hash_map implementation on Android does not define the equality
+  // operator, so skip the map comparisons there.
+#if !defined(OS_ANDROID)
+ EXPECT_EQ(vertex_attrib_map, vertex_shader_->attrib_map());
+ EXPECT_EQ(vertex_uniform_map, vertex_shader_->uniform_map());
+ EXPECT_EQ(vertex_varying_map, vertex_shader_->varying_map());
+ EXPECT_EQ(fragment_attrib_map, fragment_shader_->attrib_map());
+ EXPECT_EQ(fragment_uniform_map, fragment_shader_->uniform_map());
+ EXPECT_EQ(fragment_varying_map, fragment_shader_->varying_map());
+#endif
+}
+
+TEST_F(MemoryProgramCacheTest, LoadProgramMatchesSave) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+ EXPECT_EQ(1, shader_cache_count());
+
+ VariableMap vertex_attrib_map = vertex_shader_->attrib_map();
+ VariableMap vertex_uniform_map = vertex_shader_->uniform_map();
+ VariableMap vertex_varying_map = vertex_shader_->varying_map();
+ VariableMap fragment_attrib_map = fragment_shader_->attrib_map();
+ VariableMap fragment_uniform_map = fragment_shader_->uniform_map();
+ VariableMap fragment_varying_map = fragment_shader_->varying_map();
+
+ vertex_shader_->set_attrib_map(VariableMap());
+ vertex_shader_->set_uniform_map(VariableMap());
+ vertex_shader_->set_varying_map(VariableMap());
+ fragment_shader_->set_attrib_map(VariableMap());
+ fragment_shader_->set_uniform_map(VariableMap());
+ fragment_shader_->set_varying_map(VariableMap());
+
+ SetExpectationsForLoadLinkedProgram(kProgramId, &emulator);
+
+ cache_->Clear();
+ cache_->LoadProgram(shader_cache_shader());
+
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_SUCCESS, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+
+  // The hash_map implementation on Android does not define the equality
+  // operator, so skip the map comparisons there.
+#if !defined(OS_ANDROID)
+ EXPECT_EQ(vertex_attrib_map, vertex_shader_->attrib_map());
+ EXPECT_EQ(vertex_uniform_map, vertex_shader_->uniform_map());
+ EXPECT_EQ(vertex_varying_map, vertex_shader_->varying_map());
+ EXPECT_EQ(fragment_attrib_map, fragment_shader_->attrib_map());
+ EXPECT_EQ(fragment_uniform_map, fragment_shader_->uniform_map());
+ EXPECT_EQ(fragment_varying_map, fragment_shader_->varying_map());
+#endif
+}
+
+TEST_F(MemoryProgramCacheTest, LoadFailOnLinkFalse) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ SetExpectationsForLoadLinkedProgramFailure(kProgramId, &emulator);
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
+TEST_F(MemoryProgramCacheTest, LoadFailOnDifferentSource) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ const std::string vertex_orig_source = vertex_shader_->signature_source();
+ vertex_shader_->set_source("different!");
+ TestHelper::SetShaderStates(gl_.get(), vertex_shader_, true);
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+
+ vertex_shader_->set_source(vertex_orig_source);
+ TestHelper::SetShaderStates(gl_.get(), vertex_shader_, true);
+ fragment_shader_->set_source("different!");
+ TestHelper::SetShaderStates(gl_.get(), fragment_shader_, true);
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
+TEST_F(MemoryProgramCacheTest, LoadFailOnDifferentMap) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ ProgramCache::LocationMap binding_map;
+ binding_map["test"] = 512;
+ cache_->SaveLinkedProgram(kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ &binding_map,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ binding_map["different!"] = 59;
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ &binding_map,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
+TEST_F(MemoryProgramCacheTest, MemoryProgramCacheEviction) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator1(kBinaryLength, kFormat, test_binary);
+
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator1);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ const int kEvictingProgramId = 11;
+ const GLuint kEvictingBinaryLength = kCacheSizeBytes - kBinaryLength + 1;
+
+  // Save the old source and modify it for the new program.
+ const std::string& old_source = fragment_shader_->signature_source();
+ fragment_shader_->set_source("al sdfkjdk");
+ TestHelper::SetShaderStates(gl_.get(), fragment_shader_, true);
+
+  scoped_ptr<char[]> bigTestBinary(new char[kEvictingBinaryLength]);
+ for (size_t i = 0; i < kEvictingBinaryLength; ++i) {
+ bigTestBinary[i] = i % 250;
+ }
+ ProgramBinaryEmulator emulator2(kEvictingBinaryLength,
+ kFormat,
+ bigTestBinary.get());
+
+ SetExpectationsForSaveLinkedProgram(kEvictingProgramId, &emulator2);
+ cache_->SaveLinkedProgram(kEvictingProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN, cache_->GetLinkedProgramStatus(
+ old_source,
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+}
+
+TEST_F(MemoryProgramCacheTest, SaveCorrectProgram) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator1(kBinaryLength, kFormat, test_binary);
+
+ vertex_shader_->set_source("different!");
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator1);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+}
+
+TEST_F(MemoryProgramCacheTest, LoadCorrectProgram) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED, cache_->GetLinkedProgramStatus(
+ vertex_shader_->signature_source(),
+ NULL,
+ fragment_shader_->signature_source(),
+ NULL,
+ NULL));
+
+ SetExpectationsForLoadLinkedProgram(kProgramId, &emulator);
+
+ fragment_shader_->set_source("different!");
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_SUCCESS, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
+TEST_F(MemoryProgramCacheTest, OverwriteOnNewSave) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+
+ char test_binary2[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary2[i] = (i*2) % 250;
+ }
+ ProgramBinaryEmulator emulator2(kBinaryLength, kFormat, test_binary2);
+ SetExpectationsForSaveLinkedProgram(kProgramId, &emulator2);
+ cache_->SaveLinkedProgram(kProgramId, vertex_shader_, NULL,
+ fragment_shader_, NULL, NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+
+ SetExpectationsForLoadLinkedProgram(kProgramId, &emulator2);
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_SUCCESS, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ NULL,
+ fragment_shader_,
+ NULL,
+ NULL,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/memory_tracking.h b/gpu/command_buffer/service/memory_tracking.h
new file mode 100644
index 0000000..1514325
--- /dev/null
+++ b/gpu/command_buffer/service/memory_tracking.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MEMORY_TRACKING_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MEMORY_TRACKING_H_
+
+#include <string>
+#include "base/basictypes.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+
+namespace gpu {
+namespace gles2 {
+
+// A MemoryTracker is used to propagate per-ContextGroup memory usage
+// statistics to the global GpuMemoryManager.
+class MemoryTracker : public base::RefCounted<MemoryTracker> {
+ public:
+ enum Pool {
+ kUnmanaged,
+ kManaged
+ };
+
+ virtual void TrackMemoryAllocatedChange(size_t old_size,
+ size_t new_size,
+ Pool pool) = 0;
+
+ // Ensure a certain amount of GPU memory is free. Returns true on success.
+ virtual bool EnsureGPUMemoryAvailable(size_t size_needed) = 0;
+
+ protected:
+ friend class base::RefCounted<MemoryTracker>;
+ MemoryTracker() {}
+  virtual ~MemoryTracker() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MemoryTracker);
+};
+
+// A MemoryTypeTracker tracks the use of a particular type of memory (buffer,
+// texture, or renderbuffer) and forwards the result to a specified
+// MemoryTracker.
+class MemoryTypeTracker {
+ public:
+ MemoryTypeTracker(MemoryTracker* memory_tracker, MemoryTracker::Pool pool)
+ : memory_tracker_(memory_tracker),
+ pool_(pool),
+ has_done_update_(false),
+ mem_represented_(0),
+ mem_represented_at_last_update_(0) {
+ UpdateMemRepresented();
+ }
+
+ ~MemoryTypeTracker() {
+ UpdateMemRepresented();
+ }
+
+ void TrackMemAlloc(size_t bytes) {
+ mem_represented_ += bytes;
+ UpdateMemRepresented();
+ }
+
+ void TrackMemFree(size_t bytes) {
+    DCHECK_LE(bytes, mem_represented_);
+ mem_represented_ -= bytes;
+ UpdateMemRepresented();
+ }
+
+ size_t GetMemRepresented() const {
+ return mem_represented_at_last_update_;
+ }
+
+ // Ensure a certain amount of GPU memory is free. Returns true on success.
+ bool EnsureGPUMemoryAvailable(size_t size_needed) {
+ if (memory_tracker_) {
+ return memory_tracker_->EnsureGPUMemoryAvailable(size_needed);
+ }
+ return true;
+ }
+
+ private:
+ void UpdateMemRepresented() {
+    // Skip the update only if no update has been sent yet and the value is
+    // unchanged (e.g. the initial call from the constructor).
+ if (!has_done_update_ &&
+ mem_represented_ == mem_represented_at_last_update_) {
+ return;
+ }
+ if (memory_tracker_) {
+ memory_tracker_->TrackMemoryAllocatedChange(
+ mem_represented_at_last_update_,
+ mem_represented_,
+ pool_);
+ }
+ has_done_update_ = true;
+ mem_represented_at_last_update_ = mem_represented_;
+ }
+
+ MemoryTracker* memory_tracker_;
+ MemoryTracker::Pool pool_;
+ bool has_done_update_;
+ size_t mem_represented_;
+ size_t mem_represented_at_last_update_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryTypeTracker);
+};
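+// Illustrative usage sketch (not part of the original file; the identifiers
+// below are hypothetical):
+//
+//   MemoryTypeTracker texture_memory(context_group_memory_tracker,
+//                                    MemoryTracker::kUnmanaged);
+//   if (texture_memory.EnsureGPUMemoryAvailable(image_size)) {
+//     texture_memory.TrackMemAlloc(image_size);  // On texture definition.
+//     ...
+//     texture_memory.TrackMemFree(image_size);   // On texture deletion.
+//   }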
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MEMORY_TRACKING_H_
diff --git a/gpu/command_buffer/service/mocks.cc b/gpu/command_buffer/service/mocks.cc
new file mode 100644
index 0000000..143ec0b
--- /dev/null
+++ b/gpu/command_buffer/service/mocks.cc
@@ -0,0 +1,61 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+
+using testing::Invoke;
+using testing::_;
+
+namespace gpu {
+
+AsyncAPIMock::AsyncAPIMock(bool default_do_commands) {
+ testing::DefaultValue<error::Error>::Set(
+ error::kNoError);
+
+ if (default_do_commands) {
+ ON_CALL(*this, DoCommands(_, _, _, _))
+ .WillByDefault(Invoke(this, &AsyncAPIMock::FakeDoCommands));
+ }
+}
+
+AsyncAPIMock::~AsyncAPIMock() {}
+
+error::Error AsyncAPIMock::FakeDoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ return AsyncAPIInterface::DoCommands(
+ num_commands, buffer, num_entries, entries_processed);
+}
+
+void AsyncAPIMock::SetToken(unsigned int command,
+ unsigned int arg_count,
+ const void* _args) {
+ DCHECK(engine_);
+ DCHECK_EQ(1u, command);
+ DCHECK_EQ(1u, arg_count);
+ const cmd::SetToken* args =
+ static_cast<const cmd::SetToken*>(_args);
+ engine_->set_token(args->token);
+}
+
+namespace gles2 {
+
+MockShaderTranslator::MockShaderTranslator() {}
+
+MockShaderTranslator::~MockShaderTranslator() {}
+
+MockProgramCache::MockProgramCache() {}
+MockProgramCache::~MockProgramCache() {}
+
+MockMemoryTracker::MockMemoryTracker() {}
+MockMemoryTracker::~MockMemoryTracker() {}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/mocks.h b/gpu/command_buffer/service/mocks.h
new file mode 100644
index 0000000..17c8401
--- /dev/null
+++ b/gpu/command_buffer/service/mocks.h
@@ -0,0 +1,156 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains definitions for mock objects, used for testing.
+
+// TODO(apatrick): This file "manually" defines some mock objects. Using gMock
+// would definitely be preferable; unfortunately, it doesn't work on Windows
+// yet.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MOCKS_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MOCKS_H_
+
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "gpu/command_buffer/service/cmd_parser.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/program_cache.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+
+// Mocks an AsyncAPIInterface, using GMock.
+class AsyncAPIMock : public AsyncAPIInterface {
+ public:
+ explicit AsyncAPIMock(bool default_do_commands);
+ virtual ~AsyncAPIMock();
+
+ error::Error FakeDoCommands(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed);
+
+ // Predicate that matches args passed to DoCommand, by looking at the values.
+ class IsArgs {
+ public:
+ IsArgs(unsigned int arg_count, const void* args)
+ : arg_count_(arg_count),
+ args_(static_cast<CommandBufferEntry*>(const_cast<void*>(args))) {
+ }
+
+ bool operator() (const void* _args) const {
+ const CommandBufferEntry* args =
+ static_cast<const CommandBufferEntry*>(_args) + 1;
+ for (unsigned int i = 0; i < arg_count_; ++i) {
+ if (args[i].value_uint32 != args_[i].value_uint32) return false;
+ }
+ return true;
+ }
+
+ private:
+ unsigned int arg_count_;
+ CommandBufferEntry *args_;
+ };
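+  // Illustrative usage sketch (not part of the original file): IsArgs can be
+  // wrapped in ::testing::Truly() so a DoCommand expectation matches on the
+  // argument payload. The command id and args below are hypothetical.
+  //
+  //   CommandBufferEntry args[2];
+  //   EXPECT_CALL(api_mock, DoCommand(kSomeCommandId, 2,
+  //       ::testing::Truly(AsyncAPIMock::IsArgs(2, args))));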
+
+ MOCK_METHOD3(DoCommand, error::Error(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data));
+
+ MOCK_METHOD4(DoCommands,
+ error::Error(unsigned int num_commands,
+ const void* buffer,
+ int num_entries,
+ int* entries_processed));
+
+ const char* GetCommandName(unsigned int command_id) const {
+ return "";
+  }
+
+ // Sets the engine, to forward SetToken commands to it.
+ void set_engine(CommandBufferEngine *engine) { engine_ = engine; }
+
+ // Forwards the SetToken commands to the engine.
+ void SetToken(unsigned int command,
+ unsigned int arg_count,
+ const void* _args);
+
+ private:
+ CommandBufferEngine *engine_;
+};
+
+namespace gles2 {
+
+class MockShaderTranslator : public ShaderTranslatorInterface {
+ public:
+ MockShaderTranslator();
+ virtual ~MockShaderTranslator();
+
+ MOCK_METHOD5(Init, bool(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ GlslImplementationType glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds));
+ MOCK_CONST_METHOD7(Translate, bool(
+ const std::string& shader_source,
+ std::string* info_log,
+ std::string* translated_source,
+ VariableMap* attrib_map,
+ VariableMap* uniform_map,
+ VariableMap* varying_map,
+ NameMap* name_map));
+ MOCK_CONST_METHOD0(
+ GetStringForOptionsThatWouldAffectCompilation, std::string());
+};
+
+class MockProgramCache : public ProgramCache {
+ public:
+ MockProgramCache();
+ virtual ~MockProgramCache();
+
+ MOCK_METHOD7(LoadLinkedProgram, ProgramLoadResult(
+ GLuint program,
+ Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& callback));
+
+ MOCK_METHOD7(SaveLinkedProgram, void(
+ GLuint program,
+ const Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& callback));
+ MOCK_METHOD1(LoadProgram, void(const std::string&));
+
+ private:
+ MOCK_METHOD0(ClearBackend, void());
+};
+
+class MockMemoryTracker : public MemoryTracker {
+ public:
+ MockMemoryTracker();
+
+ MOCK_METHOD3(TrackMemoryAllocatedChange, void(
+ size_t old_size, size_t new_size, Pool pool));
+ MOCK_METHOD1(EnsureGPUMemoryAvailable, bool(size_t size_needed));
+
+ private:
+ friend class ::testing::StrictMock<MockMemoryTracker>;
+ friend class base::RefCounted< ::testing::StrictMock<MockMemoryTracker> >;
+ virtual ~MockMemoryTracker();
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MOCKS_H_
diff --git a/gpu/command_buffer/service/program_cache.cc b/gpu/command_buffer/service/program_cache.cc
new file mode 100644
index 0000000..ad395c7
--- /dev/null
+++ b/gpu/command_buffer/service/program_cache.cc
@@ -0,0 +1,136 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/program_cache.h"
+
+#include <string>
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+ProgramCache::ProgramCache() {}
+ProgramCache::~ProgramCache() {}
+
+void ProgramCache::Clear() {
+ ClearBackend();
+ link_status_.clear();
+}
+
+ProgramCache::LinkedProgramStatus ProgramCache::GetLinkedProgramStatus(
+ const std::string& untranslated_a,
+ const ShaderTranslatorInterface* translator_a,
+ const std::string& untranslated_b,
+ const ShaderTranslatorInterface* translator_b,
+ const std::map<std::string, GLint>* bind_attrib_location_map) const {
+ char a_sha[kHashLength];
+ char b_sha[kHashLength];
+ ComputeShaderHash(untranslated_a, translator_a, a_sha);
+ ComputeShaderHash(untranslated_b, translator_b, b_sha);
+
+ char sha[kHashLength];
+ ComputeProgramHash(a_sha,
+ b_sha,
+ bind_attrib_location_map,
+ sha);
+ const std::string sha_string(sha, kHashLength);
+
+ LinkStatusMap::const_iterator found = link_status_.find(sha_string);
+ if (found == link_status_.end()) {
+ return ProgramCache::LINK_UNKNOWN;
+ } else {
+ return found->second;
+ }
+}
+
+void ProgramCache::LinkedProgramCacheSuccess(
+ const std::string& shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const std::string& shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map) {
+ char a_sha[kHashLength];
+ char b_sha[kHashLength];
+ ComputeShaderHash(shader_a, translator_a, a_sha);
+ ComputeShaderHash(shader_b, translator_b, b_sha);
+ char sha[kHashLength];
+ ComputeProgramHash(a_sha,
+ b_sha,
+ bind_attrib_location_map,
+ sha);
+ const std::string sha_string(sha, kHashLength);
+
+ LinkedProgramCacheSuccess(sha_string);
+}
+
+void ProgramCache::LinkedProgramCacheSuccess(const std::string& program_hash) {
+ link_status_[program_hash] = LINK_SUCCEEDED;
+}
+
+void ProgramCache::ComputeShaderHash(
+ const std::string& str,
+ const ShaderTranslatorInterface* translator,
+ char* result) const {
+ std::string s((
+ translator ? translator->GetStringForOptionsThatWouldAffectCompilation() :
+ std::string()) + str);
+ base::SHA1HashBytes(reinterpret_cast<const unsigned char*>(s.c_str()),
+ s.length(), reinterpret_cast<unsigned char*>(result));
+}
+
+void ProgramCache::Evict(const std::string& program_hash) {
+ link_status_.erase(program_hash);
+}
+
+namespace {
+size_t CalculateMapSize(const std::map<std::string, GLint>* map) {
+ if (!map) {
+ return 0;
+ }
+ std::map<std::string, GLint>::const_iterator it;
+ size_t total = 0;
+ for (it = map->begin(); it != map->end(); ++it) {
+ total += 4 + it->first.length();
+ }
+ return total;
+}
+} // anonymous namespace
+
+void ProgramCache::ComputeProgramHash(
+ const char* hashed_shader_0,
+ const char* hashed_shader_1,
+ const std::map<std::string, GLint>* bind_attrib_location_map,
+ char* result) const {
+ const size_t shader0_size = kHashLength;
+ const size_t shader1_size = kHashLength;
+ const size_t map_size = CalculateMapSize(bind_attrib_location_map);
+ const size_t total_size = shader0_size + shader1_size + map_size;
+
+ scoped_ptr<unsigned char[]> buffer(new unsigned char[total_size]);
+ memcpy(buffer.get(), hashed_shader_0, shader0_size);
+ memcpy(&buffer[shader0_size], hashed_shader_1, shader1_size);
+ if (map_size != 0) {
+    // Copy the bind attrib location map into the buffer.
+ size_t current_pos = shader0_size + shader1_size;
+ std::map<std::string, GLint>::const_iterator it;
+ for (it = bind_attrib_location_map->begin();
+ it != bind_attrib_location_map->end();
+ ++it) {
+ const size_t name_size = it->first.length();
+ memcpy(&buffer.get()[current_pos], it->first.c_str(), name_size);
+ current_pos += name_size;
+ const GLint value = it->second;
+ buffer[current_pos++] = value >> 24;
+ buffer[current_pos++] = value >> 16;
+ buffer[current_pos++] = value >> 8;
+ buffer[current_pos++] = value;
+ }
+ }
+ base::SHA1HashBytes(buffer.get(),
+ total_size, reinterpret_cast<unsigned char*>(result));
+}
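+// Illustrative note (not part of the original file): the buffer hashed above
+// is laid out as
+//   [shader 0 hash: kHashLength bytes][shader 1 hash: kHashLength bytes]
+//   [per map entry: the attribute name's bytes, then its GLint location
+//    packed as four big-endian bytes]
+// so a hypothetical entry {"pos", 3} appends 'p' 'o' 's' 0x00 0x00 0x00 0x03.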
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/program_cache.h b/gpu/command_buffer/service/program_cache.h
new file mode 100644
index 0000000..3fb5687
--- /dev/null
+++ b/gpu/command_buffer/service/program_cache.h
@@ -0,0 +1,118 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_PROGRAM_CACHE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_PROGRAM_CACHE_H_
+
+#include <map>
+#include <string>
+
+#include "base/containers/hash_tables.h"
+#include "base/sha1.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+class Shader;
+class ShaderTranslator;
+
+// Program cache base class for caching linked GPU programs.
+class GPU_EXPORT ProgramCache {
+ public:
+ static const size_t kHashLength = base::kSHA1Length;
+
+ typedef std::map<std::string, GLint> LocationMap;
+
+ enum LinkedProgramStatus {
+ LINK_UNKNOWN,
+ LINK_SUCCEEDED
+ };
+
+ enum ProgramLoadResult {
+ PROGRAM_LOAD_FAILURE,
+ PROGRAM_LOAD_SUCCESS
+ };
+
+ ProgramCache();
+ virtual ~ProgramCache();
+
+ LinkedProgramStatus GetLinkedProgramStatus(
+ const std::string& untranslated_shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const std::string& untranslated_shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map) const;
+
+ // Loads the linked program from the cache. If the program is not found or
+ // there was an error, PROGRAM_LOAD_FAILURE should be returned.
+ virtual ProgramLoadResult LoadLinkedProgram(
+ GLuint program,
+ Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) = 0;
+
+ // Saves the program into the cache. If successful, the implementation should
+ // call LinkedProgramCacheSuccess.
+ virtual void SaveLinkedProgram(
+ GLuint program,
+ const Shader* shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const Shader* shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map,
+ const ShaderCacheCallback& shader_callback) = 0;
+
+ virtual void LoadProgram(const std::string& program) = 0;
+
+  // Clears the cache.
+ void Clear();
+
+ // Only for testing
+ void LinkedProgramCacheSuccess(const std::string& shader_a,
+ const ShaderTranslatorInterface* translator_a,
+ const std::string& shader_b,
+ const ShaderTranslatorInterface* translator_b,
+ const LocationMap* bind_attrib_location_map);
+
+ protected:
+  // Called by the implementing class after a program was successfully cached.
+ void LinkedProgramCacheSuccess(const std::string& program_hash);
+
+  // The result is not null terminated.
+ void ComputeShaderHash(const std::string& shader,
+ const ShaderTranslatorInterface* translator,
+ char* result) const;
+
+  // The result is not null terminated. Hashed shaders are expected to be
+  // kHashLength in length.
+ void ComputeProgramHash(
+ const char* hashed_shader_0,
+ const char* hashed_shader_1,
+ const LocationMap* bind_attrib_location_map,
+ char* result) const;
+
+ void Evict(const std::string& program_hash);
+
+ private:
+ typedef base::hash_map<std::string,
+ LinkedProgramStatus> LinkStatusMap;
+
+  // Called to clear the backend cache.
+ virtual void ClearBackend() = 0;
+
+ LinkStatusMap link_status_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProgramCache);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_PROGRAM_CACHE_H_
diff --git a/gpu/command_buffer/service/program_cache_unittest.cc b/gpu/command_buffer/service/program_cache_unittest.cc
new file mode 100644
index 0000000..4e2abc3
--- /dev/null
+++ b/gpu/command_buffer/service/program_cache_unittest.cc
@@ -0,0 +1,201 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/program_cache.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::Return;
+
+namespace gpu {
+namespace gles2 {
+
+class NoBackendProgramCache : public ProgramCache {
+ public:
+ virtual ProgramLoadResult LoadLinkedProgram(
+ GLuint /* program */,
+ Shader* /* shader_a */,
+ const ShaderTranslatorInterface* /* translator_a */,
+ Shader* /* shader_b */,
+ const ShaderTranslatorInterface* /* translator_b */,
+ const LocationMap* /* bind_attrib_location_map */,
+ const ShaderCacheCallback& /* callback */) OVERRIDE {
+ return PROGRAM_LOAD_SUCCESS;
+ }
+ virtual void SaveLinkedProgram(
+ GLuint /* program */,
+ const Shader* /* shader_a */,
+      const ShaderTranslatorInterface* /* translator_a */,
+ const Shader* /* shader_b */,
+ const ShaderTranslatorInterface* /* translator_b */,
+ const LocationMap* /* bind_attrib_location_map */,
+ const ShaderCacheCallback& /* callback */) OVERRIDE { }
+
+ virtual void LoadProgram(const std::string& /* program */) OVERRIDE {}
+
+ virtual void ClearBackend() OVERRIDE {}
+
+ void SaySuccessfullyCached(const std::string& shader1,
+ const ShaderTranslatorInterface* translator_1,
+ const std::string& shader2,
+ const ShaderTranslatorInterface* translator_2,
+ std::map<std::string, GLint>* attrib_map) {
+ char a_sha[kHashLength];
+ char b_sha[kHashLength];
+ ComputeShaderHash(shader1, translator_1, a_sha);
+ ComputeShaderHash(shader2, translator_2, b_sha);
+
+ char sha[kHashLength];
+ ComputeProgramHash(a_sha,
+ b_sha,
+ attrib_map,
+ sha);
+ const std::string shaString(sha, kHashLength);
+
+ LinkedProgramCacheSuccess(shaString);
+ }
+
+ void ComputeShaderHash(const std::string& shader,
+ const ShaderTranslatorInterface* translator,
+ char* result) const {
+ ProgramCache::ComputeShaderHash(shader, translator, result);
+ }
+
+ void ComputeProgramHash(const char* hashed_shader_0,
+ const char* hashed_shader_1,
+ const LocationMap* bind_attrib_location_map,
+ char* result) const {
+ ProgramCache::ComputeProgramHash(hashed_shader_0,
+ hashed_shader_1,
+ bind_attrib_location_map,
+ result);
+ }
+
+ void Evict(const std::string& program_hash) {
+ ProgramCache::Evict(program_hash);
+ }
+};
+
+class ProgramCacheTest : public testing::Test {
+ public:
+ ProgramCacheTest() :
+ cache_(new NoBackendProgramCache()) { }
+
+ protected:
+ scoped_ptr<NoBackendProgramCache> cache_;
+};
+
+TEST_F(ProgramCacheTest, LinkStatusSave) {
+ const std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ {
+ std::string shader_a = shader1;
+ std::string shader_b = shader2;
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(
+ shader_a, NULL, shader_b, NULL, NULL));
+ cache_->SaySuccessfullyCached(shader_a, NULL, shader_b, NULL, NULL);
+
+ shader_a.clear();
+ shader_b.clear();
+ }
+  // Make sure the cache stored copies of the shader strings.
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED,
+ cache_->GetLinkedProgramStatus(
+ shader1, NULL, shader2, NULL, NULL));
+}
+
+TEST_F(ProgramCacheTest, LinkUnknownOnFragmentSourceChange) {
+ const std::string shader1 = "abcd1234";
+ std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ cache_->SaySuccessfullyCached(shader1, NULL, shader2, NULL, NULL);
+
+ shader2 = "different!";
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+}
+
+TEST_F(ProgramCacheTest, LinkUnknownOnVertexSourceChange) {
+ std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ cache_->SaySuccessfullyCached(shader1, NULL, shader2, NULL, NULL);
+
+ shader1 = "different!";
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+}
+
+TEST_F(ProgramCacheTest, StatusEviction) {
+ const std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ cache_->SaySuccessfullyCached(shader1, NULL, shader2, NULL, NULL);
+ char a_sha[ProgramCache::kHashLength];
+ char b_sha[ProgramCache::kHashLength];
+ cache_->ComputeShaderHash(shader1, NULL, a_sha);
+ cache_->ComputeShaderHash(shader2, NULL, b_sha);
+
+ char sha[ProgramCache::kHashLength];
+ cache_->ComputeProgramHash(a_sha,
+ b_sha,
+ NULL,
+ sha);
+ cache_->Evict(std::string(sha, ProgramCache::kHashLength));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+}
+
+TEST_F(ProgramCacheTest, EvictionWithReusedShader) {
+ const std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ const std::string shader3 = "asbjbbjj239a";
+ cache_->SaySuccessfullyCached(shader1, NULL, shader2, NULL, NULL);
+ cache_->SaySuccessfullyCached(shader1, NULL, shader3, NULL, NULL);
+
+ char a_sha[ProgramCache::kHashLength];
+ char b_sha[ProgramCache::kHashLength];
+ char c_sha[ProgramCache::kHashLength];
+ cache_->ComputeShaderHash(shader1, NULL, a_sha);
+ cache_->ComputeShaderHash(shader2, NULL, b_sha);
+ cache_->ComputeShaderHash(shader3, NULL, c_sha);
+
+ char sha[ProgramCache::kHashLength];
+ cache_->ComputeProgramHash(a_sha,
+ b_sha,
+ NULL,
+ sha);
+ cache_->Evict(std::string(sha, ProgramCache::kHashLength));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader3, NULL, NULL));
+
+
+ cache_->ComputeProgramHash(a_sha,
+ c_sha,
+ NULL,
+ sha);
+ cache_->Evict(std::string(sha, ProgramCache::kHashLength));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader3, NULL, NULL));
+}
+
+TEST_F(ProgramCacheTest, StatusClear) {
+ const std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ const std::string shader3 = "asbjbbjj239a";
+ cache_->SaySuccessfullyCached(shader1, NULL, shader2, NULL, NULL);
+ cache_->SaySuccessfullyCached(shader1, NULL, shader3, NULL, NULL);
+ cache_->Clear();
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader2, NULL, NULL));
+ EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
+ cache_->GetLinkedProgramStatus(shader1, NULL, shader3, NULL, NULL));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/program_manager.cc b/gpu/command_buffer/service/program_manager.cc
new file mode 100644
index 0000000..4dd4bc4
--- /dev/null
+++ b/gpu/command_buffer/service/program_manager.cc
@@ -0,0 +1,1374 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/program_manager.h"
+
+#include <algorithm>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/program_cache.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "third_party/re2/re2/re2.h"
+
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+struct UniformType {
+ explicit UniformType(const ShaderTranslator::VariableInfo uniform)
+ : type(uniform.type),
+ size(uniform.size),
+ precision(uniform.precision) { }
+
+ UniformType()
+ : type(0),
+ size(0),
+ precision(SH_PRECISION_MEDIUMP) { }
+
+ bool operator==(const UniformType& other) const {
+ return type == other.type &&
+ size == other.size &&
+ precision == other.precision;
+ }
+
+ int type;
+ int size;
+ int precision;
+};
+
+int ShaderTypeToIndex(GLenum shader_type) {
+ switch (shader_type) {
+ case GL_VERTEX_SHADER:
+ return 0;
+ case GL_FRAGMENT_SHADER:
+ return 1;
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
+// Given a name like "foo.bar[123].moo[456]", sets new_name to
+// "foo.bar[123].moo" and element_index to 456. Returns false if the element
+// expression is not a whole decimal number, for example "foo[1b2]".
+bool GetUniformNameSansElement(
+ const std::string& name, int* element_index, std::string* new_name) {
+ DCHECK(element_index);
+ DCHECK(new_name);
+ if (name.size() < 3 || name[name.size() - 1] != ']') {
+ *element_index = 0;
+ *new_name = name;
+ return true;
+ }
+
+ // Look for an array specification.
+ size_t open_pos = name.find_last_of('[');
+ if (open_pos == std::string::npos ||
+ open_pos >= name.size() - 2) {
+ return false;
+ }
+
+ GLint index = 0;
+ size_t last = name.size() - 1;
+ for (size_t pos = open_pos + 1; pos < last; ++pos) {
+ int8 digit = name[pos] - '0';
+ if (digit < 0 || digit > 9) {
+ return false;
+ }
+ index = index * 10 + digit;
+ }
+
+ *element_index = index;
+ *new_name = name.substr(0, open_pos);
+ return true;
+}
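+// Illustrative sketch (not part of the original file):
+//   int index = 0;
+//   std::string base;
+//   GetUniformNameSansElement("color[2]", &index, &base);  // base "color", 2
+//   GetUniformNameSansElement("color", &index, &base);     // base "color", 0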
+
+bool IsBuiltInVarying(const std::string& name) {
+ // Built-in variables.
+ const char* kBuiltInVaryings[] = {
+ "gl_FragCoord",
+ "gl_FrontFacing",
+ "gl_PointCoord"
+ };
+ for (size_t ii = 0; ii < arraysize(kBuiltInVaryings); ++ii) {
+ if (name == kBuiltInVaryings[ii])
+ return true;
+ }
+ return false;
+}
+
+} // anonymous namespace.
+
+Program::UniformInfo::UniformInfo()
+ : size(0),
+ type(GL_NONE),
+ fake_location_base(0),
+ is_array(false) {
+}
+
+Program::UniformInfo::UniformInfo(GLsizei _size,
+ GLenum _type,
+ int _fake_location_base,
+ const std::string& _name)
+ : size(_size),
+ type(_type),
+ accepts_api_type(0),
+ fake_location_base(_fake_location_base),
+ is_array(false),
+ name(_name) {
+ switch (type) {
+ case GL_INT:
+ accepts_api_type = kUniform1i;
+ break;
+ case GL_INT_VEC2:
+ accepts_api_type = kUniform2i;
+ break;
+ case GL_INT_VEC3:
+ accepts_api_type = kUniform3i;
+ break;
+ case GL_INT_VEC4:
+ accepts_api_type = kUniform4i;
+ break;
+
+ case GL_BOOL:
+ accepts_api_type = kUniform1i | kUniform1f;
+ break;
+ case GL_BOOL_VEC2:
+ accepts_api_type = kUniform2i | kUniform2f;
+ break;
+ case GL_BOOL_VEC3:
+ accepts_api_type = kUniform3i | kUniform3f;
+ break;
+ case GL_BOOL_VEC4:
+ accepts_api_type = kUniform4i | kUniform4f;
+ break;
+
+ case GL_FLOAT:
+ accepts_api_type = kUniform1f;
+ break;
+ case GL_FLOAT_VEC2:
+ accepts_api_type = kUniform2f;
+ break;
+ case GL_FLOAT_VEC3:
+ accepts_api_type = kUniform3f;
+ break;
+ case GL_FLOAT_VEC4:
+ accepts_api_type = kUniform4f;
+ break;
+
+ case GL_FLOAT_MAT2:
+ accepts_api_type = kUniformMatrix2f;
+ break;
+ case GL_FLOAT_MAT3:
+ accepts_api_type = kUniformMatrix3f;
+ break;
+ case GL_FLOAT_MAT4:
+ accepts_api_type = kUniformMatrix4f;
+ break;
+
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_2D_RECT_ARB:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_3D_OES:
+ case GL_SAMPLER_EXTERNAL_OES:
+ accepts_api_type = kUniform1i;
+ break;
+ default:
+ NOTREACHED() << "Unhandled UniformInfo type " << type;
+ break;
+ }
+}
+
+Program::UniformInfo::~UniformInfo() {}
+
+bool ProgramManager::IsInvalidPrefix(const char* name, size_t length) {
+ static const char kInvalidPrefix[] = { 'g', 'l', '_' };
+ return (length >= sizeof(kInvalidPrefix) &&
+ memcmp(name, kInvalidPrefix, sizeof(kInvalidPrefix)) == 0);
+}
+
+Program::Program(
+ ProgramManager* manager, GLuint service_id)
+ : manager_(manager),
+ use_count_(0),
+ max_attrib_name_length_(0),
+ max_uniform_name_length_(0),
+ service_id_(service_id),
+ deleted_(false),
+ valid_(false),
+ link_status_(false),
+ uniforms_cleared_(false),
+ num_uniforms_(0) {
+ manager_->StartTracking(this);
+}
+
+void Program::Reset() {
+ valid_ = false;
+ link_status_ = false;
+ num_uniforms_ = 0;
+ max_uniform_name_length_ = 0;
+ max_attrib_name_length_ = 0;
+ attrib_infos_.clear();
+ uniform_infos_.clear();
+ sampler_indices_.clear();
+ attrib_location_to_index_map_.clear();
+}
+
+std::string Program::ProcessLogInfo(
+ const std::string& log) {
+ std::string output;
+ re2::StringPiece input(log);
+ std::string prior_log;
+ std::string hashed_name;
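+  // Replace every hashed identifier of the form "webgl_<hex>" with its
+  // original (pre-translation) name when one is known.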
+ while (RE2::Consume(&input,
+ "(.*?)(webgl_[0123456789abcdefABCDEF]+)",
+ &prior_log,
+ &hashed_name)) {
+ output += prior_log;
+
+ const std::string* original_name =
+ GetOriginalNameFromHashedName(hashed_name);
+ if (original_name)
+ output += *original_name;
+ else
+ output += hashed_name;
+ }
+
+ return output + input.as_string();
+}
+
+void Program::UpdateLogInfo() {
+ GLint max_len = 0;
+ glGetProgramiv(service_id_, GL_INFO_LOG_LENGTH, &max_len);
+ if (max_len == 0) {
+ set_log_info(NULL);
+ return;
+ }
+ scoped_ptr<char[]> temp(new char[max_len]);
+ GLint len = 0;
+ glGetProgramInfoLog(service_id_, max_len, &len, temp.get());
+ DCHECK(max_len == 0 || len < max_len);
+ DCHECK(len == 0 || temp[len] == '\0');
+ std::string log(temp.get(), len);
+ set_log_info(ProcessLogInfo(log).c_str());
+}
+
+void Program::ClearUniforms(
+ std::vector<uint8>* zero_buffer) {
+ DCHECK(zero_buffer);
+ if (uniforms_cleared_) {
+ return;
+ }
+ uniforms_cleared_ = true;
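+  // Zero every element of every valid uniform, growing the shared
+  // zero-filled buffer as needed.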
+ for (size_t ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& uniform_info = uniform_infos_[ii];
+ if (!uniform_info.IsValid()) {
+ continue;
+ }
+ GLint location = uniform_info.element_locations[0];
+ GLsizei size = uniform_info.size;
+ uint32 unit_size = GLES2Util::GetGLDataTypeSizeForUniforms(
+ uniform_info.type);
+ uint32 size_needed = size * unit_size;
+ if (size_needed > zero_buffer->size()) {
+ zero_buffer->resize(size_needed, 0u);
+ }
+ const void* zero = &(*zero_buffer)[0];
+ switch (uniform_info.type) {
+ case GL_FLOAT:
+ glUniform1fv(location, size, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_FLOAT_VEC2:
+ glUniform2fv(location, size, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_FLOAT_VEC3:
+ glUniform3fv(location, size, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_FLOAT_VEC4:
+ glUniform4fv(location, size, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_INT:
+ case GL_BOOL:
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_3D_OES:
+ case GL_SAMPLER_2D_RECT_ARB:
+ glUniform1iv(location, size, reinterpret_cast<const GLint*>(zero));
+ break;
+ case GL_INT_VEC2:
+ case GL_BOOL_VEC2:
+ glUniform2iv(location, size, reinterpret_cast<const GLint*>(zero));
+ break;
+ case GL_INT_VEC3:
+ case GL_BOOL_VEC3:
+ glUniform3iv(location, size, reinterpret_cast<const GLint*>(zero));
+ break;
+ case GL_INT_VEC4:
+ case GL_BOOL_VEC4:
+ glUniform4iv(location, size, reinterpret_cast<const GLint*>(zero));
+ break;
+ case GL_FLOAT_MAT2:
+ glUniformMatrix2fv(
+ location, size, false, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_FLOAT_MAT3:
+ glUniformMatrix3fv(
+ location, size, false, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ case GL_FLOAT_MAT4:
+ glUniformMatrix4fv(
+ location, size, false, reinterpret_cast<const GLfloat*>(zero));
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ }
+}
+
+namespace {
+
+struct UniformData {
+ UniformData() : size(-1), type(GL_NONE), location(0), added(false) {
+ }
+ std::string queried_name;
+ std::string corrected_name;
+ std::string original_name;
+ GLsizei size;
+ GLenum type;
+ GLint location;
+ bool added;
+};
+
+struct UniformDataComparer {
+ bool operator()(const UniformData& lhs, const UniformData& rhs) const {
+ return lhs.queried_name < rhs.queried_name;
+ }
+};
+
+} // anonymous namespace
+
+void Program::Update() {
+ Reset();
+ UpdateLogInfo();
+ link_status_ = true;
+ uniforms_cleared_ = false;
+ GLint num_attribs = 0;
+ GLint max_len = 0;
+ GLint max_location = -1;
+ glGetProgramiv(service_id_, GL_ACTIVE_ATTRIBUTES, &num_attribs);
+ glGetProgramiv(service_id_, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, &max_len);
+ // TODO(gman): Should we check for error?
+ scoped_ptr<char[]> name_buffer(new char[max_len]);
+ for (GLint ii = 0; ii < num_attribs; ++ii) {
+ GLsizei length = 0;
+ GLsizei size = 0;
+ GLenum type = 0;
+ glGetActiveAttrib(
+ service_id_, ii, max_len, &length, &size, &type, name_buffer.get());
+ DCHECK(max_len == 0 || length < max_len);
+ DCHECK(length == 0 || name_buffer[length] == '\0');
+ if (!ProgramManager::IsInvalidPrefix(name_buffer.get(), length)) {
+ std::string name;
+ std::string original_name;
+ GetCorrectedVariableInfo(
+ false, name_buffer.get(), &name, &original_name, &size, &type);
+ // TODO(gman): Should we check for error?
+ GLint location = glGetAttribLocation(service_id_, name_buffer.get());
+ if (location > max_location) {
+ max_location = location;
+ }
+ attrib_infos_.push_back(
+ VertexAttrib(size, type, original_name, location));
+ max_attrib_name_length_ = std::max(
+ max_attrib_name_length_, static_cast<GLsizei>(original_name.size()));
+ }
+ }
+
+ // Create attrib location to index map.
+ attrib_location_to_index_map_.resize(max_location + 1);
+ for (GLint ii = 0; ii <= max_location; ++ii) {
+ attrib_location_to_index_map_[ii] = -1;
+ }
+ for (size_t ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ attrib_location_to_index_map_[info.location] = ii;
+ }
+
+#if !defined(NDEBUG)
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUServiceLoggingGPU)) {
+ DVLOG(1) << "----: attribs for service_id: " << service_id();
+ for (size_t ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ DVLOG(1) << ii << ": loc = " << info.location
+ << ", size = " << info.size
+ << ", type = " << GLES2Util::GetStringEnum(info.type)
+ << ", name = " << info.name;
+ }
+ }
+#endif
+
+ max_len = 0;
+ GLint num_uniforms = 0;
+ glGetProgramiv(service_id_, GL_ACTIVE_UNIFORMS, &num_uniforms);
+ glGetProgramiv(service_id_, GL_ACTIVE_UNIFORM_MAX_LENGTH, &max_len);
+ name_buffer.reset(new char[max_len]);
+
+ // Reads all the names.
+ std::vector<UniformData> uniform_data;
+ for (GLint ii = 0; ii < num_uniforms; ++ii) {
+ GLsizei length = 0;
+ UniformData data;
+ glGetActiveUniform(
+ service_id_, ii, max_len, &length,
+ &data.size, &data.type, name_buffer.get());
+ DCHECK(max_len == 0 || length < max_len);
+ DCHECK(length == 0 || name_buffer[length] == '\0');
+ if (!ProgramManager::IsInvalidPrefix(name_buffer.get(), length)) {
+ data.queried_name = std::string(name_buffer.get());
+ GetCorrectedVariableInfo(
+ true, name_buffer.get(), &data.corrected_name, &data.original_name,
+ &data.size, &data.type);
+ uniform_data.push_back(data);
+ }
+ }
+
+  // NOTE: We don't care if two uniforms are bound to the same location.
+  // One of them will take precedence. The spec allows this, same as
+  // BindAttribLocation.
+  //
+  // The reason we don't check is that, if we were to fail, we would have to
+  // restore the previous program; but since we've already linked successfully
+  // at this point, the previous program is gone.
+
+ // Assigns the uniforms with bindings.
+ size_t next_available_index = 0;
+ for (size_t ii = 0; ii < uniform_data.size(); ++ii) {
+ UniformData& data = uniform_data[ii];
+ data.location = glGetUniformLocation(
+ service_id_, data.queried_name.c_str());
+ // remove "[0]"
+ std::string short_name;
+ int element_index = 0;
+ bool good ALLOW_UNUSED = GetUniformNameSansElement(
+        data.queried_name, &element_index, &short_name);
+ DCHECK(good);
+ LocationMap::const_iterator it = bind_uniform_location_map_.find(
+ short_name);
+ if (it != bind_uniform_location_map_.end()) {
+ data.added = AddUniformInfo(
+ data.size, data.type, data.location, it->second, data.corrected_name,
+ data.original_name, &next_available_index);
+ }
+ }
+
+ // Assigns the uniforms that were not bound.
+ for (size_t ii = 0; ii < uniform_data.size(); ++ii) {
+ const UniformData& data = uniform_data[ii];
+ if (!data.added) {
+ AddUniformInfo(
+ data.size, data.type, data.location, -1, data.corrected_name,
+ data.original_name, &next_available_index);
+ }
+ }
+
+#if !defined(NDEBUG)
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUServiceLoggingGPU)) {
+ DVLOG(1) << "----: uniforms for service_id: " << service_id();
+ for (size_t ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& info = uniform_infos_[ii];
+ if (info.IsValid()) {
+ DVLOG(1) << ii << ": loc = " << info.element_locations[0]
+ << ", size = " << info.size
+ << ", type = " << GLES2Util::GetStringEnum(info.type)
+ << ", name = " << info.name;
+ }
+ }
+ }
+#endif
+
+ valid_ = true;
+}
+
+void Program::ExecuteBindAttribLocationCalls() {
+ for (LocationMap::const_iterator it = bind_attrib_location_map_.begin();
+ it != bind_attrib_location_map_.end(); ++it) {
+ const std::string* mapped_name = GetAttribMappedName(it->first);
+ if (mapped_name && *mapped_name != it->first)
+ glBindAttribLocation(service_id_, it->second, mapped_name->c_str());
+ }
+}
+
+bool Program::Link(ShaderManager* manager,
+ ShaderTranslator* vertex_translator,
+ ShaderTranslator* fragment_translator,
+ Program::VaryingsPackingOption varyings_packing_option,
+ const ShaderCacheCallback& shader_callback) {
+ ClearLinkStatus();
+ if (!CanLink()) {
+ set_log_info("missing shaders");
+ return false;
+ }
+ if (DetectAttribLocationBindingConflicts()) {
+ set_log_info("glBindAttribLocation() conflicts");
+ return false;
+ }
+ std::string conflicting_name;
+ if (DetectUniformsMismatch(&conflicting_name)) {
+ std::string info_log = "Uniforms with the same name but different "
+ "type/precision: " + conflicting_name;
+ set_log_info(ProcessLogInfo(info_log).c_str());
+ return false;
+ }
+ if (DetectVaryingsMismatch(&conflicting_name)) {
+ std::string info_log = "Varyings with the same name but different type, "
+ "or statically used varyings in fragment shader are "
+ "not declared in vertex shader: " + conflicting_name;
+ set_log_info(ProcessLogInfo(info_log).c_str());
+ return false;
+ }
+ if (DetectGlobalNameConflicts(&conflicting_name)) {
+    std::string info_log = "Name conflicts between a uniform and an "
+                           "attribute: " + conflicting_name;
+ set_log_info(ProcessLogInfo(info_log).c_str());
+ return false;
+ }
+ if (!CheckVaryingsPacking(varyings_packing_option)) {
+ set_log_info("Varyings over maximum register limit");
+ return false;
+ }
+
+ TimeTicks before_time = TimeTicks::HighResNow();
+ bool link = true;
+ ProgramCache* cache = manager_->program_cache_;
+ if (cache) {
+ DCHECK(!attached_shaders_[0]->signature_source().empty() &&
+ !attached_shaders_[1]->signature_source().empty());
+ ProgramCache::LinkedProgramStatus status = cache->GetLinkedProgramStatus(
+ attached_shaders_[0]->signature_source(),
+ vertex_translator,
+ attached_shaders_[1]->signature_source(),
+ fragment_translator,
+ &bind_attrib_location_map_);
+
+ if (status == ProgramCache::LINK_SUCCEEDED) {
+ ProgramCache::ProgramLoadResult success =
+ cache->LoadLinkedProgram(service_id(),
+ attached_shaders_[0].get(),
+ vertex_translator,
+ attached_shaders_[1].get(),
+ fragment_translator,
+ &bind_attrib_location_map_,
+ shader_callback);
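+      // If the cached binary fails to load, fall back to a full link below.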
+ link = success != ProgramCache::PROGRAM_LOAD_SUCCESS;
+ UMA_HISTOGRAM_BOOLEAN("GPU.ProgramCache.LoadBinarySuccess", !link);
+ }
+ }
+
+ if (link) {
+ ExecuteBindAttribLocationCalls();
+ before_time = TimeTicks::HighResNow();
+ if (cache && gfx::g_driver_gl.ext.b_GL_ARB_get_program_binary) {
+ glProgramParameteri(service_id(),
+ PROGRAM_BINARY_RETRIEVABLE_HINT,
+ GL_TRUE);
+ }
+ glLinkProgram(service_id());
+ }
+
+ GLint success = 0;
+ glGetProgramiv(service_id(), GL_LINK_STATUS, &success);
+ if (success == GL_TRUE) {
+ Update();
+ if (link) {
+ if (cache) {
+ cache->SaveLinkedProgram(service_id(),
+ attached_shaders_[0].get(),
+ vertex_translator,
+ attached_shaders_[1].get(),
+ fragment_translator,
+ &bind_attrib_location_map_,
+ shader_callback);
+ }
+ UMA_HISTOGRAM_CUSTOM_COUNTS(
+ "GPU.ProgramCache.BinaryCacheMissTime",
+ (TimeTicks::HighResNow() - before_time).InMicroseconds(),
+ 0,
+ TimeDelta::FromSeconds(10).InMicroseconds(),
+ 50);
+ } else {
+ UMA_HISTOGRAM_CUSTOM_COUNTS(
+ "GPU.ProgramCache.BinaryCacheHitTime",
+ (TimeTicks::HighResNow() - before_time).InMicroseconds(),
+ 0,
+ TimeDelta::FromSeconds(1).InMicroseconds(),
+ 50);
+ }
+ } else {
+ UpdateLogInfo();
+ }
+ return success == GL_TRUE;
+}
+
+void Program::Validate() {
+ if (!IsValid()) {
+ set_log_info("program not linked");
+ return;
+ }
+ glValidateProgram(service_id());
+ UpdateLogInfo();
+}
+
+GLint Program::GetUniformFakeLocation(
+ const std::string& name) const {
+ bool getting_array_location = false;
+ size_t open_pos = std::string::npos;
+ int index = 0;
+ if (!GLES2Util::ParseUniformName(
+ name, &open_pos, &index, &getting_array_location)) {
+ return -1;
+ }
+ for (GLuint ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& info = uniform_infos_[ii];
+ if (!info.IsValid()) {
+ continue;
+ }
+ if (info.name == name ||
+ (info.is_array &&
+ info.name.compare(0, info.name.size() - 3, name) == 0)) {
+ return info.fake_location_base;
+ } else if (getting_array_location && info.is_array) {
+ // Look for an array specification.
+ size_t open_pos_2 = info.name.find_last_of('[');
+ if (open_pos_2 == open_pos &&
+ name.compare(0, open_pos, info.name, 0, open_pos) == 0) {
+ if (index >= 0 && index < info.size) {
+ DCHECK_GT(static_cast<int>(info.element_locations.size()), index);
+ if (info.element_locations[index] == -1)
+ return -1;
+ return ProgramManager::MakeFakeLocation(
+ info.fake_location_base, index);
+ }
+ }
+ }
+ }
+ return -1;
+}
+
+GLint Program::GetAttribLocation(
+ const std::string& name) const {
+ for (GLuint ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ if (info.name == name) {
+ return info.location;
+ }
+ }
+ return -1;
+}
+
+const Program::UniformInfo*
+ Program::GetUniformInfoByFakeLocation(
+ GLint fake_location, GLint* real_location, GLint* array_index) const {
+ DCHECK(real_location);
+ DCHECK(array_index);
+ if (fake_location < 0) {
+ return NULL;
+ }
+
+ GLint uniform_index = GetUniformInfoIndexFromFakeLocation(fake_location);
+ if (uniform_index >= 0 &&
+ static_cast<size_t>(uniform_index) < uniform_infos_.size()) {
+ const UniformInfo& uniform_info = uniform_infos_[uniform_index];
+ if (!uniform_info.IsValid()) {
+ return NULL;
+ }
+ GLint element_index = GetArrayElementIndexFromFakeLocation(fake_location);
+ if (element_index < uniform_info.size) {
+ *real_location = uniform_info.element_locations[element_index];
+ *array_index = element_index;
+ return &uniform_info;
+ }
+ }
+ return NULL;
+}
+
+const std::string* Program::GetAttribMappedName(
+ const std::string& original_name) const {
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ Shader* shader = attached_shaders_[ii].get();
+ if (shader) {
+ const std::string* mapped_name =
+ shader->GetAttribMappedName(original_name);
+ if (mapped_name)
+ return mapped_name;
+ }
+ }
+ return NULL;
+}
+
+const std::string* Program::GetOriginalNameFromHashedName(
+ const std::string& hashed_name) const {
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ Shader* shader = attached_shaders_[ii].get();
+ if (shader) {
+ const std::string* original_name =
+ shader->GetOriginalNameFromHashedName(hashed_name);
+ if (original_name)
+ return original_name;
+ }
+ }
+ return NULL;
+}
+
+bool Program::SetUniformLocationBinding(
+ const std::string& name, GLint location) {
+ std::string short_name;
+ int element_index = 0;
+ if (!GetUniformNameSansElement(name, &element_index, &short_name) ||
+ element_index != 0) {
+ return false;
+ }
+
+ bind_uniform_location_map_[short_name] = location;
+ return true;
+}
+
+// Note: This is only valid to call right after a program has been linked
+// successfully.
+void Program::GetCorrectedVariableInfo(
+ bool use_uniforms,
+ const std::string& name, std::string* corrected_name,
+ std::string* original_name,
+ GLsizei* size, GLenum* type) const {
+ DCHECK(corrected_name);
+ DCHECK(original_name);
+ DCHECK(size);
+ DCHECK(type);
+ const char* kArraySpec = "[0]";
+ for (int jj = 0; jj < 2; ++jj) {
+ std::string test_name(name + ((jj == 1) ? kArraySpec : ""));
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ Shader* shader = attached_shaders_[ii].get();
+ if (shader) {
+ const Shader::VariableInfo* variable_info =
+ use_uniforms ? shader->GetUniformInfo(test_name) :
+ shader->GetAttribInfo(test_name);
+        // Note: There is an assumption here that if an attrib is defined in
+        // more than one attached shader, their types and sizes match. Should
+        // we check for that case?
+ if (variable_info) {
+ *corrected_name = test_name;
+ *original_name = variable_info->name;
+ *type = variable_info->type;
+ *size = variable_info->size;
+ return;
+ }
+ }
+ }
+ }
+ *corrected_name = name;
+ *original_name = name;
+}
+
+bool Program::AddUniformInfo(
+ GLsizei size, GLenum type, GLint location, GLint fake_base_location,
+ const std::string& name, const std::string& original_name,
+ size_t* next_available_index) {
+ DCHECK(next_available_index);
+ const char* kArraySpec = "[0]";
+ size_t uniform_index =
+ fake_base_location >= 0 ? fake_base_location : *next_available_index;
+ if (uniform_infos_.size() < uniform_index + 1) {
+ uniform_infos_.resize(uniform_index + 1);
+ }
+
+ // return if this location is already in use.
+ if (uniform_infos_[uniform_index].IsValid()) {
+ DCHECK_GE(fake_base_location, 0);
+ return false;
+ }
+
+ uniform_infos_[uniform_index] = UniformInfo(
+ size, type, uniform_index, original_name);
+ ++num_uniforms_;
+
+ UniformInfo& info = uniform_infos_[uniform_index];
+ info.element_locations.resize(size);
+ info.element_locations[0] = location;
+ DCHECK_GE(size, 0);
+ size_t num_texture_units = info.IsSampler() ? static_cast<size_t>(size) : 0u;
+ info.texture_units.clear();
+ info.texture_units.resize(num_texture_units, 0);
+
+ if (size > 1) {
+ // Go through the array element locations looking for a match.
+    // We can skip the first element because it's the same as the
+    // location without the array operators.
+ size_t array_pos = name.rfind(kArraySpec);
+ std::string base_name = name;
+ if (name.size() > 3) {
+ if (array_pos != name.size() - 3) {
+ info.name = name + kArraySpec;
+ } else {
+ base_name = name.substr(0, name.size() - 3);
+ }
+ }
+ for (GLsizei ii = 1; ii < info.size; ++ii) {
+ std::string element_name(base_name + "[" + base::IntToString(ii) + "]");
+ info.element_locations[ii] =
+ glGetUniformLocation(service_id_, element_name.c_str());
+ }
+ }
+
+ info.is_array =
+ (size > 1 ||
+ (info.name.size() > 3 &&
+ info.name.rfind(kArraySpec) == info.name.size() - 3));
+
+ if (info.IsSampler()) {
+ sampler_indices_.push_back(info.fake_location_base);
+ }
+ max_uniform_name_length_ =
+ std::max(max_uniform_name_length_,
+ static_cast<GLsizei>(info.name.size()));
+
+ while (*next_available_index < uniform_infos_.size() &&
+ uniform_infos_[*next_available_index].IsValid()) {
+ *next_available_index = *next_available_index + 1;
+ }
+
+ return true;
+}
+
+const Program::UniformInfo*
+ Program::GetUniformInfo(
+ GLint index) const {
+ if (static_cast<size_t>(index) >= uniform_infos_.size()) {
+ return NULL;
+ }
+
+ const UniformInfo& info = uniform_infos_[index];
+ return info.IsValid() ? &info : NULL;
+}
+
+bool Program::SetSamplers(
+ GLint num_texture_units, GLint fake_location,
+ GLsizei count, const GLint* value) {
+ if (fake_location < 0) {
+ return true;
+ }
+ GLint uniform_index = GetUniformInfoIndexFromFakeLocation(fake_location);
+ if (uniform_index >= 0 &&
+ static_cast<size_t>(uniform_index) < uniform_infos_.size()) {
+ UniformInfo& info = uniform_infos_[uniform_index];
+ if (!info.IsValid()) {
+ return false;
+ }
+ GLint element_index = GetArrayElementIndexFromFakeLocation(fake_location);
+ if (element_index < info.size) {
+ count = std::min(info.size - element_index, count);
+ if (info.IsSampler() && count > 0) {
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (value[ii] < 0 || value[ii] >= num_texture_units) {
+ return false;
+ }
+ }
+ std::copy(value, value + count,
+ info.texture_units.begin() + element_index);
+ return true;
+ }
+ }
+ }
+ return true;
+}
+
+void Program::GetProgramiv(GLenum pname, GLint* params) {
+ switch (pname) {
+ case GL_ACTIVE_ATTRIBUTES:
+ *params = attrib_infos_.size();
+ break;
+ case GL_ACTIVE_ATTRIBUTE_MAX_LENGTH:
+      // Notice +1 to accommodate the NULL terminator.
+ *params = max_attrib_name_length_ + 1;
+ break;
+ case GL_ACTIVE_UNIFORMS:
+ *params = num_uniforms_;
+ break;
+ case GL_ACTIVE_UNIFORM_MAX_LENGTH:
+      // Notice +1 to accommodate the NULL terminator.
+ *params = max_uniform_name_length_ + 1;
+ break;
+ case GL_LINK_STATUS:
+ *params = link_status_;
+ break;
+ case GL_INFO_LOG_LENGTH:
+      // Notice +1 to accommodate the NULL terminator.
+ *params = log_info_.get() ? (log_info_->size() + 1) : 0;
+ break;
+ case GL_DELETE_STATUS:
+ *params = deleted_;
+ break;
+ case GL_VALIDATE_STATUS:
+ if (!IsValid()) {
+ *params = GL_FALSE;
+ } else {
+ glGetProgramiv(service_id_, pname, params);
+ }
+ break;
+ default:
+ glGetProgramiv(service_id_, pname, params);
+ break;
+ }
+}
+
+bool Program::AttachShader(
+ ShaderManager* shader_manager,
+ Shader* shader) {
+ DCHECK(shader_manager);
+ DCHECK(shader);
+ int index = ShaderTypeToIndex(shader->shader_type());
+ if (attached_shaders_[index].get() != NULL) {
+ return false;
+ }
+ attached_shaders_[index] = scoped_refptr<Shader>(shader);
+ shader_manager->UseShader(shader);
+ return true;
+}
+
+bool Program::DetachShader(
+ ShaderManager* shader_manager,
+ Shader* shader) {
+ DCHECK(shader_manager);
+ DCHECK(shader);
+ if (attached_shaders_[ShaderTypeToIndex(shader->shader_type())].get() !=
+ shader) {
+ return false;
+ }
+ attached_shaders_[ShaderTypeToIndex(shader->shader_type())] = NULL;
+ shader_manager->UnuseShader(shader);
+ return true;
+}
+
+void Program::DetachShaders(ShaderManager* shader_manager) {
+ DCHECK(shader_manager);
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ if (attached_shaders_[ii].get()) {
+ DetachShader(shader_manager, attached_shaders_[ii].get());
+ }
+ }
+}
+
+bool Program::CanLink() const {
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ if (!attached_shaders_[ii].get() || !attached_shaders_[ii]->valid()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Program::DetectAttribLocationBindingConflicts() const {
+ std::set<GLint> location_binding_used;
+ for (LocationMap::const_iterator it = bind_attrib_location_map_.begin();
+ it != bind_attrib_location_map_.end(); ++it) {
+ // Find out if an attribute is declared in this program's shaders.
+ bool active = false;
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ if (!attached_shaders_[ii].get() || !attached_shaders_[ii]->valid())
+ continue;
+ if (attached_shaders_[ii]->GetAttribInfo(it->first)) {
+ active = true;
+ break;
+ }
+ }
+ if (active) {
+ std::pair<std::set<GLint>::iterator, bool> result =
+ location_binding_used.insert(it->second);
+ if (!result.second)
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Program::DetectUniformsMismatch(std::string* conflicting_name) const {
+ typedef std::map<std::string, UniformType> UniformMap;
+ UniformMap uniform_map;
+ for (int ii = 0; ii < kMaxAttachedShaders; ++ii) {
+ const ShaderTranslator::VariableMap& shader_uniforms =
+ attached_shaders_[ii]->uniform_map();
+ for (ShaderTranslator::VariableMap::const_iterator iter =
+ shader_uniforms.begin();
+ iter != shader_uniforms.end(); ++iter) {
+ const std::string& name = iter->first;
+ UniformType type(iter->second);
+ UniformMap::iterator map_entry = uniform_map.find(name);
+ if (map_entry == uniform_map.end()) {
+ uniform_map[name] = type;
+ } else {
+        // If a uniform is already in the map, i.e., it has already been
+        // declared by the other shader, then the type and precision must
+        // match.
+ if (map_entry->second == type)
+ continue;
+ *conflicting_name = name;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool Program::DetectVaryingsMismatch(std::string* conflicting_name) const {
+ DCHECK(attached_shaders_[0].get() &&
+ attached_shaders_[0]->shader_type() == GL_VERTEX_SHADER &&
+ attached_shaders_[1].get() &&
+ attached_shaders_[1]->shader_type() == GL_FRAGMENT_SHADER);
+ const ShaderTranslator::VariableMap* vertex_varyings =
+ &(attached_shaders_[0]->varying_map());
+ const ShaderTranslator::VariableMap* fragment_varyings =
+ &(attached_shaders_[1]->varying_map());
+
+ for (ShaderTranslator::VariableMap::const_iterator iter =
+ fragment_varyings->begin();
+ iter != fragment_varyings->end(); ++iter) {
+ const std::string& name = iter->first;
+ if (IsBuiltInVarying(name))
+ continue;
+
+ ShaderTranslator::VariableMap::const_iterator hit =
+ vertex_varyings->find(name);
+ if (hit == vertex_varyings->end()) {
+ if (iter->second.static_use) {
+ *conflicting_name = name;
+ return true;
+ }
+ continue;
+ }
+
+ if (hit->second.type != iter->second.type ||
+ hit->second.size != iter->second.size) {
+ *conflicting_name = name;
+ return true;
+ }
+
+ }
+ return false;
+}
+
+bool Program::DetectGlobalNameConflicts(std::string* conflicting_name) const {
+ DCHECK(attached_shaders_[0].get() &&
+ attached_shaders_[0]->shader_type() == GL_VERTEX_SHADER &&
+ attached_shaders_[1].get() &&
+ attached_shaders_[1]->shader_type() == GL_FRAGMENT_SHADER);
+ const ShaderTranslator::VariableMap* uniforms[2];
+ uniforms[0] = &(attached_shaders_[0]->uniform_map());
+ uniforms[1] = &(attached_shaders_[1]->uniform_map());
+ const ShaderTranslator::VariableMap* attribs =
+ &(attached_shaders_[0]->attrib_map());
+
+ for (ShaderTranslator::VariableMap::const_iterator iter =
+ attribs->begin(); iter != attribs->end(); ++iter) {
+ for (int ii = 0; ii < 2; ++ii) {
+ if (uniforms[ii]->find(iter->first) != uniforms[ii]->end()) {
+ *conflicting_name = iter->first;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool Program::CheckVaryingsPacking(
+ Program::VaryingsPackingOption option) const {
+ DCHECK(attached_shaders_[0].get() &&
+ attached_shaders_[0]->shader_type() == GL_VERTEX_SHADER &&
+ attached_shaders_[1].get() &&
+ attached_shaders_[1]->shader_type() == GL_FRAGMENT_SHADER);
+ const ShaderTranslator::VariableMap* vertex_varyings =
+ &(attached_shaders_[0]->varying_map());
+ const ShaderTranslator::VariableMap* fragment_varyings =
+ &(attached_shaders_[1]->varying_map());
+
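+  // Collect the varyings that actually consume registers (per the packing
+  // option), then let the translator check them against the register limit.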
+ std::map<std::string, ShVariableInfo> combined_map;
+
+ for (ShaderTranslator::VariableMap::const_iterator iter =
+ fragment_varyings->begin();
+ iter != fragment_varyings->end(); ++iter) {
+ if (!iter->second.static_use && option == kCountOnlyStaticallyUsed)
+ continue;
+ if (!IsBuiltInVarying(iter->first)) {
+ ShaderTranslator::VariableMap::const_iterator vertex_iter =
+ vertex_varyings->find(iter->first);
+ if (vertex_iter == vertex_varyings->end() ||
+ (!vertex_iter->second.static_use &&
+ option == kCountOnlyStaticallyUsed))
+ continue;
+ }
+
+ ShVariableInfo var;
+ var.type = static_cast<sh::GLenum>(iter->second.type);
+ var.size = iter->second.size;
+ combined_map[iter->first] = var;
+ }
+
+ if (combined_map.size() == 0)
+ return true;
+ scoped_ptr<ShVariableInfo[]> variables(
+ new ShVariableInfo[combined_map.size()]);
+ size_t index = 0;
+ for (std::map<std::string, ShVariableInfo>::const_iterator iter =
+ combined_map.begin();
+ iter != combined_map.end(); ++iter) {
+ variables[index].type = iter->second.type;
+ variables[index].size = iter->second.size;
+ ++index;
+ }
+ return ShCheckVariablesWithinPackingLimits(
+ static_cast<int>(manager_->max_varying_vectors()),
+ variables.get(),
+ combined_map.size()) == 1;
+}
+
+static uint32 ComputeOffset(const void* start, const void* position) {
+ return static_cast<const uint8*>(position) -
+ static_cast<const uint8*>(start);
+}
+
+void Program::GetProgramInfo(
+ ProgramManager* manager, CommonDecoder::Bucket* bucket) const {
+  // NOTE: It seems to me the math in here does not need to check for overflow
+  // because the data being calculated from has various small limits. The max
+  // number of attribs + uniforms is somewhere well under 1024. The maximum
+  // size of an identifier is 256 characters.
+ uint32 num_locations = 0;
+ uint32 total_string_size = 0;
+
+ for (size_t ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ num_locations += 1;
+ total_string_size += info.name.size();
+ }
+
+ for (size_t ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& info = uniform_infos_[ii];
+ if (info.IsValid()) {
+ num_locations += info.element_locations.size();
+ total_string_size += info.name.size();
+ }
+ }
+
+ uint32 num_inputs = attrib_infos_.size() + num_uniforms_;
+ uint32 input_size = num_inputs * sizeof(ProgramInput);
+ uint32 location_size = num_locations * sizeof(int32);
+ uint32 size = sizeof(ProgramInfoHeader) +
+ input_size + location_size + total_string_size;
+
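+  // Bucket layout: ProgramInfoHeader, then one ProgramInput per attrib and
+  // uniform, then the location table, then the name strings (not NULL
+  // terminated; lengths are stored in each ProgramInput).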
+ bucket->SetSize(size);
+ ProgramInfoHeader* header = bucket->GetDataAs<ProgramInfoHeader*>(0, size);
+ ProgramInput* inputs = bucket->GetDataAs<ProgramInput*>(
+ sizeof(ProgramInfoHeader), input_size);
+ int32* locations = bucket->GetDataAs<int32*>(
+ sizeof(ProgramInfoHeader) + input_size, location_size);
+ char* strings = bucket->GetDataAs<char*>(
+ sizeof(ProgramInfoHeader) + input_size + location_size,
+ total_string_size);
+ DCHECK(header);
+ DCHECK(inputs);
+ DCHECK(locations);
+ DCHECK(strings);
+
+ header->link_status = link_status_;
+ header->num_attribs = attrib_infos_.size();
+ header->num_uniforms = num_uniforms_;
+
+ for (size_t ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ inputs->size = info.size;
+ inputs->type = info.type;
+ inputs->location_offset = ComputeOffset(header, locations);
+ inputs->name_offset = ComputeOffset(header, strings);
+ inputs->name_length = info.name.size();
+ *locations++ = info.location;
+ memcpy(strings, info.name.c_str(), info.name.size());
+ strings += info.name.size();
+ ++inputs;
+ }
+
+ for (size_t ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& info = uniform_infos_[ii];
+ if (info.IsValid()) {
+ inputs->size = info.size;
+ inputs->type = info.type;
+ inputs->location_offset = ComputeOffset(header, locations);
+ inputs->name_offset = ComputeOffset(header, strings);
+ inputs->name_length = info.name.size();
+ DCHECK(static_cast<size_t>(info.size) == info.element_locations.size());
+ for (size_t jj = 0; jj < info.element_locations.size(); ++jj) {
+ if (info.element_locations[jj] == -1)
+ *locations++ = -1;
+ else
+ *locations++ = ProgramManager::MakeFakeLocation(ii, jj);
+ }
+ memcpy(strings, info.name.c_str(), info.name.size());
+ strings += info.name.size();
+ ++inputs;
+ }
+ }
+
+ DCHECK_EQ(ComputeOffset(header, strings), size);
+}
+
+Program::~Program() {
+ if (manager_) {
+ if (manager_->have_context_) {
+ glDeleteProgram(service_id());
+ }
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+
+ProgramManager::ProgramManager(ProgramCache* program_cache,
+ uint32 max_varying_vectors)
+ : program_count_(0),
+ have_context_(true),
+ program_cache_(program_cache),
+ max_varying_vectors_(max_varying_vectors) { }
+
+ProgramManager::~ProgramManager() {
+ DCHECK(programs_.empty());
+}
+
+void ProgramManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ programs_.clear();
+}
+
+void ProgramManager::StartTracking(Program* /* program */) {
+ ++program_count_;
+}
+
+void ProgramManager::StopTracking(Program* /* program */) {
+ --program_count_;
+}
+
+Program* ProgramManager::CreateProgram(
+ GLuint client_id, GLuint service_id) {
+ std::pair<ProgramMap::iterator, bool> result =
+ programs_.insert(
+ std::make_pair(client_id,
+ scoped_refptr<Program>(
+ new Program(this, service_id))));
+ DCHECK(result.second);
+ return result.first->second.get();
+}
+
+Program* ProgramManager::GetProgram(GLuint client_id) {
+ ProgramMap::iterator it = programs_.find(client_id);
+ return it != programs_.end() ? it->second.get() : NULL;
+}
+
+bool ProgramManager::GetClientId(GLuint service_id, GLuint* client_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (ProgramMap::const_iterator it = programs_.begin();
+ it != programs_.end(); ++it) {
+ if (it->second->service_id() == service_id) {
+ *client_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+ProgramCache* ProgramManager::program_cache() const {
+ return program_cache_;
+}
+
+bool ProgramManager::IsOwned(Program* program) {
+ for (ProgramMap::iterator it = programs_.begin();
+ it != programs_.end(); ++it) {
+ if (it->second.get() == program) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void ProgramManager::RemoveProgramInfoIfUnused(
+ ShaderManager* shader_manager, Program* program) {
+ DCHECK(shader_manager);
+ DCHECK(program);
+ DCHECK(IsOwned(program));
+ if (program->IsDeleted() && !program->InUse()) {
+ program->DetachShaders(shader_manager);
+ for (ProgramMap::iterator it = programs_.begin();
+ it != programs_.end(); ++it) {
+ if (it->second.get() == program) {
+ programs_.erase(it);
+ return;
+ }
+ }
+ NOTREACHED();
+ }
+}
+
+void ProgramManager::MarkAsDeleted(
+ ShaderManager* shader_manager,
+ Program* program) {
+ DCHECK(shader_manager);
+ DCHECK(program);
+ DCHECK(IsOwned(program));
+ program->MarkAsDeleted();
+ RemoveProgramInfoIfUnused(shader_manager, program);
+}
+
+void ProgramManager::UseProgram(Program* program) {
+ DCHECK(program);
+ DCHECK(IsOwned(program));
+ program->IncUseCount();
+}
+
+void ProgramManager::UnuseProgram(
+ ShaderManager* shader_manager,
+ Program* program) {
+ DCHECK(shader_manager);
+ DCHECK(program);
+ DCHECK(IsOwned(program));
+ program->DecUseCount();
+ RemoveProgramInfoIfUnused(shader_manager, program);
+}
+
+void ProgramManager::ClearUniforms(Program* program) {
+ DCHECK(program);
+ program->ClearUniforms(&zero_);
+}
+
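+// A fake location packs the uniform index into the low 16 bits and the array
+// element into the high 16 bits, e.g. index 5, element 2 -> 0x00020005.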
+int32 ProgramManager::MakeFakeLocation(int32 index, int32 element) {
+ return index + element * 0x10000;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/program_manager.h b/gpu/command_buffer/service/program_manager.h
new file mode 100644
index 0000000..bcc3630
--- /dev/null
+++ b/gpu/command_buffer/service/program_manager.h
@@ -0,0 +1,435 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_PROGRAM_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_PROGRAM_MANAGER_H_
+
+#include <map>
+#include <string>
+#include <vector>
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/common_decoder.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class ProgramCache;
+class ProgramManager;
+class Shader;
+class ShaderManager;
+class ShaderTranslator;
+
+// This is used to track which attributes a particular program needs
+// so we can verify at glDrawXXX time that every attribute is either disabled
+// or if enabled that it points to a valid source.
+class GPU_EXPORT Program : public base::RefCounted<Program> {
+ public:
+ static const int kMaxAttachedShaders = 2;
+
+ enum VaryingsPackingOption {
+ kCountOnlyStaticallyUsed,
+ kCountAll
+ };
+
+ enum UniformApiType {
+ kUniform1i = 1 << 0,
+ kUniform2i = 1 << 1,
+ kUniform3i = 1 << 2,
+ kUniform4i = 1 << 3,
+ kUniform1f = 1 << 4,
+ kUniform2f = 1 << 5,
+ kUniform3f = 1 << 6,
+ kUniform4f = 1 << 7,
+ kUniformMatrix2f = 1 << 8,
+ kUniformMatrix3f = 1 << 9,
+ kUniformMatrix4f = 1 << 10,
+ };
+
+ struct UniformInfo {
+ UniformInfo();
+ UniformInfo(
+ GLsizei _size, GLenum _type, GLint _fake_location_base,
+ const std::string& _name);
+ ~UniformInfo();
+
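+    // A default-constructed entry has size 0 and marks an unused slot in the
+    // sparse uniform_infos_ vector.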
+ bool IsValid() const {
+ return size != 0;
+ }
+
+ bool IsSampler() const {
+ return type == GL_SAMPLER_2D || type == GL_SAMPLER_2D_RECT_ARB ||
+ type == GL_SAMPLER_CUBE || type == GL_SAMPLER_EXTERNAL_OES;
+ }
+
+ GLsizei size;
+ GLenum type;
+ uint32 accepts_api_type;
+ GLint fake_location_base;
+ bool is_array;
+ std::string name;
+ std::vector<GLint> element_locations;
+ std::vector<GLuint> texture_units;
+ };
+ struct VertexAttrib {
+ VertexAttrib(GLsizei _size, GLenum _type, const std::string& _name,
+ GLint _location)
+ : size(_size),
+ type(_type),
+ location(_location),
+ name(_name) {
+ }
+ GLsizei size;
+ GLenum type;
+ GLint location;
+ std::string name;
+ };
+
+ typedef std::vector<UniformInfo> UniformInfoVector;
+ typedef std::vector<VertexAttrib> AttribInfoVector;
+ typedef std::vector<int> SamplerIndices;
+ typedef std::map<std::string, GLint> LocationMap;
+
+ Program(ProgramManager* manager, GLuint service_id);
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ const SamplerIndices& sampler_indices() {
+ return sampler_indices_;
+ }
+
+ const AttribInfoVector& GetAttribInfos() const {
+ return attrib_infos_;
+ }
+
+ const VertexAttrib* GetAttribInfo(GLint index) const {
+ return (static_cast<size_t>(index) < attrib_infos_.size()) ?
+ &attrib_infos_[index] : NULL;
+ }
+
+ GLint GetAttribLocation(const std::string& name) const;
+
+ const VertexAttrib* GetAttribInfoByLocation(GLuint location) const {
+ if (location < attrib_location_to_index_map_.size()) {
+ GLint index = attrib_location_to_index_map_[location];
+ if (index >= 0) {
+ return &attrib_infos_[index];
+ }
+ }
+ return NULL;
+ }
+
+ const UniformInfo* GetUniformInfo(GLint index) const;
+
+ // If the original name is not found, return NULL.
+ const std::string* GetAttribMappedName(
+ const std::string& original_name) const;
+
+ // If the hashed name is not found, return NULL.
+ const std::string* GetOriginalNameFromHashedName(
+ const std::string& hashed_name) const;
+
+ // Gets the fake location of a uniform by name.
+ GLint GetUniformFakeLocation(const std::string& name) const;
+
+ // Gets the UniformInfo of a uniform by location.
+ const UniformInfo* GetUniformInfoByFakeLocation(
+ GLint fake_location, GLint* real_location, GLint* array_index) const;
+
+ // Gets all the program info.
+ void GetProgramInfo(
+ ProgramManager* manager, CommonDecoder::Bucket* bucket) const;
+
+ // Sets the sampler values for a uniform.
+  // This is safe to call for any location. If the location is not
+  // a sampler uniform, nothing will happen.
+  // Returns false if fake_location is a sampler and any value is negative or
+  // >= num_texture_units; returns true otherwise.
+ bool SetSamplers(
+ GLint num_texture_units, GLint fake_location,
+ GLsizei count, const GLint* value);
+
+ bool IsDeleted() const {
+ return deleted_;
+ }
+
+ void GetProgramiv(GLenum pname, GLint* params);
+
+ bool IsValid() const {
+ return valid_;
+ }
+
+ bool AttachShader(ShaderManager* manager, Shader* shader);
+ bool DetachShader(ShaderManager* manager, Shader* shader);
+
+ bool CanLink() const;
+
+ // Performs glLinkProgram and related activities.
+ bool Link(ShaderManager* manager,
+ ShaderTranslator* vertex_translator,
+ ShaderTranslator* fragment_shader,
+ VaryingsPackingOption varyings_packing_option,
+ const ShaderCacheCallback& shader_callback);
+
+ // Performs glValidateProgram and related activities.
+ void Validate();
+
+ const std::string* log_info() const {
+ return log_info_.get();
+ }
+
+ bool InUse() const {
+ DCHECK_GE(use_count_, 0);
+ return use_count_ != 0;
+ }
+
+ // Sets attribute-location binding from a glBindAttribLocation() call.
+ void SetAttribLocationBinding(const std::string& attrib, GLint location) {
+ bind_attrib_location_map_[attrib] = location;
+ }
+
+ // Sets uniform-location binding from a glBindUniformLocationCHROMIUM call.
+  // Returns false on error.
+ bool SetUniformLocationBinding(const std::string& name, GLint location);
+
+ // Detects if there are attribute location conflicts from
+ // glBindAttribLocation() calls.
+ // We only consider the declared attributes in the program.
+ bool DetectAttribLocationBindingConflicts() const;
+
+ // Detects if there are uniforms of the same name but different type
+ // or precision in vertex/fragment shaders.
+ // Return true and set the first found conflicting hashed name to
+ // conflicting_name if such cases are detected.
+ bool DetectUniformsMismatch(std::string* conflicting_name) const;
+
+ // Return true if a varying is statically used in fragment shader, but it
+ // is not declared in vertex shader.
+ bool DetectVaryingsMismatch(std::string* conflicting_name) const;
+
+  // Return true if a uniform and an attribute share the same name.
+ bool DetectGlobalNameConflicts(std::string* conflicting_name) const;
+
+ // Return false if varyings can't be packed into the max available
+ // varying registers.
+ bool CheckVaryingsPacking(VaryingsPackingOption option) const;
+
+ // Visible for testing
+ const LocationMap& bind_attrib_location_map() const {
+ return bind_attrib_location_map_;
+ }
+
+ private:
+ friend class base::RefCounted<Program>;
+ friend class ProgramManager;
+
+ ~Program();
+
+ void set_log_info(const char* str) {
+ log_info_.reset(str ? new std::string(str) : NULL);
+ }
+
+ void ClearLinkStatus() {
+ link_status_ = false;
+ }
+
+ void IncUseCount() {
+ ++use_count_;
+ }
+
+ void DecUseCount() {
+ --use_count_;
+ DCHECK_GE(use_count_, 0);
+ }
+
+ void MarkAsDeleted() {
+ DCHECK(!deleted_);
+ deleted_ = true;
+ }
+
+ // Resets the program.
+ void Reset();
+
+ // Updates the program info after a successful link.
+ void Update();
+
+ // Process the program log, replacing the hashed names with original names.
+ std::string ProcessLogInfo(const std::string& log);
+
+ // Updates the program log info from GL
+ void UpdateLogInfo();
+
+ // Clears all the uniforms.
+ void ClearUniforms(std::vector<uint8>* zero_buffer);
+
+  // If long attribute names are mapped during shader translation, call
+ // glBindAttribLocation() again with the mapped names.
+ // This is called right before the glLink() call, but after shaders are
+ // translated.
+ void ExecuteBindAttribLocationCalls();
+
+ bool AddUniformInfo(
+ GLsizei size, GLenum type, GLint location, GLint fake_base_location,
+ const std::string& name, const std::string& original_name,
+ size_t* next_available_index);
+
+ void GetCorrectedVariableInfo(
+ bool use_uniforms, const std::string& name, std::string* corrected_name,
+ std::string* original_name, GLsizei* size, GLenum* type) const;
+
+ void DetachShaders(ShaderManager* manager);
+
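+  // Fake locations are built by ProgramManager::MakeFakeLocation(): uniform
+  // index in the low 16 bits, array element in the high 16 bits. These
+  // helpers undo that encoding.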
+ static inline GLint GetUniformInfoIndexFromFakeLocation(
+ GLint fake_location) {
+ return fake_location & 0xFFFF;
+ }
+
+ static inline GLint GetArrayElementIndexFromFakeLocation(
+ GLint fake_location) {
+ return (fake_location >> 16) & 0xFFFF;
+ }
+
+ ProgramManager* manager_;
+
+ int use_count_;
+
+ GLsizei max_attrib_name_length_;
+
+ // Attrib by index.
+ AttribInfoVector attrib_infos_;
+
+  // Map from attrib location to index into attrib_infos_.
+ std::vector<GLint> attrib_location_to_index_map_;
+
+ GLsizei max_uniform_name_length_;
+
+ // Uniform info by index.
+ UniformInfoVector uniform_infos_;
+
+ // The indices of the uniforms that are samplers.
+ SamplerIndices sampler_indices_;
+
+ // The program this Program is tracking.
+ GLuint service_id_;
+
+ // Shaders by type of shader.
+ scoped_refptr<Shader>
+ attached_shaders_[kMaxAttachedShaders];
+
+ // True if this program is marked as deleted.
+ bool deleted_;
+
+ // This is true if glLinkProgram was successful at least once.
+ bool valid_;
+
+ // This is true if glLinkProgram was successful last time it was called.
+ bool link_status_;
+
+ // True if the uniforms have been cleared.
+ bool uniforms_cleared_;
+
+  // This is different from uniform_infos_.size() because
+  // that is a sparse array.
+ GLint num_uniforms_;
+
+ // Log info
+ scoped_ptr<std::string> log_info_;
+
+ // attribute-location binding map from glBindAttribLocation() calls.
+ LocationMap bind_attrib_location_map_;
+
+ // uniform-location binding map from glBindUniformLocationCHROMIUM() calls.
+ LocationMap bind_uniform_location_map_;
+};
+
+// Tracks the Programs.
+//
+// NOTE: To support shared resources an instance of this class will
+// need to be shared by multiple GLES2Decoders.
+class GPU_EXPORT ProgramManager {
+ public:
+ explicit ProgramManager(ProgramCache* program_cache,
+ uint32 max_varying_vectors);
+ ~ProgramManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a new program.
+ Program* CreateProgram(GLuint client_id, GLuint service_id);
+
+ // Gets a program.
+ Program* GetProgram(GLuint client_id);
+
+ // Gets a client id for a given service id.
+ bool GetClientId(GLuint service_id, GLuint* client_id) const;
+
+ // Gets the shader cache
+ ProgramCache* program_cache() const;
+
+ // Marks a program as deleted. If it is not used the program will be deleted.
+ void MarkAsDeleted(ShaderManager* shader_manager, Program* program);
+
+ // Marks a program as used.
+ void UseProgram(Program* program);
+
+  // Marks a program as unused. If marked as deleted, it will be removed.
+ void UnuseProgram(ShaderManager* shader_manager, Program* program);
+
+ // Clears the uniforms for this program.
+ void ClearUniforms(Program* program);
+
+  // Returns true if the name starts with the reserved "gl_" prefix.
+ static bool IsInvalidPrefix(const char* name, size_t length);
+
+ // Check if a Program is owned by this ProgramManager.
+ bool IsOwned(Program* program);
+
+ static int32 MakeFakeLocation(int32 index, int32 element);
+
+ uint32 max_varying_vectors() const {
+ return max_varying_vectors_;
+ }
+
+ private:
+ friend class Program;
+
+ void StartTracking(Program* program);
+ void StopTracking(Program* program);
+
+ void RemoveProgramInfoIfUnused(
+ ShaderManager* shader_manager, Program* program);
+
+  // Info for each Program, keyed by client-side program id.
+ // TODO(gman): Choose a faster container.
+ typedef std::map<GLuint, scoped_refptr<Program> > ProgramMap;
+ ProgramMap programs_;
+
+  // Counts the number of Programs allocated with 'this' as their manager.
+  // Allows checking that no Program will outlive this manager.
+ unsigned int program_count_;
+
+ bool have_context_;
+
+ // Used to clear uniforms.
+ std::vector<uint8> zero_;
+
+ ProgramCache* program_cache_;
+
+ uint32 max_varying_vectors_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProgramManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_PROGRAM_MANAGER_H_
diff --git a/gpu/command_buffer/service/program_manager_unittest.cc b/gpu/command_buffer/service/program_manager_unittest.cc
new file mode 100644
index 0000000..3cca263
--- /dev/null
+++ b/gpu/command_buffer/service/program_manager_unittest.cc
@@ -0,0 +1,1724 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/program_manager.h"
+
+#include <algorithm>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/common_decoder.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::ReturnRef;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+const uint32 kMaxVaryingVectors = 8;
+
+void ShaderCacheCb(const std::string& key, const std::string& shader) {}
+}  // anonymous namespace
+
+class ProgramManagerTest : public GpuServiceTest {
+ public:
+ ProgramManagerTest() : manager_(NULL, kMaxVaryingVectors) { }
+ virtual ~ProgramManagerTest() {
+ manager_.Destroy(false);
+ }
+
+ protected:
+ ProgramManager manager_;
+};
+
+TEST_F(ProgramManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+ // Check we can create program.
+ manager_.CreateProgram(kClient1Id, kService1Id);
+ // Check program got created.
+ Program* program1 = manager_.GetProgram(kClient1Id);
+ ASSERT_TRUE(program1 != NULL);
+ GLuint client_id = 0;
+ EXPECT_TRUE(manager_.GetClientId(program1->service_id(), &client_id));
+ EXPECT_EQ(kClient1Id, client_id);
+ // Check we get nothing for a non-existent program.
+ EXPECT_TRUE(manager_.GetProgram(kClient2Id) == NULL);
+}
+
+TEST_F(ProgramManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create program.
+ Program* program0 = manager_.CreateProgram(kClient1Id, kService1Id);
+ ASSERT_TRUE(program0 != NULL);
+ // Check program got created.
+ Program* program1 = manager_.GetProgram(kClient1Id);
+ ASSERT_EQ(program0, program1);
+ EXPECT_CALL(*gl_, DeleteProgram(kService1Id))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.Destroy(true);
+ // Check the resources were released.
+ program1 = manager_.GetProgram(kClient1Id);
+ ASSERT_TRUE(program1 == NULL);
+}
+
+TEST_F(ProgramManagerTest, DeleteBug) {
+ ShaderManager shader_manager;
+ const GLuint kClient1Id = 1;
+ const GLuint kClient2Id = 2;
+ const GLuint kService1Id = 11;
+ const GLuint kService2Id = 12;
+ // Check we can create program.
+ scoped_refptr<Program> program1(
+ manager_.CreateProgram(kClient1Id, kService1Id));
+ scoped_refptr<Program> program2(
+ manager_.CreateProgram(kClient2Id, kService2Id));
+ // Check program got created.
+ ASSERT_TRUE(program1.get());
+ ASSERT_TRUE(program2.get());
+ manager_.UseProgram(program1.get());
+ manager_.MarkAsDeleted(&shader_manager, program1.get());
+ // Program will be deleted when last ref is released.
+ EXPECT_CALL(*gl_, DeleteProgram(kService2Id))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.MarkAsDeleted(&shader_manager, program2.get());
+ EXPECT_TRUE(manager_.IsOwned(program1.get()));
+ EXPECT_FALSE(manager_.IsOwned(program2.get()));
+}
+
+TEST_F(ProgramManagerTest, Program) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create program.
+ Program* program1 = manager_.CreateProgram(
+ kClient1Id, kService1Id);
+ ASSERT_TRUE(program1);
+ EXPECT_EQ(kService1Id, program1->service_id());
+ EXPECT_FALSE(program1->InUse());
+ EXPECT_FALSE(program1->IsValid());
+ EXPECT_FALSE(program1->IsDeleted());
+ EXPECT_FALSE(program1->CanLink());
+ EXPECT_TRUE(program1->log_info() == NULL);
+}
+
+class ProgramManagerWithShaderTest : public GpuServiceTest {
+ public:
+ ProgramManagerWithShaderTest()
+ : manager_(NULL, kMaxVaryingVectors), program_(NULL) {
+ }
+
+ virtual ~ProgramManagerWithShaderTest() {
+ manager_.Destroy(false);
+ shader_manager_.Destroy(false);
+ }
+
+ static const GLint kNumVertexAttribs = 16;
+
+ static const GLuint kClientProgramId = 123;
+ static const GLuint kServiceProgramId = 456;
+ static const GLuint kVertexShaderClientId = 201;
+ static const GLuint kFragmentShaderClientId = 202;
+ static const GLuint kVertexShaderServiceId = 301;
+ static const GLuint kFragmentShaderServiceId = 302;
+
+ static const char* kAttrib1Name;
+ static const char* kAttrib2Name;
+ static const char* kAttrib3Name;
+ static const GLint kAttrib1Size = 1;
+ static const GLint kAttrib2Size = 1;
+ static const GLint kAttrib3Size = 1;
+ static const int kAttrib1Precision = SH_PRECISION_MEDIUMP;
+ static const int kAttrib2Precision = SH_PRECISION_HIGHP;
+ static const int kAttrib3Precision = SH_PRECISION_LOWP;
+ static const int kAttribStaticUse = 0;
+ static const GLint kAttrib1Location = 0;
+ static const GLint kAttrib2Location = 1;
+ static const GLint kAttrib3Location = 2;
+ static const GLenum kAttrib1Type = GL_FLOAT_VEC4;
+ static const GLenum kAttrib2Type = GL_FLOAT_VEC2;
+ static const GLenum kAttrib3Type = GL_FLOAT_VEC3;
+ static const GLint kInvalidAttribLocation = 30;
+ static const GLint kBadAttribIndex = kNumVertexAttribs;
+
+ static const char* kUniform1Name;
+ static const char* kUniform2Name;
+ static const char* kUniform3BadName;
+ static const char* kUniform3GoodName;
+ static const GLint kUniform1Size = 1;
+ static const GLint kUniform2Size = 3;
+ static const GLint kUniform3Size = 2;
+ static const int kUniform1Precision = SH_PRECISION_LOWP;
+ static const int kUniform2Precision = SH_PRECISION_MEDIUMP;
+ static const int kUniform3Precision = SH_PRECISION_HIGHP;
+ static const int kUniform1StaticUse = 1;
+ static const int kUniform2StaticUse = 1;
+ static const int kUniform3StaticUse = 1;
+ static const GLint kUniform1FakeLocation = 0; // These are hard coded
+ static const GLint kUniform2FakeLocation = 1; // to match
+ static const GLint kUniform3FakeLocation = 2; // ProgramManager.
+ static const GLint kUniform1RealLocation = 11;
+ static const GLint kUniform2RealLocation = 22;
+ static const GLint kUniform3RealLocation = 33;
+ static const GLint kUniform1DesiredLocation = -1;
+ static const GLint kUniform2DesiredLocation = -1;
+ static const GLint kUniform3DesiredLocation = -1;
+ static const GLenum kUniform1Type = GL_FLOAT_VEC4;
+ static const GLenum kUniform2Type = GL_INT_VEC2;
+ static const GLenum kUniform3Type = GL_FLOAT_VEC3;
+ static const GLint kInvalidUniformLocation = 30;
+ static const GLint kBadUniformIndex = 1000;
+
+ static const size_t kNumAttribs;
+ static const size_t kNumUniforms;
+
+ protected:
+ typedef TestHelper::AttribInfo AttribInfo;
+ typedef TestHelper::UniformInfo UniformInfo;
+
+ typedef enum {
+ kVarUniform,
+ kVarVarying,
+ kVarAttribute
+ } VarCategory;
+
+ typedef struct {
+ int type;
+ int size;
+ int precision;
+ int static_use;
+ std::string name;
+ VarCategory category;
+ } VarInfo;
+
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+
+ SetupDefaultShaderExpectations();
+
+ Shader* vertex_shader = shader_manager_.CreateShader(
+ kVertexShaderClientId, kVertexShaderServiceId, GL_VERTEX_SHADER);
+ Shader* fragment_shader =
+ shader_manager_.CreateShader(
+ kFragmentShaderClientId, kFragmentShaderServiceId,
+ GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(vertex_shader != NULL);
+ ASSERT_TRUE(fragment_shader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vertex_shader, true);
+ TestHelper::SetShaderStates(gl_.get(), fragment_shader, true);
+
+ program_ = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program_ != NULL);
+
+ program_->AttachShader(&shader_manager_, vertex_shader);
+ program_->AttachShader(&shader_manager_, fragment_shader);
+ program_->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+ }
+
+ void SetupShader(AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint service_id) {
+ TestHelper::SetupShader(
+ gl_.get(), attribs, num_attribs, uniforms, num_uniforms, service_id);
+ }
+
+ void SetupDefaultShaderExpectations() {
+ SetupShader(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
+ kServiceProgramId);
+ }
+
+ void SetupExpectationsForClearingUniforms(
+ UniformInfo* uniforms, size_t num_uniforms) {
+ TestHelper::SetupExpectationsForClearingUniforms(
+ gl_.get(), uniforms, num_uniforms);
+ }
+
+ // Return true if link status matches expected_link_status
+ bool LinkAsExpected(Program* program,
+ bool expected_link_status) {
+ GLuint service_id = program->service_id();
+ if (expected_link_status) {
+ SetupShader(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
+ service_id);
+ }
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+ GLint link_status;
+ program->GetProgramiv(GL_LINK_STATUS, &link_status);
+ return (static_cast<bool>(link_status) == expected_link_status);
+ }
+
+ Program* SetupShaderVariableTest(const VarInfo* vertex_variables,
+ size_t vertex_variable_size,
+ const VarInfo* fragment_variables,
+ size_t fragment_variable_size) {
+ // Set up shader
+ const GLuint kVShaderClientId = 1;
+ const GLuint kVShaderServiceId = 11;
+ const GLuint kFShaderClientId = 2;
+ const GLuint kFShaderServiceId = 12;
+
+ ShaderTranslator::VariableMap vertex_attrib_map;
+ ShaderTranslator::VariableMap vertex_uniform_map;
+ ShaderTranslator::VariableMap vertex_varying_map;
+ for (size_t ii = 0; ii < vertex_variable_size; ++ii) {
+ ShaderTranslator::VariableMap* map = NULL;
+ switch (vertex_variables[ii].category) {
+ case kVarAttribute:
+ map = &vertex_attrib_map;
+ break;
+ case kVarUniform:
+ map = &vertex_uniform_map;
+ break;
+ case kVarVarying:
+ map = &vertex_varying_map;
+ break;
+ default:
+ NOTREACHED();
+ }
+ (*map)[vertex_variables[ii].name] =
+ ShaderTranslator::VariableInfo(vertex_variables[ii].type,
+ vertex_variables[ii].size,
+ vertex_variables[ii].precision,
+ vertex_variables[ii].static_use,
+ vertex_variables[ii].name);
+ }
+
+ ShaderTranslator::VariableMap frag_attrib_map;
+ ShaderTranslator::VariableMap frag_uniform_map;
+ ShaderTranslator::VariableMap frag_varying_map;
+ for (size_t ii = 0; ii < fragment_variable_size; ++ii) {
+ ShaderTranslator::VariableMap* map = NULL;
+ switch (fragment_variables[ii].category) {
+ case kVarAttribute:
+ map = &frag_attrib_map;
+ break;
+ case kVarUniform:
+ map = &frag_uniform_map;
+ break;
+ case kVarVarying:
+ map = &frag_varying_map;
+ break;
+ default:
+ NOTREACHED();
+ }
+ (*map)[fragment_variables[ii].name] =
+ ShaderTranslator::VariableInfo(fragment_variables[ii].type,
+ fragment_variables[ii].size,
+ fragment_variables[ii].precision,
+ fragment_variables[ii].static_use,
+ fragment_variables[ii].name);
+ }
+
+ // Check we can create shader.
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ // Check shader got created.
+ EXPECT_TRUE(vshader != NULL && fshader != NULL);
+ // Set Status
+ TestHelper::SetShaderStates(
+ gl_.get(), vshader, true, NULL, NULL,
+ &vertex_attrib_map, &vertex_uniform_map, &vertex_varying_map, NULL);
+ TestHelper::SetShaderStates(
+ gl_.get(), fshader, true, NULL, NULL,
+ &frag_attrib_map, &frag_uniform_map, &frag_varying_map, NULL);
+
+ // Set up program
+ const GLuint kClientProgramId = 6666;
+ const GLuint kServiceProgramId = 8888;
+ Program* program =
+ manager_.CreateProgram(kClientProgramId, kServiceProgramId);
+ EXPECT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ return program;
+ }
+
+ static AttribInfo kAttribs[];
+ static UniformInfo kUniforms[];
+
+ ProgramManager manager_;
+ Program* program_;
+ ShaderManager shader_manager_;
+};
+
+ProgramManagerWithShaderTest::AttribInfo
+ ProgramManagerWithShaderTest::kAttribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLint ProgramManagerWithShaderTest::kNumVertexAttribs;
+const GLuint ProgramManagerWithShaderTest::kClientProgramId;
+const GLuint ProgramManagerWithShaderTest::kServiceProgramId;
+const GLuint ProgramManagerWithShaderTest::kVertexShaderClientId;
+const GLuint ProgramManagerWithShaderTest::kFragmentShaderClientId;
+const GLuint ProgramManagerWithShaderTest::kVertexShaderServiceId;
+const GLuint ProgramManagerWithShaderTest::kFragmentShaderServiceId;
+const GLint ProgramManagerWithShaderTest::kAttrib1Size;
+const GLint ProgramManagerWithShaderTest::kAttrib2Size;
+const GLint ProgramManagerWithShaderTest::kAttrib3Size;
+const GLint ProgramManagerWithShaderTest::kAttrib1Location;
+const GLint ProgramManagerWithShaderTest::kAttrib2Location;
+const GLint ProgramManagerWithShaderTest::kAttrib3Location;
+const GLenum ProgramManagerWithShaderTest::kAttrib1Type;
+const GLenum ProgramManagerWithShaderTest::kAttrib2Type;
+const GLenum ProgramManagerWithShaderTest::kAttrib3Type;
+const GLint ProgramManagerWithShaderTest::kInvalidAttribLocation;
+const GLint ProgramManagerWithShaderTest::kBadAttribIndex;
+const GLint ProgramManagerWithShaderTest::kUniform1Size;
+const GLint ProgramManagerWithShaderTest::kUniform2Size;
+const GLint ProgramManagerWithShaderTest::kUniform3Size;
+const GLint ProgramManagerWithShaderTest::kUniform1FakeLocation;
+const GLint ProgramManagerWithShaderTest::kUniform2FakeLocation;
+const GLint ProgramManagerWithShaderTest::kUniform3FakeLocation;
+const GLint ProgramManagerWithShaderTest::kUniform1RealLocation;
+const GLint ProgramManagerWithShaderTest::kUniform2RealLocation;
+const GLint ProgramManagerWithShaderTest::kUniform3RealLocation;
+const GLint ProgramManagerWithShaderTest::kUniform1DesiredLocation;
+const GLint ProgramManagerWithShaderTest::kUniform2DesiredLocation;
+const GLint ProgramManagerWithShaderTest::kUniform3DesiredLocation;
+const GLenum ProgramManagerWithShaderTest::kUniform1Type;
+const GLenum ProgramManagerWithShaderTest::kUniform2Type;
+const GLenum ProgramManagerWithShaderTest::kUniform3Type;
+const GLint ProgramManagerWithShaderTest::kInvalidUniformLocation;
+const GLint ProgramManagerWithShaderTest::kBadUniformIndex;
+#endif
+
+const size_t ProgramManagerWithShaderTest::kNumAttribs =
+ arraysize(ProgramManagerWithShaderTest::kAttribs);
+
+ProgramManagerWithShaderTest::UniformInfo
+ ProgramManagerWithShaderTest::kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kUniform2Type,
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3BadName,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3GoodName,
+ },
+};
+
+const size_t ProgramManagerWithShaderTest::kNumUniforms =
+ arraysize(ProgramManagerWithShaderTest::kUniforms);
+
+const char* ProgramManagerWithShaderTest::kAttrib1Name = "attrib1";
+const char* ProgramManagerWithShaderTest::kAttrib2Name = "attrib2";
+const char* ProgramManagerWithShaderTest::kAttrib3Name = "attrib3";
+const char* ProgramManagerWithShaderTest::kUniform1Name = "uniform1";
+// Correctly has array spec.
+const char* ProgramManagerWithShaderTest::kUniform2Name = "uniform2[0]";
+// Incorrectly missing array spec.
+const char* ProgramManagerWithShaderTest::kUniform3BadName = "uniform3";
+const char* ProgramManagerWithShaderTest::kUniform3GoodName = "uniform3[0]";
+
+TEST_F(ProgramManagerWithShaderTest, GetAttribInfos) {
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ const Program::AttribInfoVector& infos =
+ program->GetAttribInfos();
+ ASSERT_EQ(kNumAttribs, infos.size());
+ for (size_t ii = 0; ii < kNumAttribs; ++ii) {
+ const Program::VertexAttrib& info = infos[ii];
+ const AttribInfo& expected = kAttribs[ii];
+ EXPECT_EQ(expected.size, info.size);
+ EXPECT_EQ(expected.type, info.type);
+ EXPECT_EQ(expected.location, info.location);
+ EXPECT_STREQ(expected.name, info.name.c_str());
+ }
+}
+
+TEST_F(ProgramManagerWithShaderTest, GetAttribInfo) {
+ const GLint kValidIndex = 1;
+ const GLint kInvalidIndex = 1000;
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ const Program::VertexAttrib* info =
+ program->GetAttribInfo(kValidIndex);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kAttrib2Size, info->size);
+ EXPECT_EQ(kAttrib2Type, info->type);
+ EXPECT_EQ(kAttrib2Location, info->location);
+ EXPECT_STREQ(kAttrib2Name, info->name.c_str());
+ EXPECT_TRUE(program->GetAttribInfo(kInvalidIndex) == NULL);
+}
+
+TEST_F(ProgramManagerWithShaderTest, GetAttribLocation) {
+ const char* kInvalidName = "foo";
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_EQ(kAttrib2Location, program->GetAttribLocation(kAttrib2Name));
+ EXPECT_EQ(-1, program->GetAttribLocation(kInvalidName));
+}
+
+TEST_F(ProgramManagerWithShaderTest, GetUniformInfo) {
+ const GLint kInvalidIndex = 1000;
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ const Program::UniformInfo* info =
+ program->GetUniformInfo(0);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kUniform1Size, info->size);
+ EXPECT_EQ(kUniform1Type, info->type);
+ EXPECT_EQ(kUniform1RealLocation, info->element_locations[0]);
+ EXPECT_STREQ(kUniform1Name, info->name.c_str());
+ info = program->GetUniformInfo(1);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kUniform2Size, info->size);
+ EXPECT_EQ(kUniform2Type, info->type);
+ EXPECT_EQ(kUniform2RealLocation, info->element_locations[0]);
+ EXPECT_STREQ(kUniform2Name, info->name.c_str());
+ info = program->GetUniformInfo(2);
+ // We emulate certain OpenGL drivers by supplying the name without
+ // the array spec. Our implementation should correctly add the required spec.
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kUniform3Size, info->size);
+ EXPECT_EQ(kUniform3Type, info->type);
+ EXPECT_EQ(kUniform3RealLocation, info->element_locations[0]);
+ EXPECT_STREQ(kUniform3GoodName, info->name.c_str());
+ EXPECT_TRUE(program->GetUniformInfo(kInvalidIndex) == NULL);
+}
+
+TEST_F(ProgramManagerWithShaderTest, AttachDetachShader) {
+ static const GLuint kClientProgramId = 124;
+ static const GLuint kServiceProgramId = 457;
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_FALSE(program->CanLink());
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_FALSE(program->CanLink());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ EXPECT_TRUE(program->CanLink());
+ program->DetachShader(&shader_manager_, vshader);
+ EXPECT_FALSE(program->CanLink());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->CanLink());
+ program->DetachShader(&shader_manager_, fshader);
+ EXPECT_FALSE(program->CanLink());
+ EXPECT_FALSE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_FALSE(program->CanLink());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ EXPECT_TRUE(program->CanLink());
+ TestHelper::SetShaderStates(gl_.get(), vshader, false);
+ EXPECT_FALSE(program->CanLink());
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ EXPECT_TRUE(program->CanLink());
+ TestHelper::SetShaderStates(gl_.get(), fshader, false);
+ EXPECT_FALSE(program->CanLink());
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ EXPECT_TRUE(program->CanLink());
+ EXPECT_TRUE(program->DetachShader(&shader_manager_, fshader));
+ EXPECT_FALSE(program->DetachShader(&shader_manager_, fshader));
+}
+
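+// Note on "fake" locations: the service hands the client synthetic uniform
+// locations and maps them back to the driver's real locations, which lets it
+// validate accesses and address individual array elements. The exact packing
+// lives in ProgramManager::MakeFakeLocation (illustratively, something like
+// index | (element << 16)); the tests below only rely on the mapping being
+// consistent, not on a particular bit layout.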
+TEST_F(ProgramManagerWithShaderTest, GetUniformFakeLocation) {
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ // Emulate a driver that optimizes out the unused uniform3[1], so its
+ // location is -1.
+ Program::UniformInfo* uniform = const_cast<Program::UniformInfo*>(
+ program->GetUniformInfo(2));
+ ASSERT_TRUE(uniform != NULL && kUniform3Size == 2);
+ EXPECT_EQ(kUniform3Size, uniform->size);
+ uniform->element_locations[1] = -1;
+ EXPECT_EQ(kUniform1FakeLocation,
+ program->GetUniformFakeLocation(kUniform1Name));
+ EXPECT_EQ(kUniform2FakeLocation,
+ program->GetUniformFakeLocation(kUniform2Name));
+ EXPECT_EQ(kUniform3FakeLocation,
+ program->GetUniformFakeLocation(kUniform3BadName));
+ // Check we can get uniform2 as "uniform2" even though the name is
+ // "uniform2[0]"
+ EXPECT_EQ(kUniform2FakeLocation,
+ program->GetUniformFakeLocation("uniform2"));
+ // Check we can get uniform3 as "uniform3[0]" even though we simulated GL
+ // returning "uniform3"
+ EXPECT_EQ(kUniform3FakeLocation,
+ program->GetUniformFakeLocation(kUniform3GoodName));
+ // Check that we can get the locations of the array elements > 1
+ EXPECT_EQ(ProgramManager::MakeFakeLocation(kUniform2FakeLocation, 1),
+ program->GetUniformFakeLocation("uniform2[1]"));
+ EXPECT_EQ(ProgramManager::MakeFakeLocation(kUniform2FakeLocation, 2),
+ program->GetUniformFakeLocation("uniform2[2]"));
+ EXPECT_EQ(-1, program->GetUniformFakeLocation("uniform2[3]"));
+ EXPECT_EQ(-1, program->GetUniformFakeLocation("uniform3[1]"));
+ EXPECT_EQ(-1, program->GetUniformFakeLocation("uniform3[2]"));
+}
+
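+// The expected real location below (kUniform2RealLocation + 2 * 2) reflects
+// the mock setup in TestHelper, which spaces array-element locations two
+// apart; that spacing is a convention of these tests, not a GL guarantee.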
+TEST_F(ProgramManagerWithShaderTest, GetUniformInfoByFakeLocation) {
+ const GLint kInvalidLocation = 1234;
+ const Program::UniformInfo* info;
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ GLint real_location = -1;
+ GLint array_index = -1;
+ ASSERT_TRUE(program != NULL);
+ info = program->GetUniformInfoByFakeLocation(
+ kUniform2FakeLocation, &real_location, &array_index);
+ EXPECT_EQ(kUniform2RealLocation, real_location);
+ EXPECT_EQ(0, array_index);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kUniform2Type, info->type);
+ real_location = -1;
+ array_index = -1;
+ info = program->GetUniformInfoByFakeLocation(
+ kInvalidLocation, &real_location, &array_index);
+ EXPECT_TRUE(info == NULL);
+ EXPECT_EQ(-1, real_location);
+ EXPECT_EQ(-1, array_index);
+ GLint loc = program->GetUniformFakeLocation("uniform2[2]");
+ info = program->GetUniformInfoByFakeLocation(
+ loc, &real_location, &array_index);
+ ASSERT_TRUE(info != NULL);
+ EXPECT_EQ(kUniform2RealLocation + 2 * 2, real_location);
+ EXPECT_EQ(2, array_index);
+}
+
+// Some GL drivers incorrectly return gl_DepthRange and possibly other uniforms
+// that start with "gl_". Our implementation catches these and does not allow
+// them back to the client.
+TEST_F(ProgramManagerWithShaderTest, GLDriverReturnsGLUnderscoreUniform) {
+ static const char* kUniform2Name = "gl_longNameWeCanCheckFor";
+ static ProgramManagerWithShaderTest::UniformInfo kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kUniform2Type,
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3BadName,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3GoodName,
+ },
+ };
+ const size_t kNumUniforms = arraysize(kUniforms);
+ static const GLuint kClientProgramId = 1234;
+ static const GLuint kServiceProgramId = 5679;
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ SetupShader(
+ kAttribs, kNumAttribs, kUniforms, kNumUniforms, kServiceProgramId);
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ Program* program =
+ manager_.CreateProgram(kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+ GLint value = 0;
+ program->GetProgramiv(GL_ACTIVE_ATTRIBUTES, &value);
+ EXPECT_EQ(3, value);
+ // Check that we skipped the "gl_" uniform.
+ program->GetProgramiv(GL_ACTIVE_UNIFORMS, &value);
+ EXPECT_EQ(2, value);
+ // Check that our max length adds room for the array spec and is not as long
+ // as the "gl_" uniform we skipped.
+ // The +4u accounts for the added "[0]" array spec and the NULL terminator.
+ program->GetProgramiv(GL_ACTIVE_UNIFORM_MAX_LENGTH, &value);
+ EXPECT_EQ(strlen(kUniform3BadName) + 4u, static_cast<size_t>(value));
+}
+
+// Test that the bug in comparing similar array names is fixed.
+TEST_F(ProgramManagerWithShaderTest, SimilarArrayNames) {
+ static const char* kUniform2Name = "u_nameLong[0]";
+ static const char* kUniform3Name = "u_name[0]";
+ static const GLint kUniform2Size = 2;
+ static const GLint kUniform3Size = 2;
+ static ProgramManagerWithShaderTest::UniformInfo kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kUniform2Type,
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3Name,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3Name,
+ },
+ };
+ const size_t kNumUniforms = arraysize(kUniforms);
+ static const GLuint kClientProgramId = 1234;
+ static const GLuint kServiceProgramId = 5679;
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ SetupShader(
+ kAttribs, kNumAttribs, kUniforms, kNumUniforms, kServiceProgramId);
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ Program* program =
+ manager_.CreateProgram(kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+
+ // Check that we get the correct locations.
+ EXPECT_EQ(kUniform2FakeLocation,
+ program->GetUniformFakeLocation(kUniform2Name));
+ EXPECT_EQ(kUniform3FakeLocation,
+ program->GetUniformFakeLocation(kUniform3Name));
+}
+
+// Some GL drivers return the wrong type, e.g. GL_FLOAT_VEC2 when they should
+// return GL_FLOAT_MAT2. Check that we handle this.
+TEST_F(ProgramManagerWithShaderTest, GLDriverReturnsWrongTypeInfo) {
+ static GLenum kAttrib2BadType = GL_FLOAT_VEC2;
+ static GLenum kAttrib2GoodType = GL_FLOAT_MAT2;
+ static GLenum kUniform2BadType = GL_FLOAT_VEC3;
+ static GLenum kUniform2GoodType = GL_FLOAT_MAT3;
+ ShaderTranslator::VariableMap attrib_map;
+ ShaderTranslator::VariableMap uniform_map;
+ ShaderTranslator::VariableMap varying_map;
+ attrib_map[kAttrib1Name] = ShaderTranslatorInterface::VariableInfo(
+ kAttrib1Type, kAttrib1Size, kAttrib1Precision,
+ kAttribStaticUse, kAttrib1Name);
+ attrib_map[kAttrib2Name] = ShaderTranslatorInterface::VariableInfo(
+ kAttrib2GoodType, kAttrib2Size, kAttrib2Precision,
+ kAttribStaticUse, kAttrib2Name);
+ attrib_map[kAttrib3Name] = ShaderTranslatorInterface::VariableInfo(
+ kAttrib3Type, kAttrib3Size, kAttrib3Precision,
+ kAttribStaticUse, kAttrib3Name);
+ uniform_map[kUniform1Name] = ShaderTranslatorInterface::VariableInfo(
+ kUniform1Type, kUniform1Size, kUniform1Precision,
+ kUniform1StaticUse, kUniform1Name);
+ uniform_map[kUniform2Name] = ShaderTranslatorInterface::VariableInfo(
+ kUniform2GoodType, kUniform2Size, kUniform2Precision,
+ kUniform2StaticUse, kUniform2Name);
+ uniform_map[kUniform3GoodName] = ShaderTranslatorInterface::VariableInfo(
+ kUniform3Type, kUniform3Size, kUniform3Precision,
+ kUniform3StaticUse, kUniform3GoodName);
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(
+ gl_.get(), vshader, true, NULL, NULL,
+ &attrib_map, &uniform_map, &varying_map, NULL);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(
+ gl_.get(), fshader, true, NULL, NULL,
+ &attrib_map, &uniform_map, &varying_map, NULL);
+ static ProgramManagerWithShaderTest::AttribInfo kAttribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2BadType, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ static ProgramManagerWithShaderTest::UniformInfo kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kUniform2BadType,
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3BadName,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3GoodName,
+ },
+ };
+ const size_t kNumAttribs = arraysize(kAttribs);
+ const size_t kNumUniforms = arraysize(kUniforms);
+ static const GLuint kClientProgramId = 1234;
+ static const GLuint kServiceProgramId = 5679;
+ SetupShader(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
+ kServiceProgramId);
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+ // Check that we got the good type, not the bad.
+ // Check Attribs
+ for (unsigned index = 0; index < kNumAttribs; ++index) {
+ const Program::VertexAttrib* attrib_info =
+ program->GetAttribInfo(index);
+ ASSERT_TRUE(attrib_info != NULL);
+ ShaderTranslator::VariableMap::const_iterator it = attrib_map.find(
+ attrib_info->name);
+ ASSERT_TRUE(it != attrib_map.end());
+ EXPECT_EQ(it->first, attrib_info->name);
+ EXPECT_EQ(static_cast<GLenum>(it->second.type), attrib_info->type);
+ EXPECT_EQ(it->second.size, attrib_info->size);
+ EXPECT_EQ(it->second.name, attrib_info->name);
+ }
+ // Check Uniforms
+ for (unsigned index = 0; index < kNumUniforms; ++index) {
+ const Program::UniformInfo* uniform_info =
+ program->GetUniformInfo(index);
+ ASSERT_TRUE(uniform_info != NULL);
+ ShaderTranslator::VariableMap::const_iterator it = uniform_map.find(
+ uniform_info->name);
+ ASSERT_TRUE(it != uniform_map.end());
+ EXPECT_EQ(it->first, uniform_info->name);
+ EXPECT_EQ(static_cast<GLenum>(it->second.type), uniform_info->type);
+ EXPECT_EQ(it->second.size, uniform_info->size);
+ EXPECT_EQ(it->second.name, uniform_info->name);
+ }
+}
+
+TEST_F(ProgramManagerWithShaderTest, ProgramInfoUseCount) {
+ static const GLuint kClientProgramId = 124;
+ static const GLuint kServiceProgramId = 457;
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_FALSE(program->CanLink());
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ EXPECT_FALSE(vshader->InUse());
+ EXPECT_FALSE(fshader->InUse());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(vshader->InUse());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ EXPECT_TRUE(fshader->InUse());
+ EXPECT_TRUE(program->CanLink());
+ EXPECT_FALSE(program->InUse());
+ EXPECT_FALSE(program->IsDeleted());
+ manager_.UseProgram(program);
+ EXPECT_TRUE(program->InUse());
+ manager_.UseProgram(program);
+ EXPECT_TRUE(program->InUse());
+ manager_.MarkAsDeleted(&shader_manager_, program);
+ EXPECT_TRUE(program->IsDeleted());
+ Program* info2 = manager_.GetProgram(kClientProgramId);
+ EXPECT_EQ(program, info2);
+ manager_.UnuseProgram(&shader_manager_, program);
+ EXPECT_TRUE(program->InUse());
+ // this should delete the info.
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.UnuseProgram(&shader_manager_, program);
+ info2 = manager_.GetProgram(kClientProgramId);
+ EXPECT_TRUE(info2 == NULL);
+ EXPECT_FALSE(vshader->InUse());
+ EXPECT_FALSE(fshader->InUse());
+}
+
+TEST_F(ProgramManagerWithShaderTest, ProgramInfoUseCount2) {
+ static const GLuint kClientProgramId = 124;
+ static const GLuint kServiceProgramId = 457;
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_FALSE(program->CanLink());
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ EXPECT_FALSE(vshader->InUse());
+ EXPECT_FALSE(fshader->InUse());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(vshader->InUse());
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ EXPECT_TRUE(fshader->InUse());
+ EXPECT_TRUE(program->CanLink());
+ EXPECT_FALSE(program->InUse());
+ EXPECT_FALSE(program->IsDeleted());
+ manager_.UseProgram(program);
+ EXPECT_TRUE(program->InUse());
+ manager_.UseProgram(program);
+ EXPECT_TRUE(program->InUse());
+ manager_.UnuseProgram(&shader_manager_, program);
+ EXPECT_TRUE(program->InUse());
+ manager_.UnuseProgram(&shader_manager_, program);
+ EXPECT_FALSE(program->InUse());
+ Program* info2 = manager_.GetProgram(kClientProgramId);
+ EXPECT_EQ(program, info2);
+ // this should delete the program.
+ EXPECT_CALL(*gl_, DeleteProgram(kServiceProgramId))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.MarkAsDeleted(&shader_manager_, program);
+ info2 = manager_.GetProgram(kClientProgramId);
+ EXPECT_TRUE(info2 == NULL);
+ EXPECT_FALSE(vshader->InUse());
+ EXPECT_FALSE(fshader->InUse());
+}
+
+TEST_F(ProgramManagerWithShaderTest, ProgramInfoGetProgramInfo) {
+ CommonDecoder::Bucket bucket;
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ program->GetProgramInfo(&manager_, &bucket);
+ ProgramInfoHeader* header =
+ bucket.GetDataAs<ProgramInfoHeader*>(0, sizeof(ProgramInfoHeader));
+ ASSERT_TRUE(header != NULL);
+ EXPECT_EQ(1u, header->link_status);
+ EXPECT_EQ(arraysize(kAttribs), header->num_attribs);
+ EXPECT_EQ(arraysize(kUniforms), header->num_uniforms);
+ const ProgramInput* inputs = bucket.GetDataAs<const ProgramInput*>(
+ sizeof(*header),
+ sizeof(ProgramInput) * (header->num_attribs + header->num_uniforms));
+ ASSERT_TRUE(inputs != NULL);
+ const ProgramInput* input = inputs;
+ // TODO(gman): Don't assume these are in order.
+ for (uint32 ii = 0; ii < header->num_attribs; ++ii) {
+ const AttribInfo& expected = kAttribs[ii];
+ EXPECT_EQ(expected.size, input->size);
+ EXPECT_EQ(expected.type, input->type);
+ const int32* location = bucket.GetDataAs<const int32*>(
+ input->location_offset, sizeof(int32));
+ ASSERT_TRUE(location != NULL);
+ EXPECT_EQ(expected.location, *location);
+ const char* name_buf = bucket.GetDataAs<const char*>(
+ input->name_offset, input->name_length);
+ ASSERT_TRUE(name_buf != NULL);
+ std::string name(name_buf, input->name_length);
+ EXPECT_STREQ(expected.name, name.c_str());
+ ++input;
+ }
+ // TODO(gman): Don't assume these are in order.
+ for (uint32 ii = 0; ii < header->num_uniforms; ++ii) {
+ const UniformInfo& expected = kUniforms[ii];
+ EXPECT_EQ(expected.size, input->size);
+ EXPECT_EQ(expected.type, input->type);
+ const int32* locations = bucket.GetDataAs<const int32*>(
+ input->location_offset, sizeof(int32) * input->size);
+ ASSERT_TRUE(locations != NULL);
+ for (int32 jj = 0; jj < input->size; ++jj) {
+ EXPECT_EQ(
+ ProgramManager::MakeFakeLocation(expected.fake_location, jj),
+ locations[jj]);
+ }
+ const char* name_buf = bucket.GetDataAs<const char*>(
+ input->name_offset, input->name_length);
+ ASSERT_TRUE(name_buf != NULL);
+ std::string name(name_buf, input->name_length);
+ EXPECT_STREQ(expected.good_name, name.c_str());
+ ++input;
+ }
+ EXPECT_EQ(header->num_attribs + header->num_uniforms,
+ static_cast<uint32>(input - inputs));
+}
+
+// Some drivers optimize out unused uniform array elements, so their
+// locations are reported as -1.
+TEST_F(ProgramManagerWithShaderTest, UnusedUniformArrayElements) {
+ CommonDecoder::Bucket bucket;
+ const Program* program = manager_.GetProgram(kClientProgramId);
+ ASSERT_TRUE(program != NULL);
+ // Emulate the situation that only the first element has a valid location.
+ // TODO(zmo): Don't assume these are in order.
+ for (size_t ii = 0; ii < arraysize(kUniforms); ++ii) {
+ Program::UniformInfo* uniform = const_cast<Program::UniformInfo*>(
+ program->GetUniformInfo(ii));
+ ASSERT_TRUE(uniform != NULL);
+ EXPECT_EQ(static_cast<size_t>(kUniforms[ii].size),
+ uniform->element_locations.size());
+ for (GLsizei jj = 1; jj < uniform->size; ++jj)
+ uniform->element_locations[jj] = -1;
+ }
+ program->GetProgramInfo(&manager_, &bucket);
+ ProgramInfoHeader* header =
+ bucket.GetDataAs<ProgramInfoHeader*>(0, sizeof(ProgramInfoHeader));
+ ASSERT_TRUE(header != NULL);
+ EXPECT_EQ(1u, header->link_status);
+ EXPECT_EQ(arraysize(kAttribs), header->num_attribs);
+ EXPECT_EQ(arraysize(kUniforms), header->num_uniforms);
+ const ProgramInput* inputs = bucket.GetDataAs<const ProgramInput*>(
+ sizeof(*header),
+ sizeof(ProgramInput) * (header->num_attribs + header->num_uniforms));
+ ASSERT_TRUE(inputs != NULL);
+ const ProgramInput* input = inputs + header->num_attribs;
+ for (uint32 ii = 0; ii < header->num_uniforms; ++ii) {
+ const UniformInfo& expected = kUniforms[ii];
+ EXPECT_EQ(expected.size, input->size);
+ const int32* locations = bucket.GetDataAs<const int32*>(
+ input->location_offset, sizeof(int32) * input->size);
+ ASSERT_TRUE(locations != NULL);
+ EXPECT_EQ(
+ ProgramManager::MakeFakeLocation(expected.fake_location, 0),
+ locations[0]);
+ for (int32 jj = 1; jj < input->size; ++jj)
+ EXPECT_EQ(-1, locations[jj]);
+ ++input;
+ }
+}
+
+TEST_F(ProgramManagerWithShaderTest, BindAttribLocationConflicts) {
+ // Set up shader
+ const GLuint kVShaderClientId = 1;
+ const GLuint kVShaderServiceId = 11;
+ const GLuint kFShaderClientId = 2;
+ const GLuint kFShaderServiceId = 12;
+ ShaderTranslator::VariableMap attrib_map;
+ for (uint32 ii = 0; ii < kNumAttribs; ++ii) {
+ attrib_map[kAttribs[ii].name] = ShaderTranslatorInterface::VariableInfo(
+ kAttribs[ii].type,
+ kAttribs[ii].size,
+ SH_PRECISION_MEDIUMP,
+ kAttribStaticUse,
+ kAttribs[ii].name);
+ }
+ // Check we can create shader.
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ // Check shader got created.
+ ASSERT_TRUE(vshader != NULL && fshader != NULL);
+ // Set Status
+ TestHelper::SetShaderStates(
+ gl_.get(), vshader, true, NULL, NULL, &attrib_map, NULL, NULL, NULL);
+ // Check attrib infos got copied.
+ for (ShaderTranslator::VariableMap::const_iterator it = attrib_map.begin();
+ it != attrib_map.end(); ++it) {
+ const Shader::VariableInfo* variable_info =
+ vshader->GetAttribInfo(it->first);
+ ASSERT_TRUE(variable_info != NULL);
+ EXPECT_EQ(it->second.type, variable_info->type);
+ EXPECT_EQ(it->second.size, variable_info->size);
+ EXPECT_EQ(it->second.precision, variable_info->precision);
+ EXPECT_EQ(it->second.static_use, variable_info->static_use);
+ EXPECT_EQ(it->second.name, variable_info->name);
+ }
+ TestHelper::SetShaderStates(
+ gl_.get(), fshader, true, NULL, NULL, &attrib_map, NULL, NULL, NULL);
+
+ // Set up program
+ const GLuint kClientProgramId = 6666;
+ const GLuint kServiceProgramId = 8888;
+ Program* program =
+ manager_.CreateProgram(kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+
+ EXPECT_FALSE(program->DetectAttribLocationBindingConflicts());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+
+ program->SetAttribLocationBinding(kAttrib1Name, 0);
+ EXPECT_FALSE(program->DetectAttribLocationBindingConflicts());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+
+ program->SetAttribLocationBinding("xxx", 0);
+ EXPECT_FALSE(program->DetectAttribLocationBindingConflicts());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+
+ program->SetAttribLocationBinding(kAttrib2Name, 1);
+ EXPECT_FALSE(program->DetectAttribLocationBindingConflicts());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+
+ program->SetAttribLocationBinding(kAttrib2Name, 0);
+ EXPECT_TRUE(program->DetectAttribLocationBindingConflicts());
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+TEST_F(ProgramManagerWithShaderTest, UniformsPrecisionMismatch) {
+ // Set up shader
+ const GLuint kVShaderClientId = 1;
+ const GLuint kVShaderServiceId = 11;
+ const GLuint kFShaderClientId = 2;
+ const GLuint kFShaderServiceId = 12;
+
+ ShaderTranslator::VariableMap vertex_uniform_map;
+ vertex_uniform_map["a"] = ShaderTranslator::VariableInfo(
+ 1, 3, SH_PRECISION_MEDIUMP, 1, "a");
+ ShaderTranslator::VariableMap frag_uniform_map;
+ frag_uniform_map["a"] = ShaderTranslator::VariableInfo(
+ 1, 3, SH_PRECISION_LOWP, 1, "a");
+
+ // Check we can create shader.
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ // Check shader got created.
+ ASSERT_TRUE(vshader != NULL && fshader != NULL);
+ // Set Status
+ TestHelper::SetShaderStates(
+ gl_.get(), vshader, true, NULL, NULL, NULL,
+ &vertex_uniform_map, NULL, NULL);
+ TestHelper::SetShaderStates(
+ gl_.get(), fshader, true, NULL, NULL, NULL,
+ &frag_uniform_map, NULL, NULL);
+
+ // Set up program
+ const GLuint kClientProgramId = 6666;
+ const GLuint kServiceProgramId = 8888;
+ Program* program =
+ manager_.CreateProgram(kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+
+ std::string conflicting_name;
+
+ EXPECT_TRUE(program->DetectUniformsMismatch(&conflicting_name));
+ EXPECT_EQ("a", conflicting_name);
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// If a varying has different type in the vertex and fragment
+// shader, linking should fail.
+TEST_F(ProgramManagerWithShaderTest, VaryingTypeMismatch) {
+ const VarInfo kVertexVarying =
+ { GL_FLOAT_VEC3, 1, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ const VarInfo kFragmentVarying =
+ { GL_FLOAT_VEC4, 1, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ Program* program = SetupShaderVariableTest(
+ &kVertexVarying, 1, &kFragmentVarying, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_TRUE(program->DetectVaryingsMismatch(&conflicting_name));
+ EXPECT_EQ("a", conflicting_name);
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// If a varying has different array size in the vertex and fragment
+// shader, linking should fail.
+TEST_F(ProgramManagerWithShaderTest, VaryingArraySizeMismatch) {
+ const VarInfo kVertexVarying =
+ { GL_FLOAT, 2, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ const VarInfo kFragmentVarying =
+ { GL_FLOAT, 3, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ Program* program = SetupShaderVariableTest(
+ &kVertexVarying, 1, &kFragmentVarying, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_TRUE(program->DetectVaryingsMismatch(&conflicting_name));
+ EXPECT_EQ("a", conflicting_name);
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// If a varying has different precision in the vertex and fragment
+// shader, linking should succeed.
+TEST_F(ProgramManagerWithShaderTest, VaryingPrecisionMismatch) {
+ const VarInfo kVertexVarying =
+ { GL_FLOAT, 2, SH_PRECISION_HIGHP, 1, "a", kVarVarying };
+ const VarInfo kFragmentVarying =
+ { GL_FLOAT, 2, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ Program* program = SetupShaderVariableTest(
+ &kVertexVarying, 1, &kFragmentVarying, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_FALSE(program->DetectVaryingsMismatch(&conflicting_name));
+ EXPECT_TRUE(conflicting_name.empty());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+}
+
+// If a varying is statically used in the fragment shader but not
+// declared in the vertex shader, linking should fail.
+TEST_F(ProgramManagerWithShaderTest, VaryingMissing) {
+ const VarInfo kFragmentVarying =
+ { GL_FLOAT, 3, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying };
+ Program* program = SetupShaderVariableTest(
+ NULL, 0, &kFragmentVarying, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_TRUE(program->DetectVaryingsMismatch(&conflicting_name));
+ EXPECT_EQ("a", conflicting_name);
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// If a varying is declared but not statically used in the fragment
+// shader, then even if it is not declared in the vertex shader, linking
+// should succeed.
+TEST_F(ProgramManagerWithShaderTest, InactiveVarying) {
+ const VarInfo kFragmentVarying =
+ { GL_FLOAT, 3, SH_PRECISION_MEDIUMP, 0, "a", kVarVarying };
+ Program* program = SetupShaderVariableTest(
+ NULL, 0, &kFragmentVarying, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_FALSE(program->DetectVaryingsMismatch(&conflicting_name));
+ EXPECT_TRUE(conflicting_name.empty());
+ EXPECT_TRUE(LinkAsExpected(program, true));
+}
+
+// Uniforms and attributes are both global variables, thus sharing
+// the same namespace. Any name conflicts should cause link
+// failure.
+TEST_F(ProgramManagerWithShaderTest, AttribUniformNameConflict) {
+ const VarInfo kVertexAttribute =
+ { GL_FLOAT_VEC4, 1, SH_PRECISION_MEDIUMP, 1, "a", kVarAttribute };
+ const VarInfo kFragmentUniform =
+ { GL_FLOAT_VEC4, 1, SH_PRECISION_MEDIUMP, 1, "a", kVarUniform };
+ Program* program = SetupShaderVariableTest(
+ &kVertexAttribute, 1, &kFragmentUniform, 1);
+
+ std::string conflicting_name;
+
+ EXPECT_TRUE(program->DetectGlobalNameConflicts(&conflicting_name));
+ EXPECT_EQ("a", conflicting_name);
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// Varyings go over 8 rows.
+TEST_F(ProgramManagerWithShaderTest, TooManyVaryings) {
+ const VarInfo kVertexVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ const VarInfo kFragmentVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ Program* program = SetupShaderVariableTest(
+ kVertexVaryings, 2, kFragmentVaryings, 2);
+
+ EXPECT_FALSE(
+ program->CheckVaryingsPacking(Program::kCountOnlyStaticallyUsed));
+ EXPECT_TRUE(LinkAsExpected(program, false));
+}
+
+// Varyings go over 8 rows but some are inactive.
+TEST_F(ProgramManagerWithShaderTest, TooManyInactiveVaryings) {
+ const VarInfo kVertexVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ const VarInfo kFragmentVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 0, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ Program* program = SetupShaderVariableTest(
+ kVertexVaryings, 2, kFragmentVaryings, 2);
+
+ EXPECT_TRUE(
+ program->CheckVaryingsPacking(Program::kCountOnlyStaticallyUsed));
+ EXPECT_TRUE(LinkAsExpected(program, true));
+}
+
+// Varyings go over 8 rows but some are inactive.
+// However, we still fail the check if kCountAll option is used.
+TEST_F(ProgramManagerWithShaderTest, CountAllVaryingsInPacking) {
+ const VarInfo kVertexVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 1, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ const VarInfo kFragmentVaryings[] = {
+ { GL_FLOAT_VEC4, 4, SH_PRECISION_MEDIUMP, 0, "a", kVarVarying },
+ { GL_FLOAT_VEC4, 5, SH_PRECISION_MEDIUMP, 1, "b", kVarVarying }
+ };
+ Program* program = SetupShaderVariableTest(
+ kVertexVaryings, 2, kFragmentVaryings, 2);
+
+ EXPECT_FALSE(program->CheckVaryingsPacking(Program::kCountAll));
+}
+
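+// ClearUniforms resets a freshly linked program's uniforms to zero (guarding
+// against drivers that leave them uninitialized). The loop below re-links the
+// program with uniform2 reported as each known sampler type and verifies the
+// clearing expectations still hold.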
+TEST_F(ProgramManagerWithShaderTest, ClearWithSamplerTypes) {
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ static const GLuint kClientProgramId = 1234;
+ static const GLuint kServiceProgramId = 5679;
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+
+ static const GLenum kSamplerTypes[] = {
+ GL_SAMPLER_2D,
+ GL_SAMPLER_CUBE,
+ GL_SAMPLER_EXTERNAL_OES,
+ GL_SAMPLER_3D_OES,
+ GL_SAMPLER_2D_RECT_ARB,
+ };
+ const size_t kNumSamplerTypes = arraysize(kSamplerTypes);
+ for (size_t ii = 0; ii < kNumSamplerTypes; ++ii) {
+ static ProgramManagerWithShaderTest::AttribInfo kAttribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ ProgramManagerWithShaderTest::UniformInfo kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kSamplerTypes[ii],
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3BadName,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3GoodName,
+ },
+ };
+ const size_t kNumAttribs = arraysize(kAttribs);
+ const size_t kNumUniforms = arraysize(kUniforms);
+ SetupShader(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
+ kServiceProgramId);
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+ SetupExpectationsForClearingUniforms(kUniforms, kNumUniforms);
+ manager_.ClearUniforms(program);
+ }
+}
+
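+// Covers client-specified uniform locations (the
+// GL_CHROMIUM_bind_uniform_location path): bindings registered via
+// SetUniformLocationBinding() before linking should override the fake
+// locations the manager would otherwise assign.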
+TEST_F(ProgramManagerWithShaderTest, BindUniformLocation) {
+ const GLuint kVShaderClientId = 2001;
+ const GLuint kFShaderClientId = 2002;
+ const GLuint kVShaderServiceId = 3001;
+ const GLuint kFShaderServiceId = 3002;
+
+ const GLint kUniform1DesiredLocation = 10;
+ const GLint kUniform2DesiredLocation = -1;
+ const GLint kUniform3DesiredLocation = 5;
+
+ Shader* vshader = shader_manager_.CreateShader(
+ kVShaderClientId, kVShaderServiceId, GL_VERTEX_SHADER);
+ ASSERT_TRUE(vshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), vshader, true);
+ Shader* fshader = shader_manager_.CreateShader(
+ kFShaderClientId, kFShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(fshader != NULL);
+ TestHelper::SetShaderStates(gl_.get(), fshader, true);
+ static const GLuint kClientProgramId = 1234;
+ static const GLuint kServiceProgramId = 5679;
+ Program* program = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program != NULL);
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
+ EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
+ EXPECT_TRUE(program->SetUniformLocationBinding(
+ kUniform1Name, kUniform1DesiredLocation));
+ EXPECT_TRUE(program->SetUniformLocationBinding(
+ kUniform3BadName, kUniform3DesiredLocation));
+
+ static ProgramManagerWithShaderTest::AttribInfo kAttribs[] = {
+ { kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
+ { kAttrib2Name, kAttrib2Size, kAttrib2Type, kAttrib2Location, },
+ { kAttrib3Name, kAttrib3Size, kAttrib3Type, kAttrib3Location, },
+ };
+ ProgramManagerWithShaderTest::UniformInfo kUniforms[] = {
+ { kUniform1Name,
+ kUniform1Size,
+ kUniform1Type,
+ kUniform1FakeLocation,
+ kUniform1RealLocation,
+ kUniform1DesiredLocation,
+ kUniform1Name,
+ },
+ { kUniform2Name,
+ kUniform2Size,
+ kUniform2Type,
+ kUniform2FakeLocation,
+ kUniform2RealLocation,
+ kUniform2DesiredLocation,
+ kUniform2Name,
+ },
+ { kUniform3BadName,
+ kUniform3Size,
+ kUniform3Type,
+ kUniform3FakeLocation,
+ kUniform3RealLocation,
+ kUniform3DesiredLocation,
+ kUniform3GoodName,
+ },
+ };
+
+ const size_t kNumAttribs = arraysize(kAttribs);
+ const size_t kNumUniforms = arraysize(kUniforms);
+ SetupShader(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
+ kServiceProgramId);
+ program->Link(NULL, NULL, NULL, Program::kCountOnlyStaticallyUsed,
+ base::Bind(&ShaderCacheCb));
+
+ EXPECT_EQ(kUniform1DesiredLocation,
+ program->GetUniformFakeLocation(kUniform1Name));
+ EXPECT_EQ(kUniform3DesiredLocation,
+ program->GetUniformFakeLocation(kUniform3BadName));
+ EXPECT_EQ(kUniform3DesiredLocation,
+ program->GetUniformFakeLocation(kUniform3GoodName));
+}
+
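+// The tests below drive ProgramManager against a MockProgramCache: a
+// successful link should be saved to the cache, and a cache hit should let
+// Link() load the stored program instead of recompiling the shaders.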
+class ProgramManagerWithCacheTest : public GpuServiceTest {
+ public:
+ static const GLuint kClientProgramId = 1;
+ static const GLuint kServiceProgramId = 10;
+ static const GLuint kVertexShaderClientId = 2;
+ static const GLuint kFragmentShaderClientId = 20;
+ static const GLuint kVertexShaderServiceId = 3;
+ static const GLuint kFragmentShaderServiceId = 30;
+
+ ProgramManagerWithCacheTest()
+ : cache_(new MockProgramCache()),
+ manager_(cache_.get(), kMaxVaryingVectors),
+ vertex_shader_(NULL),
+ fragment_shader_(NULL),
+ program_(NULL) {
+ }
+ virtual ~ProgramManagerWithCacheTest() {
+ manager_.Destroy(false);
+ shader_manager_.Destroy(false);
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+
+ vertex_shader_ = shader_manager_.CreateShader(
+ kVertexShaderClientId, kVertexShaderServiceId, GL_VERTEX_SHADER);
+ fragment_shader_ = shader_manager_.CreateShader(
+ kFragmentShaderClientId, kFragmentShaderServiceId, GL_FRAGMENT_SHADER);
+ ASSERT_TRUE(vertex_shader_ != NULL);
+ ASSERT_TRUE(fragment_shader_ != NULL);
+ vertex_shader_->set_source("lka asjf bjajsdfj");
+ fragment_shader_->set_source("lka asjf a fasgag 3rdsf3 bjajsdfj");
+
+ program_ = manager_.CreateProgram(
+ kClientProgramId, kServiceProgramId);
+ ASSERT_TRUE(program_ != NULL);
+
+ program_->AttachShader(&shader_manager_, vertex_shader_);
+ program_->AttachShader(&shader_manager_, fragment_shader_);
+ }
+
+ void SetShadersCompiled() {
+ TestHelper::SetShaderStates(gl_.get(), vertex_shader_, true);
+ TestHelper::SetShaderStates(gl_.get(), fragment_shader_, true);
+ }
+
+ void SetProgramCached() {
+ cache_->LinkedProgramCacheSuccess(
+ vertex_shader_->source(),
+ NULL,
+ fragment_shader_->source(),
+ NULL,
+ &program_->bind_attrib_location_map());
+ }
+
+ void SetExpectationsForProgramCached() {
+ SetExpectationsForProgramCached(program_,
+ vertex_shader_,
+ fragment_shader_);
+ }
+
+ void SetExpectationsForProgramCached(
+ Program* program,
+ Shader* vertex_shader,
+ Shader* fragment_shader) {
+ EXPECT_CALL(*cache_.get(), SaveLinkedProgram(
+ program->service_id(),
+ vertex_shader,
+ NULL,
+ fragment_shader,
+ NULL,
+ &program->bind_attrib_location_map(),
+ _)).Times(1);
+ }
+
+ void SetExpectationsForNotCachingProgram() {
+ SetExpectationsForNotCachingProgram(program_,
+ vertex_shader_,
+ fragment_shader_);
+ }
+
+ void SetExpectationsForNotCachingProgram(
+ Program* program,
+ Shader* vertex_shader,
+ Shader* fragment_shader) {
+ EXPECT_CALL(*cache_.get(), SaveLinkedProgram(
+ program->service_id(),
+ vertex_shader,
+ NULL,
+ fragment_shader,
+ NULL,
+ &program->bind_attrib_location_map(),
+ _)).Times(0);
+ }
+
+ void SetExpectationsForProgramLoad(ProgramCache::ProgramLoadResult result) {
+ SetExpectationsForProgramLoad(kServiceProgramId,
+ program_,
+ vertex_shader_,
+ fragment_shader_,
+ result);
+ }
+
+ void SetExpectationsForProgramLoad(
+ GLuint service_program_id,
+ Program* program,
+ Shader* vertex_shader,
+ Shader* fragment_shader,
+ ProgramCache::ProgramLoadResult result) {
+ EXPECT_CALL(*cache_.get(),
+ LoadLinkedProgram(service_program_id,
+ vertex_shader,
+ NULL,
+ fragment_shader,
+ NULL,
+ &program->bind_attrib_location_map(),
+ _))
+ .WillOnce(Return(result));
+ }
+
+ void SetExpectationsForProgramLoadSuccess() {
+ SetExpectationsForProgramLoadSuccess(kServiceProgramId);
+ }
+
+ void SetExpectationsForProgramLoadSuccess(GLuint service_program_id) {
+ TestHelper::SetupProgramSuccessExpectations(gl_.get(),
+ NULL,
+ 0,
+ NULL,
+ 0,
+ service_program_id);
+ }
+
+ void SetExpectationsForProgramLink() {
+ SetExpectationsForProgramLink(kServiceProgramId);
+ }
+
+ void SetExpectationsForProgramLink(GLuint service_program_id) {
+ TestHelper::SetupShader(gl_.get(), NULL, 0, NULL, 0, service_program_id);
+ if (gfx::g_driver_gl.ext.b_GL_ARB_get_program_binary) {
+ EXPECT_CALL(*gl_.get(),
+ ProgramParameteri(service_program_id,
+ PROGRAM_BINARY_RETRIEVABLE_HINT,
+ GL_TRUE)).Times(1);
+ }
+ }
+
+ void SetExpectationsForSuccessCompile(
+ const Shader* shader) {
+ const GLuint shader_id = shader->service_id();
+ const char* src = shader->source().c_str();
+ EXPECT_CALL(*gl_.get(),
+ ShaderSource(shader_id, 1, Pointee(src), NULL)).Times(1);
+ EXPECT_CALL(*gl_.get(), CompileShader(shader_id)).Times(1);
+ EXPECT_CALL(*gl_.get(), GetShaderiv(shader_id, GL_COMPILE_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(GL_TRUE));
+ }
+
+ void SetExpectationsForNoCompile(const Shader* shader) {
+ const GLuint shader_id = shader->service_id();
+ const char* src = shader->source().c_str();
+ EXPECT_CALL(*gl_.get(),
+ ShaderSource(shader_id, 1, Pointee(src), NULL)).Times(0);
+ EXPECT_CALL(*gl_.get(), CompileShader(shader_id)).Times(0);
+ EXPECT_CALL(*gl_.get(), GetShaderiv(shader_id, GL_COMPILE_STATUS, _))
+ .Times(0);
+ }
+
+ void SetExpectationsForErrorCompile(const Shader* shader) {
+ const GLuint shader_id = shader->service_id();
+ const char* src = shader->source().c_str();
+ EXPECT_CALL(*gl_.get(),
+ ShaderSource(shader_id, 1, Pointee(src), NULL)).Times(1);
+ EXPECT_CALL(*gl_.get(), CompileShader(shader_id)).Times(1);
+ EXPECT_CALL(*gl_.get(), GetShaderiv(shader_id, GL_COMPILE_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(GL_FALSE));
+ EXPECT_CALL(*gl_.get(), GetShaderiv(shader_id, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0));
+ EXPECT_CALL(*gl_.get(), GetShaderInfoLog(shader_id, 0, _, _))
+ .Times(1);
+ }
+
+ scoped_ptr<MockProgramCache> cache_;
+ ProgramManager manager_;
+
+ Shader* vertex_shader_;
+ Shader* fragment_shader_;
+ Program* program_;
+ ShaderManager shader_manager_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLuint ProgramManagerWithCacheTest::kClientProgramId;
+const GLuint ProgramManagerWithCacheTest::kServiceProgramId;
+const GLuint ProgramManagerWithCacheTest::kVertexShaderClientId;
+const GLuint ProgramManagerWithCacheTest::kFragmentShaderClientId;
+const GLuint ProgramManagerWithCacheTest::kVertexShaderServiceId;
+const GLuint ProgramManagerWithCacheTest::kFragmentShaderServiceId;
+#endif
+
+TEST_F(ProgramManagerWithCacheTest, CacheProgramOnSuccessfulLink) {
+ SetShadersCompiled();
+ SetExpectationsForProgramLink();
+ SetExpectationsForProgramCached();
+ EXPECT_TRUE(program_->Link(NULL, NULL, NULL,
+ Program::kCountOnlyStaticallyUsed, base::Bind(&ShaderCacheCb)));
+}
+
+TEST_F(ProgramManagerWithCacheTest, LoadProgramOnProgramCacheHit) {
+ SetShadersCompiled();
+ SetProgramCached();
+
+ SetExpectationsForNoCompile(vertex_shader_);
+ SetExpectationsForNoCompile(fragment_shader_);
+ SetExpectationsForProgramLoad(ProgramCache::PROGRAM_LOAD_SUCCESS);
+ SetExpectationsForNotCachingProgram();
+ SetExpectationsForProgramLoadSuccess();
+
+ EXPECT_TRUE(program_->Link(NULL, NULL, NULL,
+ Program::kCountOnlyStaticallyUsed, base::Bind(&ShaderCacheCb)));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/query_manager.cc b/gpu/command_buffer/service/query_manager.cc
new file mode 100644
index 0000000..1d36c89
--- /dev/null
+++ b/gpu/command_buffer/service/query_manager.cc
@@ -0,0 +1,747 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/query_manager.h"
+
+#include "base/atomicops.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/numerics/safe_math.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "ui/gl/gl_fence.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+class AsyncPixelTransferCompletionObserverImpl
+ : public AsyncPixelTransferCompletionObserver {
+ public:
+ AsyncPixelTransferCompletionObserverImpl(base::subtle::Atomic32 submit_count)
+ : submit_count_(submit_count), cancelled_(false) {}
+
+ void Cancel() {
+ base::AutoLock locked(lock_);
+ cancelled_ = true;
+ }
+
+ virtual void DidComplete(const AsyncMemoryParams& mem_params) OVERRIDE {
+ base::AutoLock locked(lock_);
+ if (!cancelled_) {
+ DCHECK(mem_params.buffer().get());
+ void* data = mem_params.GetDataAddress();
+ QuerySync* sync = static_cast<QuerySync*>(data);
+ base::subtle::Release_Store(&sync->process_count, submit_count_);
+ }
+ }
+
+ private:
+ virtual ~AsyncPixelTransferCompletionObserverImpl() {}
+
+ base::subtle::Atomic32 submit_count_;
+
+ base::Lock lock_;
+ bool cancelled_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferCompletionObserverImpl);
+};
+
+class AsyncPixelTransfersCompletedQuery
+ : public QueryManager::Query,
+ public base::SupportsWeakPtr<AsyncPixelTransfersCompletedQuery> {
+ public:
+ AsyncPixelTransfersCompletedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~AsyncPixelTransfersCompletedQuery();
+
+ scoped_refptr<AsyncPixelTransferCompletionObserverImpl> observer_;
+};
+
+AsyncPixelTransfersCompletedQuery::AsyncPixelTransfersCompletedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset) {
+}
+
+bool AsyncPixelTransfersCompletedQuery::Begin() {
+ return true;
+}
+
+bool AsyncPixelTransfersCompletedQuery::End(
+ base::subtle::Atomic32 submit_count) {
+ // Get the real shared memory since it might need to be duped to prevent
+ // use-after-free of the memory.
+ scoped_refptr<Buffer> buffer =
+ manager()->decoder()->GetSharedMemoryBuffer(shm_id());
+ if (!buffer.get())
+ return false;
+ AsyncMemoryParams mem_params(buffer, shm_offset(), sizeof(QuerySync));
+ if (!mem_params.GetDataAddress())
+ return false;
+
+ observer_ = new AsyncPixelTransferCompletionObserverImpl(submit_count);
+
+ // Ask AsyncPixelTransferDelegate to run completion callback after all
+ // previous async transfers are done. No guarantee that callback is run
+ // on the current thread.
+ manager()->decoder()->GetAsyncPixelTransferManager()->AsyncNotifyCompletion(
+ mem_params, observer_.get());
+
+ return AddToPendingTransferQueue(submit_count);
+}
+
+bool AsyncPixelTransfersCompletedQuery::Process() {
+ QuerySync* sync = manager()->decoder()->GetSharedMemoryAs<QuerySync*>(
+ shm_id(), shm_offset(), sizeof(*sync));
+ if (!sync)
+ return false;
+
+ // Check if completion callback has been run. sync->process_count atomicity
+  // is guaranteed as it is already used to notify the client of a completed
+  // query.
+ if (base::subtle::Acquire_Load(&sync->process_count) != submit_count())
+ return true;
+
+ UnmarkAsPending();
+ return true;
+}
+
+void AsyncPixelTransfersCompletedQuery::Destroy(bool /* have_context */) {
+ if (!IsDeleted()) {
+ MarkAsDeleted();
+ }
+}
+
+AsyncPixelTransfersCompletedQuery::~AsyncPixelTransfersCompletedQuery() {
+ if (observer_.get())
+ observer_->Cancel();
+}
+
+} // namespace
+
+class AllSamplesPassedQuery : public QueryManager::Query {
+ public:
+ AllSamplesPassedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset,
+ GLuint service_id);
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~AllSamplesPassedQuery();
+
+ private:
+ // Service side query id.
+ GLuint service_id_;
+};
+
+AllSamplesPassedQuery::AllSamplesPassedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset,
+ GLuint service_id)
+ : Query(manager, target, shm_id, shm_offset),
+ service_id_(service_id) {
+}
+
+bool AllSamplesPassedQuery::Begin() {
+ BeginQueryHelper(target(), service_id_);
+ return true;
+}
+
+bool AllSamplesPassedQuery::End(base::subtle::Atomic32 submit_count) {
+ EndQueryHelper(target());
+ return AddToPendingQueue(submit_count);
+}
+
+bool AllSamplesPassedQuery::Process() {
+ GLuint available = 0;
+ glGetQueryObjectuivARB(
+ service_id_, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ if (!available) {
+ return true;
+ }
+ GLuint result = 0;
+ glGetQueryObjectuivARB(
+ service_id_, GL_QUERY_RESULT_EXT, &result);
+
+ return MarkAsCompleted(result != 0);
+}
+
+void AllSamplesPassedQuery::Destroy(bool have_context) {
+ if (have_context && !IsDeleted()) {
+ glDeleteQueriesARB(1, &service_id_);
+ MarkAsDeleted();
+ }
+}
+
+AllSamplesPassedQuery::~AllSamplesPassedQuery() {
+}
+
+class CommandsIssuedQuery : public QueryManager::Query {
+ public:
+ CommandsIssuedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~CommandsIssuedQuery();
+
+ private:
+ base::TimeTicks begin_time_;
+};
+
+CommandsIssuedQuery::CommandsIssuedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset) {
+}
+
+bool CommandsIssuedQuery::Begin() {
+ begin_time_ = base::TimeTicks::HighResNow();
+ return true;
+}
+
+bool CommandsIssuedQuery::End(base::subtle::Atomic32 submit_count) {
+ base::TimeDelta elapsed = base::TimeTicks::HighResNow() - begin_time_;
+ MarkAsPending(submit_count);
+ return MarkAsCompleted(elapsed.InMicroseconds());
+}
+
+bool CommandsIssuedQuery::Process() {
+ NOTREACHED();
+ return true;
+}
+
+void CommandsIssuedQuery::Destroy(bool /* have_context */) {
+ if (!IsDeleted()) {
+ MarkAsDeleted();
+ }
+}
+
+CommandsIssuedQuery::~CommandsIssuedQuery() {
+}
+
+class CommandLatencyQuery : public QueryManager::Query {
+ public:
+ CommandLatencyQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~CommandLatencyQuery();
+};
+
+CommandLatencyQuery::CommandLatencyQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset) {
+}
+
+bool CommandLatencyQuery::Begin() {
+ return true;
+}
+
+bool CommandLatencyQuery::End(base::subtle::Atomic32 submit_count) {
+ base::TimeDelta now = base::TimeTicks::HighResNow() - base::TimeTicks();
+ MarkAsPending(submit_count);
+ return MarkAsCompleted(now.InMicroseconds());
+}
+
+bool CommandLatencyQuery::Process() {
+ NOTREACHED();
+ return true;
+}
+
+void CommandLatencyQuery::Destroy(bool /* have_context */) {
+ if (!IsDeleted()) {
+ MarkAsDeleted();
+ }
+}
+
+CommandLatencyQuery::~CommandLatencyQuery() {
+}
+
+
+class AsyncReadPixelsCompletedQuery
+ : public QueryManager::Query,
+ public base::SupportsWeakPtr<AsyncReadPixelsCompletedQuery> {
+ public:
+ AsyncReadPixelsCompletedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ void Complete();
+ virtual ~AsyncReadPixelsCompletedQuery();
+
+ private:
+ bool completed_;
+ bool complete_result_;
+};
+
+AsyncReadPixelsCompletedQuery::AsyncReadPixelsCompletedQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset),
+ completed_(false),
+ complete_result_(false) {
+}
+
+bool AsyncReadPixelsCompletedQuery::Begin() {
+ return true;
+}
+
+bool AsyncReadPixelsCompletedQuery::End(base::subtle::Atomic32 submit_count) {
+ if (!AddToPendingQueue(submit_count)) {
+ return false;
+ }
+ manager()->decoder()->WaitForReadPixels(
+ base::Bind(&AsyncReadPixelsCompletedQuery::Complete,
+ AsWeakPtr()));
+
+ return Process();
+}
+
+void AsyncReadPixelsCompletedQuery::Complete() {
+ completed_ = true;
+ complete_result_ = MarkAsCompleted(1);
+}
+
+bool AsyncReadPixelsCompletedQuery::Process() {
+ return !completed_ || complete_result_;
+}
+
+void AsyncReadPixelsCompletedQuery::Destroy(bool /* have_context */) {
+ if (!IsDeleted()) {
+ MarkAsDeleted();
+ }
+}
+
+AsyncReadPixelsCompletedQuery::~AsyncReadPixelsCompletedQuery() {
+}
+
+
+class GetErrorQuery : public QueryManager::Query {
+ public:
+ GetErrorQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~GetErrorQuery();
+
+ private:
+};
+
+GetErrorQuery::GetErrorQuery(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset) {
+}
+
+bool GetErrorQuery::Begin() {
+ return true;
+}
+
+bool GetErrorQuery::End(base::subtle::Atomic32 submit_count) {
+ MarkAsPending(submit_count);
+ return MarkAsCompleted(manager()->decoder()->GetErrorState()->GetGLError());
+}
+
+bool GetErrorQuery::Process() {
+ NOTREACHED();
+ return true;
+}
+
+void GetErrorQuery::Destroy(bool /* have_context */) {
+ if (!IsDeleted()) {
+ MarkAsDeleted();
+ }
+}
+
+GetErrorQuery::~GetErrorQuery() {
+}
+
+class CommandsCompletedQuery : public QueryManager::Query {
+ public:
+ CommandsCompletedQuery(QueryManager* manager,
+ GLenum target,
+ int32 shm_id,
+ uint32 shm_offset);
+
+ // Overridden from QueryManager::Query:
+ virtual bool Begin() OVERRIDE;
+ virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE;
+ virtual bool Process() OVERRIDE;
+ virtual void Destroy(bool have_context) OVERRIDE;
+
+ protected:
+ virtual ~CommandsCompletedQuery();
+
+ private:
+ scoped_ptr<gfx::GLFence> fence_;
+};
+
+CommandsCompletedQuery::CommandsCompletedQuery(QueryManager* manager,
+ GLenum target,
+ int32 shm_id,
+ uint32 shm_offset)
+ : Query(manager, target, shm_id, shm_offset) {}
+
+bool CommandsCompletedQuery::Begin() { return true; }
+
+bool CommandsCompletedQuery::End(base::subtle::Atomic32 submit_count) {
+ fence_.reset(gfx::GLFence::Create());
+ DCHECK(fence_);
+ return AddToPendingQueue(submit_count);
+}
+
+bool CommandsCompletedQuery::Process() {
+ if (fence_ && !fence_->HasCompleted())
+ return true;
+ return MarkAsCompleted(0);
+}
+
+void CommandsCompletedQuery::Destroy(bool have_context) {
+ if (have_context && !IsDeleted()) {
+ fence_.reset();
+ MarkAsDeleted();
+ }
+}
+
+CommandsCompletedQuery::~CommandsCompletedQuery() {}
+
+QueryManager::QueryManager(
+ GLES2Decoder* decoder,
+ FeatureInfo* feature_info)
+ : decoder_(decoder),
+ use_arb_occlusion_query2_for_occlusion_query_boolean_(
+ feature_info->feature_flags(
+ ).use_arb_occlusion_query2_for_occlusion_query_boolean),
+ use_arb_occlusion_query_for_occlusion_query_boolean_(
+ feature_info->feature_flags(
+ ).use_arb_occlusion_query_for_occlusion_query_boolean),
+ query_count_(0) {
+ DCHECK(!(use_arb_occlusion_query_for_occlusion_query_boolean_ &&
+ use_arb_occlusion_query2_for_occlusion_query_boolean_));
+}
+
+QueryManager::~QueryManager() {
+ DCHECK(queries_.empty());
+
+ // If this triggers, that means something is keeping a reference to
+ // a Query belonging to this.
+ CHECK_EQ(query_count_, 0u);
+}
+
+void QueryManager::Destroy(bool have_context) {
+ pending_queries_.clear();
+ pending_transfer_queries_.clear();
+ while (!queries_.empty()) {
+ Query* query = queries_.begin()->second.get();
+ query->Destroy(have_context);
+ queries_.erase(queries_.begin());
+ }
+}
+
+QueryManager::Query* QueryManager::CreateQuery(
+ GLenum target, GLuint client_id, int32 shm_id, uint32 shm_offset) {
+ scoped_refptr<Query> query;
+ switch (target) {
+ case GL_COMMANDS_ISSUED_CHROMIUM:
+ query = new CommandsIssuedQuery(this, target, shm_id, shm_offset);
+ break;
+ case GL_LATENCY_QUERY_CHROMIUM:
+ query = new CommandLatencyQuery(this, target, shm_id, shm_offset);
+ break;
+ case GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM:
+ // Currently async pixel transfer delegates only support uploads.
+ query = new AsyncPixelTransfersCompletedQuery(
+ this, target, shm_id, shm_offset);
+ break;
+ case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
+ query = new AsyncReadPixelsCompletedQuery(
+ this, target, shm_id, shm_offset);
+ break;
+ case GL_GET_ERROR_QUERY_CHROMIUM:
+ query = new GetErrorQuery(this, target, shm_id, shm_offset);
+ break;
+ case GL_COMMANDS_COMPLETED_CHROMIUM:
+ query = new CommandsCompletedQuery(this, target, shm_id, shm_offset);
+ break;
+ default: {
+ GLuint service_id = 0;
+ glGenQueriesARB(1, &service_id);
+ DCHECK_NE(0u, service_id);
+ query = new AllSamplesPassedQuery(
+ this, target, shm_id, shm_offset, service_id);
+ break;
+ }
+ }
+ std::pair<QueryMap::iterator, bool> result =
+ queries_.insert(std::make_pair(client_id, query));
+ DCHECK(result.second);
+ return query.get();
+}
+
+void QueryManager::GenQueries(GLsizei n, const GLuint* queries) {
+ DCHECK_GE(n, 0);
+ for (GLsizei i = 0; i < n; ++i) {
+ generated_query_ids_.insert(queries[i]);
+ }
+}
+
+bool QueryManager::IsValidQuery(GLuint id) {
+ GeneratedQueryIds::iterator it = generated_query_ids_.find(id);
+ return it != generated_query_ids_.end();
+}
+
+QueryManager::Query* QueryManager::GetQuery(
+ GLuint client_id) {
+ QueryMap::iterator it = queries_.find(client_id);
+ return it != queries_.end() ? it->second.get() : NULL;
+}
+
+void QueryManager::RemoveQuery(GLuint client_id) {
+ QueryMap::iterator it = queries_.find(client_id);
+ if (it != queries_.end()) {
+ Query* query = it->second.get();
+ RemovePendingQuery(query);
+ query->MarkAsDeleted();
+ queries_.erase(it);
+ }
+ generated_query_ids_.erase(client_id);
+}
+
+void QueryManager::StartTracking(QueryManager::Query* /* query */) {
+ ++query_count_;
+}
+
+void QueryManager::StopTracking(QueryManager::Query* /* query */) {
+ --query_count_;
+}
+
+GLenum QueryManager::AdjustTargetForEmulation(GLenum target) {
+ switch (target) {
+ case GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT:
+ case GL_ANY_SAMPLES_PASSED_EXT:
+ if (use_arb_occlusion_query2_for_occlusion_query_boolean_) {
+ // ARB_occlusion_query2 does not have a
+ // GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT
+ // target.
+ target = GL_ANY_SAMPLES_PASSED_EXT;
+ } else if (use_arb_occlusion_query_for_occlusion_query_boolean_) {
+ // ARB_occlusion_query does not have a
+ // GL_ANY_SAMPLES_PASSED_EXT
+ // target.
+ target = GL_SAMPLES_PASSED_ARB;
+ }
+ break;
+ default:
+ break;
+ }
+ return target;
+}
+
+void QueryManager::BeginQueryHelper(GLenum target, GLuint id) {
+ target = AdjustTargetForEmulation(target);
+ glBeginQueryARB(target, id);
+}
+
+void QueryManager::EndQueryHelper(GLenum target) {
+ target = AdjustTargetForEmulation(target);
+ glEndQueryARB(target);
+}
+
+QueryManager::Query::Query(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset)
+ : manager_(manager),
+ target_(target),
+ shm_id_(shm_id),
+ shm_offset_(shm_offset),
+ submit_count_(0),
+ pending_(false),
+ deleted_(false) {
+ DCHECK(manager);
+ manager_->StartTracking(this);
+}
+
+void QueryManager::Query::RunCallbacks() {
+ for (size_t i = 0; i < callbacks_.size(); i++) {
+ callbacks_[i].Run();
+ }
+ callbacks_.clear();
+}
+
+void QueryManager::Query::AddCallback(base::Closure callback) {
+ if (pending_) {
+ callbacks_.push_back(callback);
+ } else {
+ callback.Run();
+ }
+}
+
+QueryManager::Query::~Query() {
+ // The query is getting deleted, either by the client or
+ // because the context was lost. Call any outstanding
+ // callbacks to avoid leaks.
+ RunCallbacks();
+ if (manager_) {
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+bool QueryManager::Query::MarkAsCompleted(uint64 result) {
+ DCHECK(pending_);
+ QuerySync* sync = manager_->decoder_->GetSharedMemoryAs<QuerySync*>(
+ shm_id_, shm_offset_, sizeof(*sync));
+ if (!sync) {
+ return false;
+ }
+
+ pending_ = false;
+ sync->result = result;
+ base::subtle::Release_Store(&sync->process_count, submit_count_);
+
+ return true;
+}
+
+bool QueryManager::ProcessPendingQueries() {
+ while (!pending_queries_.empty()) {
+ Query* query = pending_queries_.front().get();
+ if (!query->Process()) {
+ return false;
+ }
+ if (query->pending()) {
+ break;
+ }
+ query->RunCallbacks();
+ pending_queries_.pop_front();
+ }
+
+ return true;
+}
+
+bool QueryManager::HavePendingQueries() {
+ return !pending_queries_.empty();
+}
+
+bool QueryManager::ProcessPendingTransferQueries() {
+ while (!pending_transfer_queries_.empty()) {
+ Query* query = pending_transfer_queries_.front().get();
+ if (!query->Process()) {
+ return false;
+ }
+ if (query->pending()) {
+ break;
+ }
+ query->RunCallbacks();
+ pending_transfer_queries_.pop_front();
+ }
+
+ return true;
+}
+
+bool QueryManager::HavePendingTransferQueries() {
+ return !pending_transfer_queries_.empty();
+}
+
+bool QueryManager::AddPendingQuery(Query* query,
+ base::subtle::Atomic32 submit_count) {
+ DCHECK(query);
+ DCHECK(!query->IsDeleted());
+ if (!RemovePendingQuery(query)) {
+ return false;
+ }
+ query->MarkAsPending(submit_count);
+ pending_queries_.push_back(query);
+ return true;
+}
+
+bool QueryManager::AddPendingTransferQuery(
+ Query* query,
+ base::subtle::Atomic32 submit_count) {
+ DCHECK(query);
+ DCHECK(!query->IsDeleted());
+ if (!RemovePendingQuery(query)) {
+ return false;
+ }
+ query->MarkAsPending(submit_count);
+ pending_transfer_queries_.push_back(query);
+ return true;
+}
+
+bool QueryManager::RemovePendingQuery(Query* query) {
+ DCHECK(query);
+ if (query->pending()) {
+ // TODO(gman): Speed this up if this is a common operation. This would only
+    // happen if you do begin/end begin/end on the same query without waiting
+ // for the first one to finish.
+ for (QueryQueue::iterator it = pending_queries_.begin();
+ it != pending_queries_.end(); ++it) {
+ if (it->get() == query) {
+ pending_queries_.erase(it);
+ break;
+ }
+ }
+ for (QueryQueue::iterator it = pending_transfer_queries_.begin();
+ it != pending_transfer_queries_.end(); ++it) {
+ if (it->get() == query) {
+ pending_transfer_queries_.erase(it);
+ break;
+ }
+ }
+ if (!query->MarkAsCompleted(0)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool QueryManager::BeginQuery(Query* query) {
+ DCHECK(query);
+ if (!RemovePendingQuery(query)) {
+ return false;
+ }
+ return query->Begin();
+}
+
+bool QueryManager::EndQuery(Query* query, base::subtle::Atomic32 submit_count) {
+ DCHECK(query);
+ if (!RemovePendingQuery(query)) {
+ return false;
+ }
+ return query->End(submit_count);
+}
+
+} // namespace gles2
+} // namespace gpu
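
A minimal, illustrative sketch (not part of this patch) of how a decoder-side caller could drive one boolean occlusion query through the manager above; the manager pointer, client id, and shared-memory id/offset are placeholders assumed to come from the usual command-buffer plumbing.

#include "gpu/command_buffer/service/query_manager.h"

void RunOcclusionQuery(gpu::gles2::QueryManager* manager,
                       GLuint client_id, int32 shm_id, uint32 shm_offset) {
  // GL_ANY_SAMPLES_PASSED_EXT falls through to the default case in
  // CreateQuery() and is backed by a real GL query object.
  gpu::gles2::QueryManager::Query* query = manager->CreateQuery(
      GL_ANY_SAMPLES_PASSED_EXT, client_id, shm_id, shm_offset);
  manager->BeginQuery(query);                      // glBeginQueryARB
  // ... draw calls would be issued here ...
  manager->EndQuery(query, 1 /* submit_count */);  // query is now pending
  // Later, typically from the decoder's idle work: once the GL result is
  // available, this writes the result and submit count into the QuerySync
  // block in shared memory and pops the query from the pending queue.
  if (manager->HavePendingQueries())
    manager->ProcessPendingQueries();
}
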
diff --git a/gpu/command_buffer/service/query_manager.h b/gpu/command_buffer/service/query_manager.h
new file mode 100644
index 0000000..62da3b8
--- /dev/null
+++ b/gpu/command_buffer/service/query_manager.h
@@ -0,0 +1,249 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_QUERY_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_QUERY_MANAGER_H_
+
+#include <deque>
+#include <vector>
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class GLES2Decoder;
+
+namespace gles2 {
+
+class FeatureInfo;
+
+// This class keeps track of the queries and their state.
+// As Queries are not shared, there is one QueryManager per context.
+class GPU_EXPORT QueryManager {
+ public:
+ class GPU_EXPORT Query : public base::RefCounted<Query> {
+ public:
+ Query(
+ QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset);
+
+ GLenum target() const {
+ return target_;
+ }
+
+ bool IsDeleted() const {
+ return deleted_;
+ }
+
+ bool IsValid() const {
+ return target() && !IsDeleted();
+ }
+
+ bool pending() const {
+ return pending_;
+ }
+
+ int32 shm_id() const {
+ return shm_id_;
+ }
+
+ uint32 shm_offset() const {
+ return shm_offset_;
+ }
+
+ // Returns false if shared memory for sync is invalid.
+ virtual bool Begin() = 0;
+
+ // Returns false if shared memory for sync is invalid.
+ virtual bool End(base::subtle::Atomic32 submit_count) = 0;
+
+ // Returns false if shared memory for sync is invalid.
+ virtual bool Process() = 0;
+
+ virtual void Destroy(bool have_context) = 0;
+
+ void AddCallback(base::Closure callback);
+
+ protected:
+ virtual ~Query();
+
+ QueryManager* manager() const {
+ return manager_;
+ }
+
+ void MarkAsDeleted() {
+ deleted_ = true;
+ }
+
+ // Returns false if shared memory for sync is invalid.
+ bool MarkAsCompleted(uint64 result);
+
+ void MarkAsPending(base::subtle::Atomic32 submit_count) {
+ DCHECK(!pending_);
+ pending_ = true;
+ submit_count_ = submit_count;
+ }
+
+ void UnmarkAsPending() {
+ DCHECK(pending_);
+ pending_ = false;
+ }
+
+ // Returns false if shared memory for sync is invalid.
+ bool AddToPendingQueue(base::subtle::Atomic32 submit_count) {
+ return manager_->AddPendingQuery(this, submit_count);
+ }
+
+ // Returns false if shared memory for sync is invalid.
+ bool AddToPendingTransferQueue(base::subtle::Atomic32 submit_count) {
+ return manager_->AddPendingTransferQuery(this, submit_count);
+ }
+
+ void BeginQueryHelper(GLenum target, GLuint id) {
+ manager_->BeginQueryHelper(target, id);
+ }
+
+ void EndQueryHelper(GLenum target) {
+ manager_->EndQueryHelper(target);
+ }
+
+ base::subtle::Atomic32 submit_count() const { return submit_count_; }
+
+ private:
+ friend class QueryManager;
+ friend class QueryManagerTest;
+ friend class base::RefCounted<Query>;
+
+ void RunCallbacks();
+
+ // The manager that owns this Query.
+ QueryManager* manager_;
+
+ // The type of query.
+ GLenum target_;
+
+ // The shared memory used with this Query.
+ int32 shm_id_;
+ uint32 shm_offset_;
+
+    // Count to set the process count to when completed.
+ base::subtle::Atomic32 submit_count_;
+
+ // True if in the queue.
+ bool pending_;
+
+ // True if deleted.
+ bool deleted_;
+
+ // List of callbacks to run when result is available.
+ std::vector<base::Closure> callbacks_;
+ };
+
+ QueryManager(
+ GLES2Decoder* decoder,
+ FeatureInfo* feature_info);
+ ~QueryManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+  // Creates a Query for the given target and client id.
+ Query* CreateQuery(
+ GLenum target, GLuint client_id, int32 shm_id, uint32 shm_offset);
+
+  // Gets the Query for the given client id.
+ Query* GetQuery(GLuint client_id);
+
+  // Removes the Query for the given client id.
+ void RemoveQuery(GLuint client_id);
+
+ // Returns false if any query is pointing to invalid shared memory.
+ bool BeginQuery(Query* query);
+
+ // Returns false if any query is pointing to invalid shared memory.
+ bool EndQuery(Query* query, base::subtle::Atomic32 submit_count);
+
+ // Processes pending queries. Returns false if any queries are pointing
+ // to invalid shared memory.
+ bool ProcessPendingQueries();
+
+ // True if there are pending queries.
+ bool HavePendingQueries();
+
+ // Processes pending transfer queries. Returns false if any queries are
+ // pointing to invalid shared memory.
+ bool ProcessPendingTransferQueries();
+
+ // True if there are pending transfer queries.
+ bool HavePendingTransferQueries();
+
+ GLES2Decoder* decoder() const {
+ return decoder_;
+ }
+
+ void GenQueries(GLsizei n, const GLuint* queries);
+ bool IsValidQuery(GLuint id);
+
+ private:
+ void StartTracking(Query* query);
+ void StopTracking(Query* query);
+
+ // Wrappers for BeginQueryARB and EndQueryARB to hide differences between
+ // ARB_occlusion_query2 and EXT_occlusion_query_boolean.
+ void BeginQueryHelper(GLenum target, GLuint id);
+ void EndQueryHelper(GLenum target);
+
+ // Adds to queue of queries waiting for completion.
+ // Returns false if any query is pointing to invalid shared memory.
+ bool AddPendingQuery(Query* query, base::subtle::Atomic32 submit_count);
+
+ // Adds to queue of transfer queries waiting for completion.
+ // Returns false if any query is pointing to invalid shared memory.
+ bool AddPendingTransferQuery(Query* query,
+ base::subtle::Atomic32 submit_count);
+
+ // Removes a query from the queue of pending queries.
+ // Returns false if any query is pointing to invalid shared memory.
+ bool RemovePendingQuery(Query* query);
+
+ // Returns a target used for the underlying GL extension
+ // used to emulate a query.
+ GLenum AdjustTargetForEmulation(GLenum target);
+
+ // Used to validate shared memory and get GL errors.
+ GLES2Decoder* decoder_;
+
+ bool use_arb_occlusion_query2_for_occlusion_query_boolean_;
+ bool use_arb_occlusion_query_for_occlusion_query_boolean_;
+
+  // Counts the number of Queries allocated with 'this' as their manager.
+  // Allows checking that no Query will outlive this.
+ unsigned query_count_;
+
+ // Info for each query in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<Query> > QueryMap;
+ QueryMap queries_;
+
+ typedef base::hash_set<GLuint> GeneratedQueryIds;
+ GeneratedQueryIds generated_query_ids_;
+
+ // Queries waiting for completion.
+ typedef std::deque<scoped_refptr<Query> > QueryQueue;
+ QueryQueue pending_queries_;
+
+ // Async pixel transfer queries waiting for completion.
+ QueryQueue pending_transfer_queries_;
+
+ DISALLOW_COPY_AND_ASSIGN(QueryManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_QUERY_MANAGER_H_
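
For reference, each concrete query in query_manager.cc fills in the Begin/End/Process/Destroy contract declared above. A hypothetical trivial subclass (shown only to illustrate that contract, not something this change adds) would mirror the pattern of GetErrorQuery and CommandsIssuedQuery:

// Hypothetical example: a query that completes immediately at End().
class NoopQuery : public gpu::gles2::QueryManager::Query {
 public:
  NoopQuery(gpu::gles2::QueryManager* manager, GLenum target,
            int32 shm_id, uint32 shm_offset)
      : Query(manager, target, shm_id, shm_offset) {}

  virtual bool Begin() OVERRIDE { return true; }

  virtual bool End(base::subtle::Atomic32 submit_count) OVERRIDE {
    // Mark pending, then write the result (0 here) and the submit count
    // into the shared-memory QuerySync block.
    MarkAsPending(submit_count);
    return MarkAsCompleted(0);
  }

  virtual bool Process() OVERRIDE {
    // Never added to a pending queue, so Process() should not be reached.
    NOTREACHED();
    return true;
  }

  virtual void Destroy(bool /* have_context */) OVERRIDE {
    if (!IsDeleted())
      MarkAsDeleted();
  }

 protected:
  virtual ~NoopQuery() {}
};
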
diff --git a/gpu/command_buffer/service/query_manager_unittest.cc b/gpu/command_buffer/service/query_manager_unittest.cc
new file mode 100644
index 0000000..9f0156f
--- /dev/null
+++ b/gpu/command_buffer/service/query_manager_unittest.cc
@@ -0,0 +1,575 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/query_manager.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::SetArgumentPointee;
+
+namespace gpu {
+namespace gles2 {
+
+class QueryManagerTest : public GpuServiceTest {
+ public:
+ static const int32 kSharedMemoryId = 401;
+ static const size_t kSharedBufferSize = 2048;
+ static const uint32 kSharedMemoryOffset = 132;
+ static const int32 kInvalidSharedMemoryId = 402;
+ static const uint32 kInvalidSharedMemoryOffset = kSharedBufferSize + 1;
+ static const uint32 kInitialResult = 0xBDBDBDBDu;
+ static const uint8 kInitialMemoryValue = 0xBDu;
+
+ QueryManagerTest() {
+ }
+ virtual ~QueryManagerTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ engine_.reset(new MockCommandBufferEngine());
+ decoder_.reset(new MockGLES2Decoder());
+ decoder_->set_engine(engine_.get());
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(),
+ "GL_EXT_occlusion_query_boolean");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ manager_.reset(new QueryManager(decoder_.get(), feature_info.get()));
+ }
+
+ virtual void TearDown() {
+ decoder_.reset();
+ manager_->Destroy(false);
+ manager_.reset();
+ engine_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ QueryManager::Query* CreateQuery(
+ GLenum target, GLuint client_id, int32 shm_id, uint32 shm_offset,
+ GLuint service_id) {
+ EXPECT_CALL(*gl_, GenQueriesARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(service_id))
+ .RetiresOnSaturation();
+ return manager_->CreateQuery(target, client_id, shm_id, shm_offset);
+ }
+
+ void QueueQuery(QueryManager::Query* query,
+ GLuint service_id,
+ base::subtle::Atomic32 submit_count) {
+ EXPECT_CALL(*gl_, BeginQueryARB(query->target(), service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, EndQueryARB(query->target()))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->BeginQuery(query));
+ EXPECT_TRUE(manager_->EndQuery(query, submit_count));
+ }
+
+ scoped_ptr<MockGLES2Decoder> decoder_;
+ scoped_ptr<QueryManager> manager_;
+
+ private:
+ class MockCommandBufferEngine : public CommandBufferEngine {
+ public:
+ MockCommandBufferEngine() {
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
+ shared_memory->CreateAndMapAnonymous(kSharedBufferSize);
+ valid_buffer_ =
+ MakeBufferFromSharedMemory(shared_memory.Pass(), kSharedBufferSize);
+ data_ = static_cast<uint8*>(valid_buffer_->memory());
+ ClearSharedMemory();
+ }
+
+ virtual ~MockCommandBufferEngine() {
+ }
+
+ virtual scoped_refptr<gpu::Buffer> GetSharedMemoryBuffer(int32 shm_id)
+ OVERRIDE {
+ return shm_id == kSharedMemoryId ? valid_buffer_ : invalid_buffer_;
+ }
+
+ void ClearSharedMemory() {
+ memset(data_, kInitialMemoryValue, kSharedBufferSize);
+ }
+
+ virtual void set_token(int32 token) OVERRIDE {
+ DCHECK(false);
+ }
+
+ virtual bool SetGetBuffer(int32 /* transfer_buffer_id */) OVERRIDE {
+ DCHECK(false);
+ return false;
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual bool SetGetOffset(int32 offset) OVERRIDE {
+ DCHECK(false);
+ return false;
+ }
+
+ // Overridden from CommandBufferEngine.
+ virtual int32 GetGetOffset() OVERRIDE {
+ DCHECK(false);
+ return 0;
+ }
+
+ private:
+ uint8* data_;
+ scoped_refptr<gpu::Buffer> valid_buffer_;
+ scoped_refptr<gpu::Buffer> invalid_buffer_;
+ };
+
+ scoped_ptr<MockCommandBufferEngine> engine_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const int32 QueryManagerTest::kSharedMemoryId;
+const size_t QueryManagerTest::kSharedBufferSize;
+const uint32 QueryManagerTest::kSharedMemoryOffset;
+const int32 QueryManagerTest::kInvalidSharedMemoryId;
+const uint32 QueryManagerTest::kInvalidSharedMemoryOffset;
+const uint32 QueryManagerTest::kInitialResult;
+const uint8 QueryManagerTest::kInitialMemoryValue;
+#endif
+
+TEST_F(QueryManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+
+ EXPECT_FALSE(manager_->HavePendingQueries());
+ // Check we can create a Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(GL_ANY_SAMPLES_PASSED_EXT, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+ // Check we can get the same Query.
+ EXPECT_EQ(query.get(), manager_->GetQuery(kClient1Id));
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(manager_->GetQuery(kClient2Id) == NULL);
+ // Check we can delete the query.
+ manager_->RemoveQuery(kClient1Id);
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(manager_->GetQuery(kClient1Id) == NULL);
+ // Check query is deleted
+ EXPECT_TRUE(query->IsDeleted());
+ EXPECT_FALSE(manager_->HavePendingQueries());
+}
+
+TEST_F(QueryManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(GL_ANY_SAMPLES_PASSED_EXT, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+ EXPECT_CALL(*gl_, DeleteQueriesARB(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_->Destroy(true);
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(manager_->GetQuery(kClient1Id) == NULL);
+ // Check query is deleted
+ EXPECT_TRUE(query->IsDeleted());
+}
+
+TEST_F(QueryManagerTest, QueryBasic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(kTarget, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+
+ EXPECT_TRUE(query->IsValid());
+ EXPECT_FALSE(query->IsDeleted());
+ EXPECT_FALSE(query->pending());
+ EXPECT_EQ(kTarget, query->target());
+ EXPECT_EQ(kSharedMemoryId, query->shm_id());
+ EXPECT_EQ(kSharedMemoryOffset, query->shm_offset());
+}
+
+TEST_F(QueryManagerTest, ProcessPendingQuery) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+ const GLuint kResult = 1;
+
+ // Check nothing happens if there are no pending queries.
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(kTarget, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+
+ // Setup shared memory like client would.
+ QuerySync* sync = decoder_->GetSharedMemoryAs<QuerySync*>(
+ kSharedMemoryId, kSharedMemoryOffset, sizeof(*sync));
+ ASSERT_TRUE(sync != NULL);
+ sync->Reset();
+
+ // Queue it
+ QueueQuery(query.get(), kService1Id, kSubmitCount);
+ EXPECT_TRUE(query->pending());
+ EXPECT_TRUE(manager_->HavePendingQueries());
+
+ // Process with return not available.
+ // Expect 1 GL command.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+ EXPECT_TRUE(query->pending());
+ EXPECT_EQ(0, sync->process_count);
+ EXPECT_EQ(0u, sync->result);
+
+ // Process with return available.
+ // Expect 2 GL commands.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult))
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+ EXPECT_FALSE(query->pending());
+ EXPECT_EQ(kSubmitCount, sync->process_count);
+ EXPECT_EQ(kResult, sync->result);
+ EXPECT_FALSE(manager_->HavePendingQueries());
+
+ // Process with no queries.
+  // Expect no GL commands.
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+}
+
+TEST_F(QueryManagerTest, ProcessPendingQueries) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+ const GLuint kService2Id = 12;
+ const GLuint kClient3Id = 3;
+ const GLuint kService3Id = 13;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount1 = 123;
+ const base::subtle::Atomic32 kSubmitCount2 = 123;
+ const base::subtle::Atomic32 kSubmitCount3 = 123;
+ const GLuint kResult1 = 1;
+ const GLuint kResult2 = 1;
+ const GLuint kResult3 = 1;
+
+ // Setup shared memory like client would.
+ QuerySync* sync1 = decoder_->GetSharedMemoryAs<QuerySync*>(
+ kSharedMemoryId, kSharedMemoryOffset, sizeof(*sync1) * 3);
+ ASSERT_TRUE(sync1 != NULL);
+ QuerySync* sync2 = sync1 + 1;
+ QuerySync* sync3 = sync2 + 1;
+
+ // Create Queries.
+ scoped_refptr<QueryManager::Query> query1(
+ CreateQuery(kTarget, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset + sizeof(*sync1) * 0,
+ kService1Id));
+ scoped_refptr<QueryManager::Query> query2(
+ CreateQuery(kTarget, kClient2Id,
+ kSharedMemoryId, kSharedMemoryOffset + sizeof(*sync1) * 1,
+ kService2Id));
+ scoped_refptr<QueryManager::Query> query3(
+ CreateQuery(kTarget, kClient3Id,
+ kSharedMemoryId, kSharedMemoryOffset + sizeof(*sync1) * 2,
+ kService3Id));
+ ASSERT_TRUE(query1.get() != NULL);
+ ASSERT_TRUE(query2.get() != NULL);
+ ASSERT_TRUE(query3.get() != NULL);
+ EXPECT_FALSE(manager_->HavePendingQueries());
+
+ sync1->Reset();
+ sync2->Reset();
+ sync3->Reset();
+
+ // Queue them
+ QueueQuery(query1.get(), kService1Id, kSubmitCount1);
+ QueueQuery(query2.get(), kService2Id, kSubmitCount2);
+ QueueQuery(query3.get(), kService3Id, kSubmitCount3);
+ EXPECT_TRUE(query1->pending());
+ EXPECT_TRUE(query2->pending());
+ EXPECT_TRUE(query3->pending());
+ EXPECT_TRUE(manager_->HavePendingQueries());
+
+ // Process with return available for first 2 queries.
+ // Expect 4 GL commands.
+ {
+ InSequence s;
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService2Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService2Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult2))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService3Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+ }
+ EXPECT_FALSE(query1->pending());
+ EXPECT_FALSE(query2->pending());
+ EXPECT_TRUE(query3->pending());
+ EXPECT_EQ(kSubmitCount1, sync1->process_count);
+ EXPECT_EQ(kSubmitCount2, sync2->process_count);
+ EXPECT_EQ(kResult1, sync1->result);
+ EXPECT_EQ(kResult2, sync2->result);
+ EXPECT_EQ(0, sync3->process_count);
+ EXPECT_EQ(0u, sync3->result);
+ EXPECT_TRUE(manager_->HavePendingQueries());
+
+  // Process with remaining query. No result.
+  // Expect 1 GL command.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService3Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+ EXPECT_TRUE(query3->pending());
+ EXPECT_EQ(0, sync3->process_count);
+ EXPECT_EQ(0u, sync3->result);
+ EXPECT_TRUE(manager_->HavePendingQueries());
+
+  // Process with remaining query. With result.
+ // Expect 2 GL commands.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService3Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService3Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult3))
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager_->ProcessPendingQueries());
+ EXPECT_FALSE(query3->pending());
+ EXPECT_EQ(kSubmitCount3, sync3->process_count);
+ EXPECT_EQ(kResult3, sync3->result);
+ EXPECT_FALSE(manager_->HavePendingQueries());
+}
+
+TEST_F(QueryManagerTest, ProcessPendingBadSharedMemoryId) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+ const GLuint kResult = 1;
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(kTarget, kClient1Id,
+ kInvalidSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+
+ // Queue it
+ QueueQuery(query.get(), kService1Id, kSubmitCount);
+
+ // Process with return available.
+ // Expect 2 GL commands.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult))
+ .RetiresOnSaturation();
+ EXPECT_FALSE(manager_->ProcessPendingQueries());
+}
+
+TEST_F(QueryManagerTest, ProcessPendingBadSharedMemoryOffset) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+ const GLuint kResult = 1;
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(kTarget, kClient1Id,
+ kSharedMemoryId, kInvalidSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+
+ // Queue it
+ QueueQuery(query.get(), kService1Id, kSubmitCount);
+
+ // Process with return available.
+ // Expect 2 GL commands.
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_AVAILABLE_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_,
+ GetQueryObjectuivARB(kService1Id, GL_QUERY_RESULT_EXT, _))
+ .WillOnce(SetArgumentPointee<2>(kResult))
+ .RetiresOnSaturation();
+ EXPECT_FALSE(manager_->ProcessPendingQueries());
+}
+
+TEST_F(QueryManagerTest, ExitWithPendingQuery) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+
+ // Create Query.
+ scoped_refptr<QueryManager::Query> query(
+ CreateQuery(kTarget, kClient1Id,
+ kSharedMemoryId, kSharedMemoryOffset, kService1Id));
+ ASSERT_TRUE(query.get() != NULL);
+
+ // Queue it
+ QueueQuery(query.get(), kService1Id, kSubmitCount);
+}
+
+// Test that when emulation is based on ARB_occlusion_query2 we use
+// GL_ANY_SAMPLES_PASSED_EXT for GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT.
+TEST_F(QueryManagerTest, ARBOcclusionQuery2) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(),
+ "GL_ARB_occlusion_query2");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ scoped_ptr<QueryManager> manager(
+ new QueryManager(decoder_.get(), feature_info.get()));
+
+ EXPECT_CALL(*gl_, GenQueriesARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(kService1Id))
+ .RetiresOnSaturation();
+ QueryManager::Query* query = manager->CreateQuery(
+ kTarget, kClient1Id, kSharedMemoryId, kSharedMemoryOffset);
+ ASSERT_TRUE(query != NULL);
+
+ EXPECT_CALL(*gl_, BeginQueryARB(GL_ANY_SAMPLES_PASSED_EXT, kService1Id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, EndQueryARB(GL_ANY_SAMPLES_PASSED_EXT))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager->BeginQuery(query));
+ EXPECT_TRUE(manager->EndQuery(query, kSubmitCount));
+ manager->Destroy(false);
+}
+
+// Test that when emulation is based on ARB_occlusion_query we use
+// GL_SAMPLES_PASSED_ARB for GL_ANY_SAMPLES_PASSED_EXT.
+TEST_F(QueryManagerTest, ARBOcclusionQuery) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kTarget = GL_ANY_SAMPLES_PASSED_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(),
+ "GL_ARB_occlusion_query");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ scoped_ptr<QueryManager> manager(
+ new QueryManager(decoder_.get(), feature_info.get()));
+
+ EXPECT_CALL(*gl_, GenQueriesARB(1, _))
+ .WillOnce(SetArgumentPointee<1>(kService1Id))
+ .RetiresOnSaturation();
+ QueryManager::Query* query = manager->CreateQuery(
+ kTarget, kClient1Id, kSharedMemoryId, kSharedMemoryOffset);
+ ASSERT_TRUE(query != NULL);
+
+ EXPECT_CALL(*gl_, BeginQueryARB(GL_SAMPLES_PASSED_ARB, kService1Id))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, EndQueryARB(GL_SAMPLES_PASSED_ARB))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_TRUE(manager->BeginQuery(query));
+ EXPECT_TRUE(manager->EndQuery(query, kSubmitCount));
+ manager->Destroy(false);
+}
+
+TEST_F(QueryManagerTest, GetErrorQuery) {
+ const GLuint kClient1Id = 1;
+ const GLenum kTarget = GL_GET_ERROR_QUERY_CHROMIUM;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+
+ TestHelper::SetupFeatureInfoInitExpectations(gl_.get(), "");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ scoped_ptr<QueryManager> manager(
+ new QueryManager(decoder_.get(), feature_info.get()));
+
+ QueryManager::Query* query = manager->CreateQuery(
+ kTarget, kClient1Id, kSharedMemoryId, kSharedMemoryOffset);
+ ASSERT_TRUE(query != NULL);
+
+ // Setup shared memory like client would.
+ QuerySync* sync = decoder_->GetSharedMemoryAs<QuerySync*>(
+ kSharedMemoryId, kSharedMemoryOffset, sizeof(*sync));
+ ASSERT_TRUE(sync != NULL);
+ sync->Reset();
+
+ EXPECT_TRUE(manager->BeginQuery(query));
+
+ MockErrorState mock_error_state;
+ EXPECT_CALL(*decoder_.get(), GetErrorState())
+ .WillRepeatedly(Return(&mock_error_state));
+ EXPECT_CALL(mock_error_state, GetGLError())
+ .WillOnce(Return(GL_INVALID_ENUM))
+ .RetiresOnSaturation();
+
+ EXPECT_TRUE(manager->EndQuery(query, kSubmitCount));
+ EXPECT_FALSE(query->pending());
+
+ EXPECT_EQ(static_cast<GLuint>(GL_INVALID_ENUM), sync->result);
+
+ manager->Destroy(false);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/renderbuffer_manager.cc b/gpu/command_buffer/service/renderbuffer_manager.cc
new file mode 100644
index 0000000..ff8ae7b
--- /dev/null
+++ b/gpu/command_buffer/service/renderbuffer_manager.cc
@@ -0,0 +1,233 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This should contain everything to uniquely identify a Renderbuffer.
+static const char RenderbufferTag[] = "|Renderbuffer|";
+struct RenderbufferSignature {
+ GLenum internal_format_;
+ GLsizei samples_;
+ GLsizei width_;
+ GLsizei height_;
+
+  // Since we will be hashing this signature structure, the padding must be
+  // zero initialized. Although the C++11 standard nominally guarantees this,
+  // we use a constructor with a memset to enforce it rather than rely on
+  // compilers adhering to this dark corner of the specification.
+ RenderbufferSignature(GLenum internal_format,
+ GLsizei samples,
+ GLsizei width,
+ GLsizei height) {
+ memset(this, 0, sizeof(RenderbufferSignature));
+ internal_format_ = internal_format;
+ samples_ = samples;
+ width_ = width;
+ height_ = height;
+ }
+};
+
+RenderbufferManager::RenderbufferManager(
+ MemoryTracker* memory_tracker,
+ GLint max_renderbuffer_size,
+ GLint max_samples,
+ bool depth24_supported)
+ : memory_tracker_(
+ new MemoryTypeTracker(memory_tracker, MemoryTracker::kUnmanaged)),
+ max_renderbuffer_size_(max_renderbuffer_size),
+ max_samples_(max_samples),
+ depth24_supported_(depth24_supported),
+ num_uncleared_renderbuffers_(0),
+ renderbuffer_count_(0),
+ have_context_(true) {
+}
+
+RenderbufferManager::~RenderbufferManager() {
+ DCHECK(renderbuffers_.empty());
+ // If this triggers, that means something is keeping a reference to
+ // a Renderbuffer belonging to this.
+ CHECK_EQ(renderbuffer_count_, 0u);
+
+ DCHECK_EQ(0, num_uncleared_renderbuffers_);
+}
+
+size_t Renderbuffer::EstimatedSize() {
+ uint32 size = 0;
+ manager_->ComputeEstimatedRenderbufferSize(
+ width_, height_, samples_, internal_format_, &size);
+ return size;
+}
+
+
+size_t Renderbuffer::GetSignatureSize() const {
+ return sizeof(RenderbufferTag) + sizeof(RenderbufferSignature);
+}
+
+void Renderbuffer::AddToSignature(std::string* signature) const {
+ DCHECK(signature);
+ RenderbufferSignature signature_data(internal_format_,
+ samples_,
+ width_,
+ height_);
+
+ signature->append(RenderbufferTag, sizeof(RenderbufferTag));
+ signature->append(reinterpret_cast<const char*>(&signature_data),
+ sizeof(signature_data));
+}
+
+Renderbuffer::Renderbuffer(RenderbufferManager* manager,
+ GLuint client_id,
+ GLuint service_id)
+ : manager_(manager),
+ client_id_(client_id),
+ service_id_(service_id),
+ cleared_(true),
+ has_been_bound_(false),
+ samples_(0),
+ internal_format_(GL_RGBA4),
+ width_(0),
+ height_(0) {
+ manager_->StartTracking(this);
+}
+
+Renderbuffer::~Renderbuffer() {
+ if (manager_) {
+ if (manager_->have_context_) {
+ GLuint id = service_id();
+ glDeleteRenderbuffersEXT(1, &id);
+ }
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+void RenderbufferManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ renderbuffers_.clear();
+ DCHECK_EQ(0u, memory_tracker_->GetMemRepresented());
+}
+
+void RenderbufferManager::StartTracking(Renderbuffer* /* renderbuffer */) {
+ ++renderbuffer_count_;
+}
+
+void RenderbufferManager::StopTracking(Renderbuffer* renderbuffer) {
+ --renderbuffer_count_;
+ if (!renderbuffer->cleared()) {
+ --num_uncleared_renderbuffers_;
+ }
+ memory_tracker_->TrackMemFree(renderbuffer->EstimatedSize());
+}
+
+void RenderbufferManager::SetInfo(
+ Renderbuffer* renderbuffer,
+ GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height) {
+ DCHECK(renderbuffer);
+ if (!renderbuffer->cleared()) {
+ --num_uncleared_renderbuffers_;
+ }
+ memory_tracker_->TrackMemFree(renderbuffer->EstimatedSize());
+ renderbuffer->SetInfo(samples, internalformat, width, height);
+ memory_tracker_->TrackMemAlloc(renderbuffer->EstimatedSize());
+ if (!renderbuffer->cleared()) {
+ ++num_uncleared_renderbuffers_;
+ }
+}
+
+void RenderbufferManager::SetCleared(Renderbuffer* renderbuffer,
+ bool cleared) {
+ DCHECK(renderbuffer);
+ if (!renderbuffer->cleared()) {
+ --num_uncleared_renderbuffers_;
+ }
+ renderbuffer->set_cleared(cleared);
+ if (!renderbuffer->cleared()) {
+ ++num_uncleared_renderbuffers_;
+ }
+}
+
+void RenderbufferManager::CreateRenderbuffer(
+ GLuint client_id, GLuint service_id) {
+ scoped_refptr<Renderbuffer> renderbuffer(
+ new Renderbuffer(this, client_id, service_id));
+ std::pair<RenderbufferMap::iterator, bool> result =
+ renderbuffers_.insert(std::make_pair(client_id, renderbuffer));
+ DCHECK(result.second);
+ if (!renderbuffer->cleared()) {
+ ++num_uncleared_renderbuffers_;
+ }
+}
+
+Renderbuffer* RenderbufferManager::GetRenderbuffer(
+ GLuint client_id) {
+ RenderbufferMap::iterator it = renderbuffers_.find(client_id);
+ return it != renderbuffers_.end() ? it->second.get() : NULL;
+}
+
+void RenderbufferManager::RemoveRenderbuffer(GLuint client_id) {
+ RenderbufferMap::iterator it = renderbuffers_.find(client_id);
+ if (it != renderbuffers_.end()) {
+ Renderbuffer* renderbuffer = it->second.get();
+ renderbuffer->MarkAsDeleted();
+ renderbuffers_.erase(it);
+ }
+}
+
+bool RenderbufferManager::ComputeEstimatedRenderbufferSize(int width,
+ int height,
+ int samples,
+ int internal_format,
+ uint32* size) const {
+ DCHECK(size);
+
+ uint32 temp = 0;
+ if (!SafeMultiplyUint32(width, height, &temp)) {
+ return false;
+ }
+ if (!SafeMultiplyUint32(temp, samples, &temp)) {
+ return false;
+ }
+ GLenum impl_format = InternalRenderbufferFormatToImplFormat(internal_format);
+ if (!SafeMultiplyUint32(
+ temp, GLES2Util::RenderbufferBytesPerPixel(impl_format), &temp)) {
+ return false;
+ }
+ *size = temp;
+ return true;
+}
+
+GLenum RenderbufferManager::InternalRenderbufferFormatToImplFormat(
+ GLenum impl_format) const {
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
+ switch (impl_format) {
+ case GL_DEPTH_COMPONENT16:
+ return GL_DEPTH_COMPONENT;
+ case GL_RGBA4:
+ case GL_RGB5_A1:
+ return GL_RGBA;
+ case GL_RGB565:
+ return GL_RGB;
+ }
+ } else {
+ // Upgrade 16-bit depth to 24-bit if possible.
+ if (impl_format == GL_DEPTH_COMPONENT16 && depth24_supported_)
+ return GL_DEPTH_COMPONENT24;
+ }
+ return impl_format;
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
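
As a worked example of the size estimate above (illustrative only, and assuming GLES2Util::RenderbufferBytesPerPixel reports 4 bytes for GL_RGBA, the desktop-GL impl format that GL_RGBA4 maps to):

uint32 size = 0;
// 256 x 256 pixels * 4 samples * 4 bytes per pixel = 1,048,576 bytes.
if (manager->ComputeEstimatedRenderbufferSize(256, 256, 4, GL_RGBA4, &size))
  DCHECK_EQ(1048576u, size);
// Any intermediate multiplication that overflows uint32 makes the function
// return false and leaves *size unwritten.
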
diff --git a/gpu/command_buffer/service/renderbuffer_manager.h b/gpu/command_buffer/service/renderbuffer_manager.h
new file mode 100644
index 0000000..71f830a
--- /dev/null
+++ b/gpu/command_buffer/service/renderbuffer_manager.h
@@ -0,0 +1,205 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_RENDERBUFFER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_RENDERBUFFER_MANAGER_H_
+
+#include <string>
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class RenderbufferManager;
+
+// Info about a Renderbuffer.
+class GPU_EXPORT Renderbuffer
+ : public base::RefCounted<Renderbuffer> {
+ public:
+ Renderbuffer(RenderbufferManager* manager,
+ GLuint client_id,
+ GLuint service_id);
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ GLuint client_id() const {
+ return client_id_;
+ }
+
+ bool cleared() const {
+ return cleared_;
+ }
+
+ GLenum internal_format() const {
+ return internal_format_;
+ }
+
+ GLsizei samples() const {
+ return samples_;
+ }
+
+ GLsizei width() const {
+ return width_;
+ }
+
+ GLsizei height() const {
+ return height_;
+ }
+
+ bool IsDeleted() const {
+ return client_id_ == 0;
+ }
+
+ void MarkAsValid() {
+ has_been_bound_ = true;
+ }
+
+ bool IsValid() const {
+ return has_been_bound_ && !IsDeleted();
+ }
+
+ size_t EstimatedSize();
+
+ size_t GetSignatureSize() const;
+ void AddToSignature(std::string* signature) const;
+
+ private:
+ friend class RenderbufferManager;
+ friend class base::RefCounted<Renderbuffer>;
+
+ ~Renderbuffer();
+
+ void set_cleared(bool cleared) {
+ cleared_ = cleared;
+ }
+
+ void SetInfo(
+ GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height) {
+ samples_ = samples;
+ internal_format_ = internalformat;
+ width_ = width;
+ height_ = height;
+ cleared_ = false;
+ }
+
+ void MarkAsDeleted() {
+ client_id_ = 0;
+ }
+
+ // RenderbufferManager that owns this Renderbuffer.
+ RenderbufferManager* manager_;
+
+ // Client side renderbuffer id.
+ GLuint client_id_;
+
+ // Service side renderbuffer id.
+ GLuint service_id_;
+
+ // Whether this renderbuffer has been cleared
+ bool cleared_;
+
+ // Whether this renderbuffer has ever been bound.
+ bool has_been_bound_;
+
+ // Number of samples (for multi-sampled renderbuffers)
+ GLsizei samples_;
+
+ // Renderbuffer internalformat set through RenderbufferStorage().
+ GLenum internal_format_;
+
+ // Dimensions of renderbuffer.
+ GLsizei width_;
+ GLsizei height_;
+};
+
+// This class keeps track of the renderbuffers and whether or not they have
+// been cleared.
+class GPU_EXPORT RenderbufferManager {
+ public:
+ RenderbufferManager(MemoryTracker* memory_tracker,
+ GLint max_renderbuffer_size,
+ GLint max_samples,
+ bool depth24_supported);
+ ~RenderbufferManager();
+
+ GLint max_renderbuffer_size() const {
+ return max_renderbuffer_size_;
+ }
+
+ GLint max_samples() const {
+ return max_samples_;
+ }
+
+ bool HaveUnclearedRenderbuffers() const {
+ return num_uncleared_renderbuffers_ != 0;
+ }
+
+ void SetInfo(
+ Renderbuffer* renderbuffer,
+ GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+
+ void SetCleared(Renderbuffer* renderbuffer, bool cleared);
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a Renderbuffer for the given renderbuffer ids.
+ void CreateRenderbuffer(GLuint client_id, GLuint service_id);
+
+ // Gets the renderbuffer for the given renderbuffer id.
+ Renderbuffer* GetRenderbuffer(GLuint client_id);
+
+ // Removes a renderbuffer for the given renderbuffer id.
+ void RemoveRenderbuffer(GLuint client_id);
+
+ size_t mem_represented() const {
+ return memory_tracker_->GetMemRepresented();
+ }
+
+ bool ComputeEstimatedRenderbufferSize(int width,
+ int height,
+ int samples,
+ int internal_format,
+ uint32* size) const;
+ GLenum InternalRenderbufferFormatToImplFormat(GLenum impl_format) const;
+
+ private:
+ friend class Renderbuffer;
+
+ void StartTracking(Renderbuffer* renderbuffer);
+ void StopTracking(Renderbuffer* renderbuffer);
+
+ scoped_ptr<MemoryTypeTracker> memory_tracker_;
+
+ GLint max_renderbuffer_size_;
+ GLint max_samples_;
+ bool depth24_supported_;
+
+ int num_uncleared_renderbuffers_;
+
+  // Counts the number of Renderbuffers allocated with 'this' as their manager.
+  // Allows us to check that no Renderbuffer will outlive this manager.
+ unsigned renderbuffer_count_;
+
+ bool have_context_;
+
+ // Info for each renderbuffer in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<Renderbuffer> > RenderbufferMap;
+ RenderbufferMap renderbuffers_;
+
+ DISALLOW_COPY_AND_ASSIGN(RenderbufferManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_RENDERBUFFER_MANAGER_H_
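
The header above only declares the tracking API, so here is a minimal usage sketch (not part of the patch) of how a decoder might drive RenderbufferManager. The ids, dimensions, and the standalone helper function are hypothetical; the calls themselves mirror the declarations above.

#include "gpu/command_buffer/service/renderbuffer_manager.h"

// Hypothetical helper, for illustration only.
void SketchRenderbufferLifetime(gpu::gles2::RenderbufferManager* manager) {
  const GLuint kClientId = 1;    // Id chosen by the client process.
  const GLuint kServiceId = 11;  // Id returned by glGenRenderbuffersEXT.
  manager->CreateRenderbuffer(kClientId, kServiceId);
  gpu::gles2::Renderbuffer* rb = manager->GetRenderbuffer(kClientId);
  // Defining storage leaves the buffer marked uncleared, so the decoder knows
  // it must clear the contents before they can be sampled or read back.
  manager->SetInfo(rb, 0 /* samples */, GL_RGBA4, 64, 64);
  if (manager->HaveUnclearedRenderbuffers()) {
    // ... issue the clear here ...
    manager->SetCleared(rb, true);
  }
  manager->RemoveRenderbuffer(kClientId);
}
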
diff --git a/gpu/command_buffer/service/renderbuffer_manager_unittest.cc b/gpu/command_buffer/service/renderbuffer_manager_unittest.cc
new file mode 100644
index 0000000..ba0ebea
--- /dev/null
+++ b/gpu/command_buffer/service/renderbuffer_manager_unittest.cc
@@ -0,0 +1,323 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/renderbuffer_manager.h"
+
+#include <set>
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+class RenderbufferManagerTestBase : public GpuServiceTest {
+ public:
+ static const GLint kMaxSize = 128;
+ static const GLint kMaxSamples = 4;
+
+ protected:
+ void SetUpBase(MemoryTracker* memory_tracker, bool depth24_supported) {
+ GpuServiceTest::SetUp();
+ manager_.reset(new RenderbufferManager(
+ memory_tracker, kMaxSize, kMaxSamples, depth24_supported));
+ }
+
+ virtual void TearDown() {
+ manager_->Destroy(true);
+ manager_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ scoped_ptr<RenderbufferManager> manager_;
+};
+
+class RenderbufferManagerTest : public RenderbufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ bool depth24_supported = false;
+ SetUpBase(NULL, depth24_supported);
+ }
+};
+
+class RenderbufferManagerMemoryTrackerTest
+ : public RenderbufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ mock_memory_tracker_ = new StrictMock<MockMemoryTracker>();
+ bool depth24_supported = false;
+ SetUpBase(mock_memory_tracker_.get(), depth24_supported);
+ }
+
+ scoped_refptr<MockMemoryTracker> mock_memory_tracker_;
+};
+
+#define EXPECT_MEMORY_ALLOCATION_CHANGE(old_size, new_size, pool) \
+ EXPECT_CALL(*mock_memory_tracker_.get(), \
+ TrackMemoryAllocatedChange(old_size, new_size, pool)) \
+ .Times(1).RetiresOnSaturation()
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLint RenderbufferManagerTestBase::kMaxSize;
+const GLint RenderbufferManagerTestBase::kMaxSamples;
+#endif
+
+TEST_F(RenderbufferManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+ EXPECT_EQ(kMaxSize, manager_->max_renderbuffer_size());
+ EXPECT_EQ(kMaxSamples, manager_->max_samples());
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+ // Check we can create renderbuffer.
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ // Check renderbuffer got created.
+ scoped_refptr<Renderbuffer> renderbuffer1 =
+ manager_->GetRenderbuffer(kClient1Id);
+ ASSERT_TRUE(renderbuffer1.get() != NULL);
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+ EXPECT_EQ(kClient1Id, renderbuffer1->client_id());
+ // Check we get nothing for a non-existent renderbuffer.
+ EXPECT_TRUE(manager_->GetRenderbuffer(kClient2Id) == NULL);
+  // Check trying to remove a non-existent renderbuffer does not crash.
+ manager_->RemoveRenderbuffer(kClient2Id);
+ // Check that the renderbuffer is deleted when the last ref is released.
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Check we can't get the renderbuffer after we remove it.
+ manager_->RemoveRenderbuffer(kClient1Id);
+ EXPECT_TRUE(manager_->GetRenderbuffer(kClient1Id) == NULL);
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+ EXPECT_EQ(0u, renderbuffer1->client_id());
+}
+
+TEST_F(RenderbufferManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create renderbuffer.
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ // Check renderbuffer got created.
+ Renderbuffer* renderbuffer1 =
+ manager_->GetRenderbuffer(kClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_->Destroy(true);
+ renderbuffer1 = manager_->GetRenderbuffer(kClient1Id);
+ ASSERT_TRUE(renderbuffer1 == NULL);
+}
+
+TEST_F(RenderbufferManagerTest, Renderbuffer) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create renderbuffer.
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ // Check renderbuffer got created.
+ Renderbuffer* renderbuffer1 =
+ manager_->GetRenderbuffer(kClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+ EXPECT_EQ(kService1Id, renderbuffer1->service_id());
+ EXPECT_EQ(0, renderbuffer1->samples());
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA4), renderbuffer1->internal_format());
+ EXPECT_EQ(0, renderbuffer1->width());
+ EXPECT_EQ(0, renderbuffer1->height());
+ EXPECT_TRUE(renderbuffer1->cleared());
+ EXPECT_EQ(0u, renderbuffer1->EstimatedSize());
+
+  // Check that setting the info marks it as not cleared.
+ const GLsizei kSamples = 4;
+ const GLenum kFormat = GL_RGBA4;
+ const GLsizei kWidth = 128;
+ const GLsizei kHeight = 64;
+ manager_->SetInfo(renderbuffer1, kSamples, kFormat, kWidth, kHeight);
+ EXPECT_EQ(kSamples, renderbuffer1->samples());
+ EXPECT_EQ(kFormat, renderbuffer1->internal_format());
+ EXPECT_EQ(kWidth, renderbuffer1->width());
+ EXPECT_EQ(kHeight, renderbuffer1->height());
+ EXPECT_FALSE(renderbuffer1->cleared());
+ EXPECT_FALSE(renderbuffer1->IsDeleted());
+ EXPECT_TRUE(manager_->HaveUnclearedRenderbuffers());
+ EXPECT_EQ(kWidth * kHeight * 4u * 4u, renderbuffer1->EstimatedSize());
+
+ manager_->SetCleared(renderbuffer1, true);
+ EXPECT_TRUE(renderbuffer1->cleared());
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+
+ manager_->SetInfo(renderbuffer1, kSamples, kFormat, kWidth, kHeight);
+ EXPECT_TRUE(manager_->HaveUnclearedRenderbuffers());
+
+ // Check that the renderbuffer is deleted when the last ref is released.
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_->RemoveRenderbuffer(kClient1Id);
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+}
+
+TEST_F(RenderbufferManagerMemoryTrackerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kUnmanaged);
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ Renderbuffer* renderbuffer1 =
+ manager_->GetRenderbuffer(kClient1Id);
+ ASSERT_TRUE(renderbuffer1 != NULL);
+
+ const GLsizei kSamples = 4;
+ const GLenum kFormat = GL_RGBA4;
+ const GLsizei kWidth = 128;
+ const GLsizei kHeight1 = 64;
+ const GLsizei kHeight2 = 32;
+ uint32 expected_size_1 = 0;
+ uint32 expected_size_2 = 0;
+ manager_->ComputeEstimatedRenderbufferSize(
+ kWidth, kHeight1, kSamples, kFormat, &expected_size_1);
+ manager_->ComputeEstimatedRenderbufferSize(
+ kWidth, kHeight2, kSamples, kFormat, &expected_size_2);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(
+ 0, expected_size_1, MemoryTracker::kUnmanaged);
+ manager_->SetInfo(renderbuffer1, kSamples, kFormat, kWidth, kHeight1);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(
+ expected_size_1, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(
+ 0, expected_size_2, MemoryTracker::kUnmanaged);
+ manager_->SetInfo(renderbuffer1, kSamples, kFormat, kWidth, kHeight2);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(
+ expected_size_2, 0, MemoryTracker::kUnmanaged);
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+TEST_F(RenderbufferManagerTest, UseDeletedRenderbufferInfo) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ scoped_refptr<Renderbuffer> renderbuffer1(
+ manager_->GetRenderbuffer(kClient1Id));
+ ASSERT_TRUE(renderbuffer1.get() != NULL);
+ // Remove it.
+ manager_->RemoveRenderbuffer(kClient1Id);
+ // Use after removing.
+ const GLsizei kSamples = 4;
+ const GLenum kFormat = GL_RGBA4;
+ const GLsizei kWidth = 128;
+ const GLsizei kHeight = 64;
+ manager_->SetInfo(renderbuffer1.get(), kSamples, kFormat, kWidth, kHeight);
+ // See that it still affects manager.
+ EXPECT_TRUE(manager_->HaveUnclearedRenderbuffers());
+ manager_->SetCleared(renderbuffer1.get(), true);
+ EXPECT_FALSE(manager_->HaveUnclearedRenderbuffers());
+ // Check that the renderbuffer is deleted when the last ref is released.
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ renderbuffer1 = NULL;
+}
+
+namespace {
+
+bool InSet(std::set<std::string>* string_set, const std::string& str) {
+ std::pair<std::set<std::string>::iterator, bool> result =
+ string_set->insert(str);
+ return !result.second;
+}
+
+} // anonymous namespace
+
+TEST_F(RenderbufferManagerTest, AddToSignature) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ manager_->CreateRenderbuffer(kClient1Id, kService1Id);
+ scoped_refptr<Renderbuffer> renderbuffer1(
+ manager_->GetRenderbuffer(kClient1Id));
+ ASSERT_TRUE(renderbuffer1.get() != NULL);
+ const GLsizei kSamples = 4;
+ const GLenum kFormat = GL_RGBA4;
+ const GLsizei kWidth = 128;
+ const GLsizei kHeight = 64;
+ manager_->SetInfo(renderbuffer1.get(), kSamples, kFormat, kWidth, kHeight);
+ std::string signature1;
+ std::string signature2;
+ renderbuffer1->AddToSignature(&signature1);
+
+ std::set<std::string> string_set;
+ EXPECT_FALSE(InSet(&string_set, signature1));
+
+  // Change things and see that the signatures change.
+ manager_->SetInfo(
+ renderbuffer1.get(), kSamples + 1, kFormat, kWidth, kHeight);
+ renderbuffer1->AddToSignature(&signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetInfo(
+ renderbuffer1.get(), kSamples, kFormat + 1, kWidth, kHeight);
+ signature2.clear();
+ renderbuffer1->AddToSignature(&signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetInfo(
+ renderbuffer1.get(), kSamples, kFormat, kWidth + 1, kHeight);
+ signature2.clear();
+ renderbuffer1->AddToSignature(&signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetInfo(
+ renderbuffer1.get(), kSamples, kFormat, kWidth, kHeight + 1);
+ signature2.clear();
+ renderbuffer1->AddToSignature(&signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+  // Put it back to the original values and the signature should match.
+ manager_->SetInfo(renderbuffer1.get(), kSamples, kFormat, kWidth, kHeight);
+ signature2.clear();
+ renderbuffer1->AddToSignature(&signature2);
+ EXPECT_EQ(signature1, signature2);
+
+  // Check the set was actually getting different signatures.
+ EXPECT_EQ(5u, string_set.size());
+
+ EXPECT_CALL(*gl_, DeleteRenderbuffersEXT(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+class RenderbufferManagerFormatTest : public RenderbufferManagerTestBase {
+ protected:
+ virtual void SetUp() {
+ bool depth24_supported = true;
+ SetUpBase(NULL, depth24_supported);
+ }
+};
+
+TEST_F(RenderbufferManagerFormatTest, UpgradeDepthFormatOnGLES) {
+ gfx::GLImplementation prev_impl = gfx::GetGLImplementation();
+ gfx::SetGLImplementation(gfx::kGLImplementationEGLGLES2);
+ GLenum impl_format =
+ manager_->InternalRenderbufferFormatToImplFormat(GL_DEPTH_COMPONENT16);
+ gfx::SetGLImplementation(prev_impl);
+ EXPECT_EQ(static_cast<GLenum>(GL_DEPTH_COMPONENT24), impl_format);
+}
+
+TEST_F(RenderbufferManagerFormatTest, UseUnsizedDepthFormatOnNonGLES) {
+ gfx::GLImplementation prev_impl = gfx::GetGLImplementation();
+ gfx::SetGLImplementation(gfx::kGLImplementationDesktopGL);
+ GLenum impl_format =
+ manager_->InternalRenderbufferFormatToImplFormat(GL_DEPTH_COMPONENT16);
+ gfx::SetGLImplementation(prev_impl);
+ EXPECT_EQ(static_cast<GLenum>(GL_DEPTH_COMPONENT), impl_format);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/shader_manager.cc b/gpu/command_buffer/service/shader_manager.cc
new file mode 100644
index 0000000..189d78b
--- /dev/null
+++ b/gpu/command_buffer/service/shader_manager.cc
@@ -0,0 +1,231 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shader_manager.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+
+namespace gpu {
+namespace gles2 {
+
+Shader::Shader(GLuint service_id, GLenum shader_type)
+ : use_count_(0),
+ service_id_(service_id),
+ shader_type_(shader_type),
+ valid_(false) {
+}
+
+Shader::~Shader() {
+}
+
+void Shader::DoCompile(ShaderTranslatorInterface* translator,
+ TranslatedShaderSourceType type) {
+ // Translate GL ES 2.0 shader to Desktop GL shader and pass that to
+ // glShaderSource and then glCompileShader.
+ const char* source_for_driver = source_.c_str();
+ if (translator) {
+ valid_ = translator->Translate(source_,
+ &log_info_,
+ &translated_source_,
+ &attrib_map_,
+ &uniform_map_,
+ &varying_map_,
+ &name_map_);
+ if (!valid_) {
+ return;
+ }
+ signature_source_ = source_;
+ source_for_driver = translated_source_.c_str();
+ }
+
+ glShaderSource(service_id_, 1, &source_for_driver, NULL);
+ glCompileShader(service_id_);
+ if (type == kANGLE) {
+ GLint max_len = 0;
+ glGetShaderiv(service_id_,
+ GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE,
+ &max_len);
+ scoped_ptr<char[]> buffer(new char[max_len]);
+ GLint len = 0;
+ glGetTranslatedShaderSourceANGLE(
+ service_id_, max_len, &len, buffer.get());
+ DCHECK(max_len == 0 || len < max_len);
+ DCHECK(len == 0 || buffer[len] == '\0');
+ translated_source_ = std::string(buffer.get(), len);
+ }
+
+ GLint status = GL_FALSE;
+ glGetShaderiv(service_id_, GL_COMPILE_STATUS, &status);
+ if (status != GL_TRUE) {
+ // We cannot reach here if we are using the shader translator.
+ // All invalid shaders must be rejected by the translator.
+ // All translated shaders must compile.
+ GLint max_len = 0;
+ glGetShaderiv(service_id_, GL_INFO_LOG_LENGTH, &max_len);
+ scoped_ptr<char[]> buffer(new char[max_len]);
+ GLint len = 0;
+ glGetShaderInfoLog(service_id_, max_len, &len, buffer.get());
+ DCHECK(max_len == 0 || len < max_len);
+ DCHECK(len == 0 || buffer[len] == '\0');
+ valid_ = false;
+ log_info_ = std::string(buffer.get(), len);
+ LOG_IF(ERROR, translator)
+ << "Shader translator allowed/produced an invalid shader "
+ << "unless the driver is buggy:"
+ << "\n--original-shader--\n" << source_
+ << "\n--translated-shader--\n" << source_for_driver
+ << "\n--info-log--\n" << log_info_;
+ }
+}
+
+void Shader::IncUseCount() {
+ ++use_count_;
+}
+
+void Shader::DecUseCount() {
+ --use_count_;
+ DCHECK_GE(use_count_, 0);
+}
+
+void Shader::MarkAsDeleted() {
+ DCHECK_NE(service_id_, 0u);
+ service_id_ = 0;
+}
+
+const Shader::VariableInfo* Shader::GetAttribInfo(
+ const std::string& name) const {
+ VariableMap::const_iterator it = attrib_map_.find(name);
+ return it != attrib_map_.end() ? &it->second : NULL;
+}
+
+const std::string* Shader::GetAttribMappedName(
+ const std::string& original_name) const {
+ for (VariableMap::const_iterator it = attrib_map_.begin();
+ it != attrib_map_.end(); ++it) {
+ if (it->second.name == original_name)
+ return &(it->first);
+ }
+ return NULL;
+}
+
+const std::string* Shader::GetOriginalNameFromHashedName(
+ const std::string& hashed_name) const {
+ NameMap::const_iterator it = name_map_.find(hashed_name);
+ if (it != name_map_.end())
+ return &(it->second);
+ return NULL;
+}
+
+const Shader::VariableInfo* Shader::GetUniformInfo(
+ const std::string& name) const {
+ VariableMap::const_iterator it = uniform_map_.find(name);
+ return it != uniform_map_.end() ? &it->second : NULL;
+}
+
+const Shader::VariableInfo* Shader::GetVaryingInfo(
+ const std::string& name) const {
+ VariableMap::const_iterator it = varying_map_.find(name);
+ return it != varying_map_.end() ? &it->second : NULL;
+}
+
+ShaderManager::ShaderManager() {}
+
+ShaderManager::~ShaderManager() {
+ DCHECK(shaders_.empty());
+}
+
+void ShaderManager::Destroy(bool have_context) {
+ while (!shaders_.empty()) {
+ if (have_context) {
+ Shader* shader = shaders_.begin()->second.get();
+ if (!shader->IsDeleted()) {
+ glDeleteShader(shader->service_id());
+ shader->MarkAsDeleted();
+ }
+ }
+ shaders_.erase(shaders_.begin());
+ }
+}
+
+Shader* ShaderManager::CreateShader(
+ GLuint client_id,
+ GLuint service_id,
+ GLenum shader_type) {
+ std::pair<ShaderMap::iterator, bool> result =
+ shaders_.insert(std::make_pair(
+ client_id, scoped_refptr<Shader>(
+ new Shader(service_id, shader_type))));
+ DCHECK(result.second);
+ return result.first->second.get();
+}
+
+Shader* ShaderManager::GetShader(GLuint client_id) {
+ ShaderMap::iterator it = shaders_.find(client_id);
+ return it != shaders_.end() ? it->second.get() : NULL;
+}
+
+bool ShaderManager::GetClientId(GLuint service_id, GLuint* client_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (ShaderMap::const_iterator it = shaders_.begin();
+ it != shaders_.end(); ++it) {
+ if (it->second->service_id() == service_id) {
+ *client_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ShaderManager::IsOwned(Shader* shader) {
+ for (ShaderMap::iterator it = shaders_.begin();
+ it != shaders_.end(); ++it) {
+ if (it->second.get() == shader) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void ShaderManager::RemoveShader(Shader* shader) {
+ DCHECK(shader);
+ DCHECK(IsOwned(shader));
+ if (shader->IsDeleted() && !shader->InUse()) {
+ for (ShaderMap::iterator it = shaders_.begin();
+ it != shaders_.end(); ++it) {
+ if (it->second.get() == shader) {
+ shaders_.erase(it);
+ return;
+ }
+ }
+ NOTREACHED();
+ }
+}
+
+void ShaderManager::MarkAsDeleted(Shader* shader) {
+ DCHECK(shader);
+ DCHECK(IsOwned(shader));
+ shader->MarkAsDeleted();
+ RemoveShader(shader);
+}
+
+void ShaderManager::UseShader(Shader* shader) {
+ DCHECK(shader);
+ DCHECK(IsOwned(shader));
+ shader->IncUseCount();
+}
+
+void ShaderManager::UnuseShader(Shader* shader) {
+ DCHECK(shader);
+ DCHECK(IsOwned(shader));
+ shader->DecUseCount();
+ RemoveShader(shader);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/shader_manager.h b/gpu/command_buffer/service/shader_manager.h
new file mode 100644
index 0000000..359e574
--- /dev/null
+++ b/gpu/command_buffer/service/shader_manager.h
@@ -0,0 +1,221 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHADER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHADER_MANAGER_H_
+
+#include <string>
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This is used to keep the source code for a shader. This is because in order
+// to emulate GLES2 the shaders have to be re-written before being passed to
+// the underlying OpenGL. But, when the user calls glGetShaderSource they
+// should get the source they passed in, not the re-written source.
+class GPU_EXPORT Shader : public base::RefCounted<Shader> {
+ public:
+ enum TranslatedShaderSourceType {
+ kANGLE,
+ kGL, // GL or GLES
+ };
+
+ typedef ShaderTranslator::VariableInfo VariableInfo;
+
+ void DoCompile(ShaderTranslatorInterface* translator,
+ TranslatedShaderSourceType type);
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ GLenum shader_type() const {
+ return shader_type_;
+ }
+
+ const std::string& source() const {
+ return source_;
+ }
+
+ void set_source(const std::string& source) {
+ source_ = source;
+ }
+
+ const std::string& translated_source() const {
+ return translated_source_;
+ }
+
+ const std::string& signature_source() const {
+ return signature_source_;
+ }
+
+ const VariableInfo* GetAttribInfo(const std::string& name) const;
+ const VariableInfo* GetUniformInfo(const std::string& name) const;
+ const VariableInfo* GetVaryingInfo(const std::string& name) const;
+
+ // If the original_name is not found, return NULL.
+ const std::string* GetAttribMappedName(
+ const std::string& original_name) const;
+
+ // If the hashed_name is not found, return NULL.
+ const std::string* GetOriginalNameFromHashedName(
+ const std::string& hashed_name) const;
+
+ const std::string& log_info() const {
+ return log_info_;
+ }
+
+ bool valid() const {
+ return valid_;
+ }
+
+ bool IsDeleted() const {
+ return service_id_ == 0;
+ }
+
+ bool InUse() const {
+ DCHECK_GE(use_count_, 0);
+ return use_count_ != 0;
+ }
+
+ // Used by program cache.
+ const ShaderTranslator::VariableMap& attrib_map() const {
+ return attrib_map_;
+ }
+
+ // Used by program cache.
+ const ShaderTranslator::VariableMap& uniform_map() const {
+ return uniform_map_;
+ }
+
+ // Used by program cache.
+ const ShaderTranslator::VariableMap& varying_map() const {
+ return varying_map_;
+ }
+
+ // Used by program cache.
+ void set_attrib_map(const ShaderTranslator::VariableMap& attrib_map) {
+ // copied because cache might be cleared
+ attrib_map_ = ShaderTranslator::VariableMap(attrib_map);
+ }
+
+ // Used by program cache.
+ void set_uniform_map(const ShaderTranslator::VariableMap& uniform_map) {
+ // copied because cache might be cleared
+ uniform_map_ = ShaderTranslator::VariableMap(uniform_map);
+ }
+
+ // Used by program cache.
+ void set_varying_map(const ShaderTranslator::VariableMap& varying_map) {
+ // copied because cache might be cleared
+ varying_map_ = ShaderTranslator::VariableMap(varying_map);
+ }
+
+ private:
+ typedef ShaderTranslator::VariableMap VariableMap;
+ typedef ShaderTranslator::NameMap NameMap;
+
+ friend class base::RefCounted<Shader>;
+ friend class ShaderManager;
+
+ Shader(GLuint service_id, GLenum shader_type);
+ ~Shader();
+
+ void IncUseCount();
+ void DecUseCount();
+ void MarkAsDeleted();
+
+ int use_count_;
+
+ // The shader this Shader is tracking.
+ GLuint service_id_;
+ // Type of shader - GL_VERTEX_SHADER or GL_FRAGMENT_SHADER.
+ GLenum shader_type_;
+
+ // True if compilation succeeded.
+ bool valid_;
+
+ // The shader source as passed to glShaderSource.
+ std::string source_;
+
+ // The source the last compile used.
+ std::string signature_source_;
+
+ // The translated shader source.
+ std::string translated_source_;
+
+ // The shader translation log.
+ std::string log_info_;
+
+ // The type info when the shader was last compiled.
+ VariableMap attrib_map_;
+ VariableMap uniform_map_;
+ VariableMap varying_map_;
+
+ // The name hashing info when the shader was last compiled.
+ NameMap name_map_;
+};
+
+// Tracks the Shaders.
+//
+// NOTE: To support shared resources an instance of this class will
+// need to be shared by multiple GLES2Decoders.
+class GPU_EXPORT ShaderManager {
+ public:
+ ShaderManager();
+ ~ShaderManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Creates a shader for the given shader ID.
+ Shader* CreateShader(
+ GLuint client_id,
+ GLuint service_id,
+ GLenum shader_type);
+
+ // Gets an existing shader info for the given shader ID. Returns NULL if none
+ // exists.
+ Shader* GetShader(GLuint client_id);
+
+ // Gets a client id for a given service id.
+ bool GetClientId(GLuint service_id, GLuint* client_id) const;
+
+ void MarkAsDeleted(Shader* shader);
+
+ // Mark a shader as used
+ void UseShader(Shader* shader);
+
+ // Unmark a shader as used. If it has been deleted and is not used
+ // then we free the shader.
+ void UnuseShader(Shader* shader);
+
+ // Check if a Shader is owned by this ShaderManager.
+ bool IsOwned(Shader* shader);
+
+ private:
+ friend class Shader;
+
+ // Info for each shader by service side shader Id.
+ typedef base::hash_map<GLuint, scoped_refptr<Shader> > ShaderMap;
+ ShaderMap shaders_;
+
+ void RemoveShader(Shader* shader);
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHADER_MANAGER_H_
+
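
Before the unit tests, a brief sketch (not part of the patch) of the use-count protocol ShaderManager enforces: a shader marked as deleted stays retrievable until every user has released it with UnuseShader(), mirroring GL's attach/detach semantics. The ids and the helper function are hypothetical.

#include "gpu/command_buffer/service/shader_manager.h"

// Hypothetical helper, for illustration only.
void SketchShaderLifetime(gpu::gles2::ShaderManager* manager) {
  gpu::gles2::Shader* shader = manager->CreateShader(
      1 /* client_id */, 11 /* service_id */, GL_VERTEX_SHADER);
  manager->UseShader(shader);      // e.g. on glAttachShader.
  manager->MarkAsDeleted(shader);  // e.g. on glDeleteShader.
  // GetShader(1) still returns the shader because it is in use.
  manager->UnuseShader(shader);    // e.g. on glDetachShader; now it is freed.
}
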
diff --git a/gpu/command_buffer/service/shader_manager_unittest.cc b/gpu/command_buffer/service/shader_manager_unittest.cc
new file mode 100644
index 0000000..d6236f7
--- /dev/null
+++ b/gpu/command_buffer/service/shader_manager_unittest.cc
@@ -0,0 +1,272 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shader_manager.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::Return;
+using ::testing::ReturnRef;
+
+namespace gpu {
+namespace gles2 {
+
+class ShaderManagerTest : public GpuServiceTest {
+ public:
+ ShaderManagerTest() {
+ }
+
+ virtual ~ShaderManagerTest() {
+ manager_.Destroy(false);
+ }
+
+ protected:
+ ShaderManager manager_;
+};
+
+TEST_F(ShaderManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kShader1Type = GL_VERTEX_SHADER;
+ const GLuint kClient2Id = 2;
+ // Check we can create shader.
+ Shader* info0 = manager_.CreateShader(
+ kClient1Id, kService1Id, kShader1Type);
+ // Check shader got created.
+ ASSERT_TRUE(info0 != NULL);
+ Shader* shader1 = manager_.GetShader(kClient1Id);
+ ASSERT_EQ(info0, shader1);
+ // Check we get nothing for a non-existent shader.
+ EXPECT_TRUE(manager_.GetShader(kClient2Id) == NULL);
+ // Check we can't get the shader after we remove it.
+ manager_.MarkAsDeleted(shader1);
+ EXPECT_TRUE(manager_.GetShader(kClient1Id) == NULL);
+}
+
+TEST_F(ShaderManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kShader1Type = GL_VERTEX_SHADER;
+ // Check we can create shader.
+ Shader* shader1 = manager_.CreateShader(
+ kClient1Id, kService1Id, kShader1Type);
+ // Check shader got created.
+ ASSERT_TRUE(shader1 != NULL);
+ EXPECT_CALL(*gl_, DeleteShader(kService1Id))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager_.Destroy(true);
+ // Check that resources got freed.
+ shader1 = manager_.GetShader(kClient1Id);
+ ASSERT_TRUE(shader1 == NULL);
+}
+
+TEST_F(ShaderManagerTest, DeleteBug) {
+ const GLuint kClient1Id = 1;
+ const GLuint kClient2Id = 2;
+ const GLuint kService1Id = 11;
+ const GLuint kService2Id = 12;
+ const GLenum kShaderType = GL_VERTEX_SHADER;
+ // Check we can create shader.
+ scoped_refptr<Shader> shader1(
+ manager_.CreateShader(kClient1Id, kService1Id, kShaderType));
+ scoped_refptr<Shader> shader2(
+ manager_.CreateShader(kClient2Id, kService2Id, kShaderType));
+ ASSERT_TRUE(shader1.get());
+ ASSERT_TRUE(shader2.get());
+ manager_.UseShader(shader1.get());
+ manager_.MarkAsDeleted(shader1.get());
+ manager_.MarkAsDeleted(shader2.get());
+ EXPECT_TRUE(manager_.IsOwned(shader1.get()));
+ EXPECT_FALSE(manager_.IsOwned(shader2.get()));
+}
+
+TEST_F(ShaderManagerTest, DoCompile) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kShader1Type = GL_VERTEX_SHADER;
+ const char* kClient1Source = "hello world";
+ const GLenum kAttrib1Type = GL_FLOAT_VEC2;
+ const GLsizei kAttrib1Size = 2;
+ const int kAttrib1Precision = SH_PRECISION_MEDIUMP;
+ const char* kAttrib1Name = "attr1";
+ const GLenum kAttrib2Type = GL_FLOAT_VEC3;
+ const GLsizei kAttrib2Size = 4;
+ const int kAttrib2Precision = SH_PRECISION_HIGHP;
+ const char* kAttrib2Name = "attr2";
+ const int kAttribStaticUse = 0;
+ const GLenum kUniform1Type = GL_FLOAT_MAT2;
+ const GLsizei kUniform1Size = 3;
+ const int kUniform1Precision = SH_PRECISION_LOWP;
+ const int kUniform1StaticUse = 1;
+ const char* kUniform1Name = "uni1";
+ const GLenum kUniform2Type = GL_FLOAT_MAT3;
+ const GLsizei kUniform2Size = 5;
+ const int kUniform2Precision = SH_PRECISION_MEDIUMP;
+ const int kUniform2StaticUse = 0;
+ const char* kUniform2Name = "uni2";
+ const GLenum kVarying1Type = GL_FLOAT_VEC4;
+ const GLsizei kVarying1Size = 1;
+ const int kVarying1Precision = SH_PRECISION_HIGHP;
+ const int kVarying1StaticUse = 0;
+ const char* kVarying1Name = "varying1";
+
+ // Check we can create shader.
+ Shader* shader1 = manager_.CreateShader(
+ kClient1Id, kService1Id, kShader1Type);
+ // Check shader got created.
+ ASSERT_TRUE(shader1 != NULL);
+ EXPECT_EQ(kService1Id, shader1->service_id());
+ // Check if the shader has correct type.
+ EXPECT_EQ(kShader1Type, shader1->shader_type());
+ EXPECT_FALSE(shader1->valid());
+ EXPECT_FALSE(shader1->InUse());
+ EXPECT_TRUE(shader1->source().empty());
+ EXPECT_TRUE(shader1->log_info().empty());
+ EXPECT_TRUE(shader1->signature_source().empty());
+ EXPECT_TRUE(shader1->translated_source().empty());
+ EXPECT_EQ(0u, shader1->attrib_map().size());
+ EXPECT_EQ(0u, shader1->uniform_map().size());
+ EXPECT_EQ(0u, shader1->varying_map().size());
+
+ // Check we can set its source.
+ shader1->set_source(kClient1Source);
+ EXPECT_STREQ(kClient1Source, shader1->source().c_str());
+ EXPECT_TRUE(shader1->signature_source().empty());
+
+ // Check DoCompile() will set compilation states, log, translated source,
+ // shader variables, and name mapping.
+ const std::string kLog = "foo";
+ const std::string kTranslatedSource = "poo";
+
+ ShaderTranslator::VariableMap attrib_map;
+ attrib_map[kAttrib1Name] = ShaderTranslatorInterface::VariableInfo(
+ kAttrib1Type, kAttrib1Size, kAttrib1Precision,
+ kAttribStaticUse, kAttrib1Name);
+ attrib_map[kAttrib2Name] = ShaderTranslatorInterface::VariableInfo(
+ kAttrib2Type, kAttrib2Size, kAttrib2Precision,
+ kAttribStaticUse, kAttrib2Name);
+ ShaderTranslator::VariableMap uniform_map;
+ uniform_map[kUniform1Name] = ShaderTranslatorInterface::VariableInfo(
+ kUniform1Type, kUniform1Size, kUniform1Precision,
+ kUniform1StaticUse, kUniform1Name);
+ uniform_map[kUniform2Name] = ShaderTranslatorInterface::VariableInfo(
+ kUniform2Type, kUniform2Size, kUniform2Precision,
+ kUniform2StaticUse, kUniform2Name);
+ ShaderTranslator::VariableMap varying_map;
+ varying_map[kVarying1Name] = ShaderTranslatorInterface::VariableInfo(
+ kVarying1Type, kVarying1Size, kVarying1Precision,
+ kVarying1StaticUse, kVarying1Name);
+
+ TestHelper::SetShaderStates(
+ gl_.get(), shader1, true, &kLog, &kTranslatedSource,
+ &attrib_map, &uniform_map, &varying_map, NULL);
+ EXPECT_TRUE(shader1->valid());
+ // When compilation succeeds, no log is recorded.
+ EXPECT_STREQ("", shader1->log_info().c_str());
+ EXPECT_STREQ(kClient1Source, shader1->signature_source().c_str());
+ EXPECT_STREQ(kTranslatedSource.c_str(), shader1->translated_source().c_str());
+
+  // Check attrib infos got copied.
+ EXPECT_EQ(attrib_map.size(), shader1->attrib_map().size());
+ for (ShaderTranslator::VariableMap::const_iterator it = attrib_map.begin();
+ it != attrib_map.end(); ++it) {
+ const Shader::VariableInfo* variable_info =
+ shader1->GetAttribInfo(it->first);
+ ASSERT_TRUE(variable_info != NULL);
+ EXPECT_EQ(it->second.type, variable_info->type);
+ EXPECT_EQ(it->second.size, variable_info->size);
+ EXPECT_EQ(it->second.precision, variable_info->precision);
+ EXPECT_EQ(it->second.static_use, variable_info->static_use);
+ EXPECT_STREQ(it->second.name.c_str(), variable_info->name.c_str());
+ }
+ // Check uniform infos got copied.
+ EXPECT_EQ(uniform_map.size(), shader1->uniform_map().size());
+ for (ShaderTranslator::VariableMap::const_iterator it = uniform_map.begin();
+ it != uniform_map.end(); ++it) {
+ const Shader::VariableInfo* variable_info =
+ shader1->GetUniformInfo(it->first);
+ ASSERT_TRUE(variable_info != NULL);
+ EXPECT_EQ(it->second.type, variable_info->type);
+ EXPECT_EQ(it->second.size, variable_info->size);
+ EXPECT_EQ(it->second.precision, variable_info->precision);
+ EXPECT_EQ(it->second.static_use, variable_info->static_use);
+ EXPECT_STREQ(it->second.name.c_str(), variable_info->name.c_str());
+ }
+ // Check varying infos got copied.
+ EXPECT_EQ(varying_map.size(), shader1->varying_map().size());
+ for (ShaderTranslator::VariableMap::const_iterator it = varying_map.begin();
+ it != varying_map.end(); ++it) {
+ const Shader::VariableInfo* variable_info =
+ shader1->GetVaryingInfo(it->first);
+ ASSERT_TRUE(variable_info != NULL);
+ EXPECT_EQ(it->second.type, variable_info->type);
+ EXPECT_EQ(it->second.size, variable_info->size);
+ EXPECT_EQ(it->second.precision, variable_info->precision);
+ EXPECT_EQ(it->second.static_use, variable_info->static_use);
+ EXPECT_STREQ(it->second.name.c_str(), variable_info->name.c_str());
+ }
+
+ // Compile failure case.
+ TestHelper::SetShaderStates(
+ gl_.get(), shader1, false, &kLog, &kTranslatedSource,
+ &attrib_map, &uniform_map, &varying_map, NULL);
+ EXPECT_FALSE(shader1->valid());
+ EXPECT_STREQ(kLog.c_str(), shader1->log_info().c_str());
+ EXPECT_STREQ("", shader1->translated_source().c_str());
+ EXPECT_TRUE(shader1->attrib_map().empty());
+ EXPECT_TRUE(shader1->uniform_map().empty());
+ EXPECT_TRUE(shader1->varying_map().empty());
+}
+
+TEST_F(ShaderManagerTest, ShaderInfoUseCount) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLenum kShader1Type = GL_VERTEX_SHADER;
+ // Check we can create shader.
+ Shader* shader1 = manager_.CreateShader(
+ kClient1Id, kService1Id, kShader1Type);
+ // Check shader got created.
+ ASSERT_TRUE(shader1 != NULL);
+ EXPECT_FALSE(shader1->InUse());
+ EXPECT_FALSE(shader1->IsDeleted());
+ manager_.UseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.UseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.MarkAsDeleted(shader1);
+ EXPECT_TRUE(shader1->IsDeleted());
+ Shader* shader2 = manager_.GetShader(kClient1Id);
+ EXPECT_EQ(shader1, shader2);
+ manager_.UnuseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+  manager_.UnuseShader(shader1);  // this should delete the shader.
+ shader2 = manager_.GetShader(kClient1Id);
+ EXPECT_TRUE(shader2 == NULL);
+
+ shader1 = manager_.CreateShader(kClient1Id, kService1Id, kShader1Type);
+ ASSERT_TRUE(shader1 != NULL);
+ EXPECT_FALSE(shader1->InUse());
+ manager_.UseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.UseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.UnuseShader(shader1);
+ EXPECT_TRUE(shader1->InUse());
+ manager_.UnuseShader(shader1);
+ EXPECT_FALSE(shader1->InUse());
+ shader2 = manager_.GetShader(kClient1Id);
+ EXPECT_EQ(shader1, shader2);
+ manager_.MarkAsDeleted(shader1); // this should delete the shader.
+ shader2 = manager_.GetShader(kClient1Id);
+ EXPECT_TRUE(shader2 == NULL);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/shader_translator.cc b/gpu/command_buffer/service/shader_translator.cc
new file mode 100644
index 0000000..bc06ab3
--- /dev/null
+++ b/gpu/command_buffer/service/shader_translator.cc
@@ -0,0 +1,256 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shader_translator.h"
+
+#include <string.h>
+#include <GLES2/gl2.h>
+#include <algorithm>
+
+#include "base/at_exit.h"
+#include "base/debug/trace_event.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+
+namespace {
+
+using gpu::gles2::ShaderTranslator;
+
+class ShaderTranslatorInitializer {
+ public:
+ ShaderTranslatorInitializer() {
+ TRACE_EVENT0("gpu", "ShInitialize");
+ CHECK(ShInitialize());
+ }
+
+ ~ShaderTranslatorInitializer() {
+ TRACE_EVENT0("gpu", "ShFinalize");
+ ShFinalize();
+ }
+};
+
+base::LazyInstance<ShaderTranslatorInitializer> g_translator_initializer =
+ LAZY_INSTANCE_INITIALIZER;
+
+void GetVariableInfo(ShHandle compiler, ShShaderInfo var_type,
+ ShaderTranslator::VariableMap* var_map) {
+ if (!var_map)
+ return;
+ var_map->clear();
+
+ size_t name_len = 0, mapped_name_len = 0;
+ switch (var_type) {
+ case SH_ACTIVE_ATTRIBUTES:
+ ShGetInfo(compiler, SH_ACTIVE_ATTRIBUTE_MAX_LENGTH, &name_len);
+ break;
+ case SH_ACTIVE_UNIFORMS:
+ ShGetInfo(compiler, SH_ACTIVE_UNIFORM_MAX_LENGTH, &name_len);
+ break;
+ case SH_VARYINGS:
+ ShGetInfo(compiler, SH_VARYING_MAX_LENGTH, &name_len);
+ break;
+ default: NOTREACHED();
+ }
+ ShGetInfo(compiler, SH_MAPPED_NAME_MAX_LENGTH, &mapped_name_len);
+ if (name_len <= 1 || mapped_name_len <= 1) return;
+ scoped_ptr<char[]> name(new char[name_len]);
+ scoped_ptr<char[]> mapped_name(new char[mapped_name_len]);
+
+ size_t num_vars = 0;
+ ShGetInfo(compiler, var_type, &num_vars);
+ for (size_t i = 0; i < num_vars; ++i) {
+ size_t len = 0;
+ int size = 0;
+ sh::GLenum type = GL_NONE;
+ ShPrecisionType precision = SH_PRECISION_UNDEFINED;
+ int static_use = 0;
+
+ ShGetVariableInfo(compiler, var_type, i,
+ &len, &size, &type, &precision, &static_use,
+ name.get(), mapped_name.get());
+
+ // In theory we should CHECK(len <= name_len - 1) here, but ANGLE needs
+ // to handle long struct field name mapping before we can do this.
+ // Also, we should modify the ANGLE interface to also return a length
+ // for mapped_name.
+ std::string name_string(name.get(), std::min(len, name_len - 1));
+ mapped_name.get()[mapped_name_len - 1] = '\0';
+
+ ShaderTranslator::VariableInfo info(
+ type, size, precision, static_use, name_string);
+ (*var_map)[mapped_name.get()] = info;
+ }
+}
+
+void GetNameHashingInfo(
+ ShHandle compiler, ShaderTranslator::NameMap* name_map) {
+ if (!name_map)
+ return;
+ name_map->clear();
+
+ size_t hashed_names_count = 0;
+ ShGetInfo(compiler, SH_HASHED_NAMES_COUNT, &hashed_names_count);
+ if (hashed_names_count == 0)
+ return;
+
+ size_t name_max_len = 0, hashed_name_max_len = 0;
+ ShGetInfo(compiler, SH_NAME_MAX_LENGTH, &name_max_len);
+ ShGetInfo(compiler, SH_HASHED_NAME_MAX_LENGTH, &hashed_name_max_len);
+
+ scoped_ptr<char[]> name(new char[name_max_len]);
+ scoped_ptr<char[]> hashed_name(new char[hashed_name_max_len]);
+
+ for (size_t i = 0; i < hashed_names_count; ++i) {
+ ShGetNameHashingEntry(compiler, i, name.get(), hashed_name.get());
+ (*name_map)[hashed_name.get()] = name.get();
+ }
+}
+
+} // namespace
+
+namespace gpu {
+namespace gles2 {
+
+ShaderTranslator::DestructionObserver::DestructionObserver() {
+}
+
+ShaderTranslator::DestructionObserver::~DestructionObserver() {
+}
+
+ShaderTranslator::ShaderTranslator()
+ : compiler_(NULL),
+ implementation_is_glsl_es_(false),
+ driver_bug_workarounds_(static_cast<ShCompileOptions>(0)) {
+}
+
+bool ShaderTranslator::Init(
+ GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ ShaderTranslatorInterface::GlslImplementationType glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds) {
+ // Make sure Init is called only once.
+ DCHECK(compiler_ == NULL);
+ DCHECK(shader_type == GL_FRAGMENT_SHADER || shader_type == GL_VERTEX_SHADER);
+ DCHECK(shader_spec == SH_GLES2_SPEC || shader_spec == SH_WEBGL_SPEC);
+ DCHECK(resources != NULL);
+
+ g_translator_initializer.Get();
+
+ ShShaderOutput shader_output =
+ (glsl_implementation_type == kGlslES ? SH_ESSL_OUTPUT : SH_GLSL_OUTPUT);
+
+ {
+ TRACE_EVENT0("gpu", "ShConstructCompiler");
+ compiler_ = ShConstructCompiler(
+ shader_type, shader_spec, shader_output, resources);
+ }
+ compiler_options_ = *resources;
+ implementation_is_glsl_es_ = (glsl_implementation_type == kGlslES);
+ driver_bug_workarounds_ = driver_bug_workarounds;
+ return compiler_ != NULL;
+}
+
+int ShaderTranslator::GetCompileOptions() const {
+ int compile_options =
+ SH_OBJECT_CODE | SH_VARIABLES | SH_ENFORCE_PACKING_RESTRICTIONS |
+ SH_LIMIT_EXPRESSION_COMPLEXITY | SH_LIMIT_CALL_STACK_DEPTH |
+ SH_CLAMP_INDIRECT_ARRAY_BOUNDS;
+
+ compile_options |= driver_bug_workarounds_;
+
+ return compile_options;
+}
+
+bool ShaderTranslator::Translate(const std::string& shader_source,
+ std::string* info_log,
+ std::string* translated_source,
+ VariableMap* attrib_map,
+ VariableMap* uniform_map,
+ VariableMap* varying_map,
+ NameMap* name_map) const {
+ // Make sure this instance is initialized.
+ DCHECK(compiler_ != NULL);
+
+ bool success = false;
+ {
+ TRACE_EVENT0("gpu", "ShCompile");
+ const char* const shader_strings[] = { shader_source.c_str() };
+ success = !!ShCompile(
+ compiler_, shader_strings, 1, GetCompileOptions());
+ }
+ if (success) {
+ if (translated_source) {
+ translated_source->clear();
+ // Get translated shader.
+ size_t obj_code_len = 0;
+ ShGetInfo(compiler_, SH_OBJECT_CODE_LENGTH, &obj_code_len);
+ if (obj_code_len > 1) {
+ scoped_ptr<char[]> buffer(new char[obj_code_len]);
+ ShGetObjectCode(compiler_, buffer.get());
+ *translated_source = std::string(buffer.get(), obj_code_len - 1);
+ }
+ }
+ // Get info for attribs, uniforms, and varyings.
+ GetVariableInfo(compiler_, SH_ACTIVE_ATTRIBUTES, attrib_map);
+ GetVariableInfo(compiler_, SH_ACTIVE_UNIFORMS, uniform_map);
+ GetVariableInfo(compiler_, SH_VARYINGS, varying_map);
+ // Get info for name hashing.
+ GetNameHashingInfo(compiler_, name_map);
+ }
+
+ // Get info log.
+ if (info_log) {
+ info_log->clear();
+ size_t info_log_len = 0;
+ ShGetInfo(compiler_, SH_INFO_LOG_LENGTH, &info_log_len);
+ if (info_log_len > 1) {
+ scoped_ptr<char[]> buffer(new char[info_log_len]);
+ ShGetInfoLog(compiler_, buffer.get());
+ *info_log = std::string(buffer.get(), info_log_len - 1);
+ }
+ }
+
+ return success;
+}
+
+std::string ShaderTranslator::GetStringForOptionsThatWouldAffectCompilation()
+ const {
+ DCHECK(compiler_ != NULL);
+
+ size_t resource_len = 0;
+ ShGetInfo(compiler_, SH_RESOURCES_STRING_LENGTH, &resource_len);
+ DCHECK(resource_len > 1);
+ scoped_ptr<char[]> resource_str(new char[resource_len]);
+
+ ShGetBuiltInResourcesString(compiler_, resource_len, resource_str.get());
+
+ return std::string(":CompileOptions:" +
+ base::IntToString(GetCompileOptions())) +
+ std::string(resource_str.get());
+}
+
+void ShaderTranslator::AddDestructionObserver(
+ DestructionObserver* observer) {
+ destruction_observers_.AddObserver(observer);
+}
+
+void ShaderTranslator::RemoveDestructionObserver(
+ DestructionObserver* observer) {
+ destruction_observers_.RemoveObserver(observer);
+}
+
+ShaderTranslator::~ShaderTranslator() {
+ FOR_EACH_OBSERVER(DestructionObserver,
+ destruction_observers_,
+ OnDestruct(this));
+
+ if (compiler_ != NULL)
+ ShDestruct(compiler_);
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/shader_translator.h b/gpu/command_buffer/service/shader_translator.h
new file mode 100644
index 0000000..77e04ab
--- /dev/null
+++ b/gpu/command_buffer/service/shader_translator.h
@@ -0,0 +1,156 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/observer_list.h"
+#include "gpu/gpu_export.h"
+#include "third_party/angle/include/GLSLANG/ShaderLang.h"
+
+namespace gpu {
+namespace gles2 {
+
+// Translates a GLSL ES 2.0 shader to desktop GLSL shader, or just
+// validates GLSL ES 2.0 shaders on a true GLSL ES implementation.
+class ShaderTranslatorInterface {
+ public:
+ enum GlslImplementationType {
+ kGlsl,
+ kGlslES
+ };
+
+ struct VariableInfo {
+ VariableInfo()
+ : type(0),
+ size(0),
+ precision(SH_PRECISION_UNDEFINED),
+ static_use(0) {
+ }
+
+ VariableInfo(int _type, int _size, int _precision,
+ int _static_use, std::string _name)
+ : type(_type),
+ size(_size),
+ precision(_precision),
+ static_use(_static_use),
+ name(_name) {
+ }
+ bool operator==(
+ const ShaderTranslatorInterface::VariableInfo& other) const {
+ return type == other.type &&
+ size == other.size &&
+ precision == other.precision &&
+ strcmp(name.c_str(), other.name.c_str()) == 0;
+ }
+
+ int type;
+ int size;
+ int precision;
+ int static_use;
+ std::string name; // name in the original shader source.
+ };
+
+ // Mapping between variable name and info.
+ typedef base::hash_map<std::string, VariableInfo> VariableMap;
+ // Mapping between hashed name and original name.
+ typedef base::hash_map<std::string, std::string> NameMap;
+
+ // Initializes the translator.
+ // Must be called once before using the translator object.
+ virtual bool Init(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ GlslImplementationType glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds) = 0;
+
+ // Translates the given shader source.
+ // Returns true if translation is successful, false otherwise.
+ // Always fill |info_log| if it's non-null.
+ // Upon success, fill |translated_shader|, |attrib_map|, |uniform_map|,
+ // |varying_map|, and |name_map| if they are non-null.
+ virtual bool Translate(const std::string& shader_source,
+ std::string* info_log,
+ std::string* translated_shader,
+ VariableMap* attrib_map,
+ VariableMap* uniform_map,
+ VariableMap* varying_map,
+ NameMap* name_map) const = 0;
+
+  // Return a string that is unique for a specific set of options that would
+ // possibly affect compilation.
+ virtual std::string GetStringForOptionsThatWouldAffectCompilation() const = 0;
+
+ protected:
+ virtual ~ShaderTranslatorInterface() {}
+};
+
+// Implementation of ShaderTranslatorInterface
+class GPU_EXPORT ShaderTranslator
+ : public base::RefCounted<ShaderTranslator>,
+ NON_EXPORTED_BASE(public ShaderTranslatorInterface) {
+ public:
+ class DestructionObserver {
+ public:
+ DestructionObserver();
+ virtual ~DestructionObserver();
+
+ virtual void OnDestruct(ShaderTranslator* translator) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DestructionObserver);
+ };
+
+ ShaderTranslator();
+
+ // Overridden from ShaderTranslatorInterface.
+ virtual bool Init(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ GlslImplementationType glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds) OVERRIDE;
+
+ // Overridden from ShaderTranslatorInterface.
+ virtual bool Translate(const std::string& shader_source,
+ std::string* info_log,
+ std::string* translated_source,
+ VariableMap* attrib_map,
+ VariableMap* uniform_map,
+ VariableMap* varying_map,
+ NameMap* name_map) const OVERRIDE;
+
+ virtual std::string GetStringForOptionsThatWouldAffectCompilation() const
+ OVERRIDE;
+
+ void AddDestructionObserver(DestructionObserver* observer);
+ void RemoveDestructionObserver(DestructionObserver* observer);
+
+ private:
+ friend class base::RefCounted<ShaderTranslator>;
+
+ virtual ~ShaderTranslator();
+ int GetCompileOptions() const;
+
+ ShHandle compiler_;
+ ShBuiltInResources compiler_options_;
+ bool implementation_is_glsl_es_;
+ ShCompileOptions driver_bug_workarounds_;
+ ObserverList<DestructionObserver> destruction_observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderTranslator);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_H_
+
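
The interface comment above spells out the Translate() contract: |info_log| is always filled, while the translated source and the variable/name maps are only filled on success. Below is a minimal caller sketch honoring that contract (not part of the patch; the helper function is hypothetical).

#include "base/logging.h"
#include "gpu/command_buffer/service/shader_translator.h"

// Hypothetical helper, for illustration only.
bool SketchTranslate(gpu::gles2::ShaderTranslator* translator,
                     const std::string& shader_source) {
  std::string info_log;
  std::string translated_source;
  gpu::gles2::ShaderTranslator::VariableMap attribs, uniforms, varyings;
  gpu::gles2::ShaderTranslator::NameMap names;
  bool success = translator->Translate(shader_source, &info_log,
                                       &translated_source, &attribs,
                                       &uniforms, &varyings, &names);
  if (!success) {
    // The maps and translated source stay empty; the log explains the failure.
    LOG(ERROR) << info_log;
  }
  return success;
}
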
diff --git a/gpu/command_buffer/service/shader_translator_cache.cc b/gpu/command_buffer/service/shader_translator_cache.cc
new file mode 100644
index 0000000..631a88e
--- /dev/null
+++ b/gpu/command_buffer/service/shader_translator_cache.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+
+#include "gpu/command_buffer/service/shader_translator_cache.h"
+
+namespace gpu {
+namespace gles2 {
+
+ShaderTranslatorCache::ShaderTranslatorCache() {
+}
+
+ShaderTranslatorCache::~ShaderTranslatorCache() {
+ DCHECK(cache_.empty());
+}
+
+void ShaderTranslatorCache::OnDestruct(ShaderTranslator* translator) {
+ Cache::iterator it = cache_.begin();
+ while (it != cache_.end()) {
+ if (it->second == translator) {
+ cache_.erase(it);
+ return;
+ }
+ it++;
+ }
+}
+
+scoped_refptr<ShaderTranslator> ShaderTranslatorCache::GetTranslator(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ ShaderTranslatorInterface::GlslImplementationType
+ glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds) {
+ ShaderTranslatorInitParams params(shader_type,
+ shader_spec,
+ *resources,
+ glsl_implementation_type,
+ driver_bug_workarounds);
+
+ Cache::iterator it = cache_.find(params);
+ if (it != cache_.end())
+ return it->second;
+
+ ShaderTranslator* translator = new ShaderTranslator();
+ if (translator->Init(shader_type, shader_spec, resources,
+ glsl_implementation_type,
+ driver_bug_workarounds)) {
+ cache_[params] = translator;
+ translator->AddDestructionObserver(this);
+ return translator;
+ } else {
+ return NULL;
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/shader_translator_cache.h b/gpu/command_buffer/service/shader_translator_cache.h
new file mode 100644
index 0000000..2a272d1
--- /dev/null
+++ b/gpu/command_buffer/service/shader_translator_cache.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_CACHE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_CACHE_H_
+
+#include <string.h>
+
+#include <map>
+
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "third_party/angle/include/GLSLANG/ShaderLang.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This class is not thread safe and can only be created and destroyed
+// on a single thread. But it is safe to use two independent instances on two
+// threads without synchronization.
+//
+// TODO(backer): Investigate using glReleaseShaderCompiler as an alternative
+// to this cache.
+class GPU_EXPORT ShaderTranslatorCache
+ : public base::RefCounted<ShaderTranslatorCache>,
+ public NON_EXPORTED_BASE(ShaderTranslator::DestructionObserver) {
+ public:
+ ShaderTranslatorCache();
+
+ // ShaderTranslator::DestructionObserver implementation
+ virtual void OnDestruct(ShaderTranslator* translator) OVERRIDE;
+
+ scoped_refptr<ShaderTranslator> GetTranslator(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources* resources,
+ ShaderTranslatorInterface::GlslImplementationType
+ glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds);
+
+ private:
+ friend class base::RefCounted<ShaderTranslatorCache>;
+ virtual ~ShaderTranslatorCache();
+
+ // Parameters passed into ShaderTranslator::Init
+ struct ShaderTranslatorInitParams {
+ sh::GLenum shader_type;
+ ShShaderSpec shader_spec;
+ ShBuiltInResources resources;
+ ShaderTranslatorInterface::GlslImplementationType
+ glsl_implementation_type;
+ ShCompileOptions driver_bug_workarounds;
+
+ ShaderTranslatorInitParams(
+ sh::GLenum shader_type,
+ ShShaderSpec shader_spec,
+ const ShBuiltInResources& resources,
+ ShaderTranslatorInterface::GlslImplementationType
+ glsl_implementation_type,
+ ShCompileOptions driver_bug_workarounds)
+ : shader_type(shader_type),
+ shader_spec(shader_spec),
+ resources(resources),
+ glsl_implementation_type(glsl_implementation_type),
+ driver_bug_workarounds(driver_bug_workarounds) {
+ }
+
+ ShaderTranslatorInitParams(const ShaderTranslatorInitParams& params) {
+      memcpy(this, &params, sizeof(*this));
+ }
+
+ bool operator== (const ShaderTranslatorInitParams& params) const {
+      return memcmp(&params, this, sizeof(*this)) == 0;
+ }
+
+ bool operator< (const ShaderTranslatorInitParams& params) const {
+      return memcmp(&params, this, sizeof(*this)) < 0;
+ }
+ };
+
+ typedef std::map<ShaderTranslatorInitParams, ShaderTranslator* > Cache;
+ Cache cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderTranslatorCache);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_CACHE_H_
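
A short sketch (not part of the patch) of how a decoder might obtain translators through the cache: requests with identical init parameters return the same ref-counted instance, and an entry drops out of the cache when the last reference goes away, via the DestructionObserver hook above. The helper function is hypothetical.

#include <GLES2/gl2.h>
#include "gpu/command_buffer/service/shader_translator_cache.h"

// Hypothetical helper, for illustration only.
scoped_refptr<gpu::gles2::ShaderTranslator> SketchGetVertexTranslator(
    gpu::gles2::ShaderTranslatorCache* cache,
    const ShBuiltInResources& resources) {
  return cache->GetTranslator(
      GL_VERTEX_SHADER, SH_GLES2_SPEC, &resources,
      gpu::gles2::ShaderTranslatorInterface::kGlsl,
      static_cast<ShCompileOptions>(0));
}
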
diff --git a/gpu/command_buffer/service/shader_translator_unittest.cc b/gpu/command_buffer/service/shader_translator_unittest.cc
new file mode 100644
index 0000000..f489626
--- /dev/null
+++ b/gpu/command_buffer/service/shader_translator_unittest.cc
@@ -0,0 +1,323 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace gles2 {
+
+class ShaderTranslatorTest : public testing::Test {
+ public:
+ ShaderTranslatorTest() {
+ }
+
+ virtual ~ShaderTranslatorTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ ShBuiltInResources resources;
+ ShInitBuiltInResources(&resources);
+ resources.MaxExpressionComplexity = 32;
+ resources.MaxCallStackDepth = 32;
+
+ vertex_translator_ = new ShaderTranslator();
+ fragment_translator_ = new ShaderTranslator();
+
+ ASSERT_TRUE(vertex_translator_->Init(
+ GL_VERTEX_SHADER, SH_GLES2_SPEC, &resources,
+ ShaderTranslatorInterface::kGlsl,
+ SH_EMULATE_BUILT_IN_FUNCTIONS));
+ ASSERT_TRUE(fragment_translator_->Init(
+ GL_FRAGMENT_SHADER, SH_GLES2_SPEC, &resources,
+ ShaderTranslatorInterface::kGlsl,
+ static_cast<ShCompileOptions>(0)));
+ }
+ virtual void TearDown() {
+ vertex_translator_ = NULL;
+ fragment_translator_ = NULL;
+ }
+
+ scoped_refptr<ShaderTranslator> vertex_translator_;
+ scoped_refptr<ShaderTranslator> fragment_translator_;
+};
+
+TEST_F(ShaderTranslatorTest, ValidVertexShader) {
+ const char* shader =
+ "void main() {\n"
+ " gl_Position = vec4(1.0);\n"
+ "}";
+
+ // A valid shader should be successfully translated.
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_TRUE(vertex_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+  // Info log must be empty.
+ EXPECT_TRUE(info_log.empty());
+ // Translated shader must be valid and non-empty.
+ ASSERT_FALSE(translated_source.empty());
+ // There should be no attributes, uniforms, varyings.
+ EXPECT_TRUE(attrib_map.empty());
+ EXPECT_TRUE(uniform_map.empty());
+ EXPECT_TRUE(varying_map.empty());
+ // There should be no name mapping.
+ EXPECT_TRUE(name_map.empty());
+}
+
+TEST_F(ShaderTranslatorTest, InvalidVertexShader) {
+ const char* bad_shader = "foo-bar";
+ const char* good_shader =
+ "void main() {\n"
+ " gl_Position = vec4(1.0);\n"
+ "}";
+
+ // An invalid shader should fail.
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_FALSE(vertex_translator_->Translate(bad_shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+ // Info log must be valid and non-empty.
+ ASSERT_FALSE(info_log.empty());
+  // Translated source must be empty.
+ EXPECT_TRUE(translated_source.empty());
+ // There should be no attributes, uniforms, varyings, or name mapping.
+ EXPECT_TRUE(attrib_map.empty());
+ EXPECT_TRUE(uniform_map.empty());
+ EXPECT_TRUE(varying_map.empty());
+ EXPECT_TRUE(name_map.empty());
+
+ // Try a good shader after bad.
+ info_log.clear();
+ EXPECT_TRUE(vertex_translator_->Translate(good_shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+ EXPECT_TRUE(info_log.empty());
+ EXPECT_FALSE(translated_source.empty());
+}
+
+TEST_F(ShaderTranslatorTest, ValidFragmentShader) {
+ const char* shader =
+ "void main() {\n"
+ " gl_FragColor = vec4(1.0);\n"
+ "}";
+
+ // A valid shader should be successfully translated.
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_TRUE(fragment_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+  // Info log must be empty.
+ EXPECT_TRUE(info_log.empty());
+ // Translated shader must be valid and non-empty.
+ ASSERT_FALSE(translated_source.empty());
+ // There should be no attributes, uniforms, varyings, or name mapping.
+ EXPECT_TRUE(attrib_map.empty());
+ EXPECT_TRUE(uniform_map.empty());
+ EXPECT_TRUE(varying_map.empty());
+ EXPECT_TRUE(name_map.empty());
+}
+
+TEST_F(ShaderTranslatorTest, InvalidFragmentShader) {
+ const char* shader = "foo-bar";
+
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ // An invalid shader should fail.
+ EXPECT_FALSE(fragment_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+ // Info log must be valid and non-empty.
+ EXPECT_FALSE(info_log.empty());
+  // Translated source must be empty.
+ EXPECT_TRUE(translated_source.empty());
+ // There should be no attributes or uniforms.
+ EXPECT_TRUE(attrib_map.empty());
+ EXPECT_TRUE(uniform_map.empty());
+ EXPECT_TRUE(varying_map.empty());
+ EXPECT_TRUE(name_map.empty());
+}
+
+TEST_F(ShaderTranslatorTest, GetAttributes) {
+ const char* shader =
+ "attribute vec4 vPosition;\n"
+ "void main() {\n"
+ " gl_Position = vPosition;\n"
+ "}";
+
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_TRUE(vertex_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+  // Info log must be empty.
+ EXPECT_TRUE(info_log.empty());
+ // Translated shader must be valid and non-empty.
+ EXPECT_FALSE(translated_source.empty());
+ // There should be no uniforms.
+ EXPECT_TRUE(uniform_map.empty());
+  // There should be one attribute with the following characteristics:
+ // name:vPosition type:GL_FLOAT_VEC4 size:1.
+ EXPECT_EQ(1u, attrib_map.size());
+ ShaderTranslator::VariableMap::const_iterator iter =
+ attrib_map.find("vPosition");
+ EXPECT_TRUE(iter != attrib_map.end());
+ EXPECT_EQ(GL_FLOAT_VEC4, iter->second.type);
+ EXPECT_EQ(1, iter->second.size);
+ EXPECT_EQ("vPosition", iter->second.name);
+}
+
+TEST_F(ShaderTranslatorTest, GetUniforms) {
+ const char* shader =
+ "precision mediump float;\n"
+ "struct Foo {\n"
+ " vec4 color[1];\n"
+ "};\n"
+ "struct Bar {\n"
+ " Foo foo;\n"
+ "};\n"
+ "uniform Bar bar[2];\n"
+ "void main() {\n"
+ " gl_FragColor = bar[0].foo.color[0] + bar[1].foo.color[0];\n"
+ "}";
+
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_TRUE(fragment_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+  // Info log must be empty.
+ EXPECT_TRUE(info_log.empty());
+ // Translated shader must be valid and non-empty.
+ EXPECT_FALSE(translated_source.empty());
+ // There should be no attributes.
+ EXPECT_TRUE(attrib_map.empty());
+  // There should be two uniforms with the following characteristics:
+ // 1. name:bar[0].foo.color[0] type:GL_FLOAT_VEC4 size:1
+ // 2. name:bar[1].foo.color[0] type:GL_FLOAT_VEC4 size:1
+ EXPECT_EQ(2u, uniform_map.size());
+ // First uniform.
+ ShaderTranslator::VariableMap::const_iterator iter =
+ uniform_map.find("bar[0].foo.color[0]");
+ EXPECT_TRUE(iter != uniform_map.end());
+ EXPECT_EQ(GL_FLOAT_VEC4, iter->second.type);
+ EXPECT_EQ(1, iter->second.size);
+ EXPECT_EQ("bar[0].foo.color[0]", iter->second.name);
+ // Second uniform.
+ iter = uniform_map.find("bar[1].foo.color[0]");
+ EXPECT_TRUE(iter != uniform_map.end());
+ EXPECT_EQ(GL_FLOAT_VEC4, iter->second.type);
+ EXPECT_EQ(1, iter->second.size);
+ EXPECT_EQ("bar[1].foo.color[0]", iter->second.name);
+}
+
+#if defined(OS_MACOSX)
+TEST_F(ShaderTranslatorTest, BuiltInFunctionEmulation) {
+  // This test might become invalid in the future if the ANGLE translator no
+  // longer emulates dot(float, float) on Mac, or if the emulated function is
+  // no longer named webgl_dot_emu.
+ const char* shader =
+ "void main() {\n"
+ " gl_Position = vec4(dot(1.0, 1.0), 1.0, 1.0, 1.0);\n"
+ "}";
+
+ std::string info_log, translated_source;
+ ShaderTranslatorInterface::VariableMap attrib_map, uniform_map, varying_map;
+ ShaderTranslatorInterface::NameMap name_map;
+ EXPECT_TRUE(vertex_translator_->Translate(shader,
+ &info_log,
+ &translated_source,
+ &attrib_map,
+ &uniform_map,
+ &varying_map,
+ &name_map));
+  // Info log must be empty.
+ EXPECT_TRUE(info_log.empty());
+ // Translated shader must be valid and non-empty.
+ ASSERT_FALSE(translated_source.empty());
+ EXPECT_TRUE(strstr(translated_source.c_str(),
+ "webgl_dot_emu") != NULL);
+}
+#endif
+
+TEST_F(ShaderTranslatorTest, OptionsString) {
+ scoped_refptr<ShaderTranslator> translator_1 = new ShaderTranslator();
+ scoped_refptr<ShaderTranslator> translator_2 = new ShaderTranslator();
+ scoped_refptr<ShaderTranslator> translator_3 = new ShaderTranslator();
+
+ ShBuiltInResources resources;
+ ShInitBuiltInResources(&resources);
+
+ ASSERT_TRUE(translator_1->Init(
+ GL_VERTEX_SHADER, SH_GLES2_SPEC, &resources,
+ ShaderTranslatorInterface::kGlsl,
+ SH_EMULATE_BUILT_IN_FUNCTIONS));
+ ASSERT_TRUE(translator_2->Init(
+ GL_FRAGMENT_SHADER, SH_GLES2_SPEC, &resources,
+ ShaderTranslatorInterface::kGlsl,
+ static_cast<ShCompileOptions>(0)));
+ resources.EXT_draw_buffers = 1;
+ ASSERT_TRUE(translator_3->Init(
+ GL_VERTEX_SHADER, SH_GLES2_SPEC, &resources,
+ ShaderTranslatorInterface::kGlsl,
+ SH_EMULATE_BUILT_IN_FUNCTIONS));
+
+ std::string options_1(
+ translator_1->GetStringForOptionsThatWouldAffectCompilation());
+ std::string options_2(
+ translator_1->GetStringForOptionsThatWouldAffectCompilation());
+ std::string options_3(
+ translator_2->GetStringForOptionsThatWouldAffectCompilation());
+ std::string options_4(
+ translator_3->GetStringForOptionsThatWouldAffectCompilation());
+
+ EXPECT_EQ(options_1, options_2);
+ EXPECT_NE(options_1, options_3);
+ EXPECT_NE(options_1, options_4);
+ EXPECT_NE(options_3, options_4);
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/stream_texture_manager_in_process_android.cc b/gpu/command_buffer/service/stream_texture_manager_in_process_android.cc
new file mode 100644
index 0000000..b59cf5c
--- /dev/null
+++ b/gpu/command_buffer/service/stream_texture_manager_in_process_android.cc
@@ -0,0 +1,170 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gfx/size.h"
+#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_image.h"
+
+namespace gpu {
+
+namespace {
+
+// Simply wraps a SurfaceTexture reference as a GLImage.
+class GLImageImpl : public gfx::GLImage {
+ public:
+ GLImageImpl(const scoped_refptr<gfx::SurfaceTexture>& surface_texture,
+ const base::Closure& release_callback);
+
+  // Implement gfx::GLImage.
+ virtual void Destroy(bool have_context) OVERRIDE;
+ virtual gfx::Size GetSize() OVERRIDE;
+ virtual bool BindTexImage(unsigned target) OVERRIDE;
+ virtual void ReleaseTexImage(unsigned target) OVERRIDE;
+ virtual bool CopyTexImage(unsigned target) OVERRIDE;
+ virtual void WillUseTexImage() OVERRIDE;
+ virtual void DidUseTexImage() OVERRIDE {}
+ virtual void WillModifyTexImage() OVERRIDE {}
+ virtual void DidModifyTexImage() OVERRIDE {}
+ virtual bool ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
+ int z_order,
+ gfx::OverlayTransform transform,
+ const gfx::Rect& bounds_rect,
+ const gfx::RectF& crop_rect) OVERRIDE;
+
+ private:
+ virtual ~GLImageImpl();
+
+ scoped_refptr<gfx::SurfaceTexture> surface_texture_;
+ base::Closure release_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLImageImpl);
+};
+
+GLImageImpl::GLImageImpl(
+ const scoped_refptr<gfx::SurfaceTexture>& surface_texture,
+ const base::Closure& release_callback)
+ : surface_texture_(surface_texture), release_callback_(release_callback) {}
+
+GLImageImpl::~GLImageImpl() {
+ release_callback_.Run();
+}
+
+void GLImageImpl::Destroy(bool have_context) {
+ NOTREACHED();
+}
+
+gfx::Size GLImageImpl::GetSize() {
+ return gfx::Size();
+}
+
+bool GLImageImpl::BindTexImage(unsigned target) {
+ NOTREACHED();
+ return false;
+}
+
+void GLImageImpl::ReleaseTexImage(unsigned target) {
+ NOTREACHED();
+}
+
+bool GLImageImpl::CopyTexImage(unsigned target) {
+ return false;
+}
+
+void GLImageImpl::WillUseTexImage() {
+ surface_texture_->UpdateTexImage();
+}
+
+bool GLImageImpl::ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
+ int z_order,
+ gfx::OverlayTransform transform,
+ const gfx::Rect& bounds_rect,
+ const gfx::RectF& crop_rect) {
+ NOTREACHED();
+ return false;
+}
+
+} // anonymous namespace
+
+StreamTextureManagerInProcess::StreamTextureManagerInProcess()
+ : next_id_(1), weak_factory_(this) {}
+
+StreamTextureManagerInProcess::~StreamTextureManagerInProcess() {
+ if (!textures_.empty()) {
+ LOG(WARNING) << "Undestroyed surface textures while tearing down "
+ "StreamTextureManager.";
+ }
+}
+
+GLuint StreamTextureManagerInProcess::CreateStreamTexture(
+ uint32 client_texture_id,
+ gles2::TextureManager* texture_manager) {
+ CalledOnValidThread();
+
+ gles2::TextureRef* texture = texture_manager->GetTexture(client_texture_id);
+
+ if (!texture || (texture->texture()->target() &&
+ texture->texture()->target() != GL_TEXTURE_EXTERNAL_OES)) {
+ return 0;
+ }
+
+ scoped_refptr<gfx::SurfaceTexture> surface_texture(
+ gfx::SurfaceTexture::Create(texture->service_id()));
+
+ uint32 stream_id = next_id_++;
+ base::Closure release_callback =
+ base::Bind(&StreamTextureManagerInProcess::OnReleaseStreamTexture,
+ weak_factory_.GetWeakPtr(), stream_id);
+ scoped_refptr<gfx::GLImage> gl_image(new GLImageImpl(surface_texture,
+ release_callback));
+
+ gfx::Size size = gl_image->GetSize();
+ texture_manager->SetTarget(texture, GL_TEXTURE_EXTERNAL_OES);
+ texture_manager->SetLevelInfo(texture,
+ GL_TEXTURE_EXTERNAL_OES,
+ 0,
+ GL_RGBA,
+ size.width(),
+ size.height(),
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ texture_manager->SetLevelImage(texture, GL_TEXTURE_EXTERNAL_OES, 0, gl_image);
+
+ {
+ base::AutoLock lock(map_lock_);
+ textures_[stream_id] = surface_texture;
+ }
+
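+  // Stream id 0 is used as the failure value above, so skip it if the counter
+  // ever wraps around.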
+ if (next_id_ == 0)
+ next_id_++;
+
+ return stream_id;
+}
+
+void StreamTextureManagerInProcess::OnReleaseStreamTexture(uint32 stream_id) {
+ CalledOnValidThread();
+ base::AutoLock lock(map_lock_);
+ textures_.erase(stream_id);
+}
+
+// This can get called from any thread.
+scoped_refptr<gfx::SurfaceTexture>
+StreamTextureManagerInProcess::GetSurfaceTexture(uint32 stream_id) {
+ base::AutoLock lock(map_lock_);
+ TextureMap::const_iterator it = textures_.find(stream_id);
+ if (it != textures_.end())
+ return it->second;
+
+ return NULL;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/stream_texture_manager_in_process_android.h b/gpu/command_buffer/service/stream_texture_manager_in_process_android.h
new file mode 100644
index 0000000..8b507b0
--- /dev/null
+++ b/gpu/command_buffer/service/stream_texture_manager_in_process_android.h
@@ -0,0 +1,50 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_STREAM_TEXTURE_MANAGER_IN_PROCESS_ANDROID_H_
+#define GPU_STREAM_TEXTURE_MANAGER_IN_PROCESS_ANDROID_H_
+
+#include <map>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/non_thread_safe.h"
+
+namespace gfx {
+class SurfaceTexture;
+}
+
+namespace gpu {
+
+namespace gles2 {
+class TextureManager;
+}
+
+class StreamTextureManagerInProcess : public base::NonThreadSafe {
+ public:
+ StreamTextureManagerInProcess();
+ ~StreamTextureManagerInProcess();
+
+ uint32 CreateStreamTexture(uint32 client_texture_id,
+ gles2::TextureManager* texture_manager);
+
+ // This method can be called from any thread.
+ scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture(uint32 stream_id);
+
+ private:
+ void OnReleaseStreamTexture(uint32 stream_id);
+
+ typedef std::map<uint32, scoped_refptr<gfx::SurfaceTexture> > TextureMap;
+ TextureMap textures_;
+ base::Lock map_lock_;
+ uint32 next_id_;
+
+ base::WeakPtrFactory<StreamTextureManagerInProcess> weak_factory_;
+ DISALLOW_COPY_AND_ASSIGN(StreamTextureManagerInProcess);
+};
+
+} // namespace gpu
+
+#endif // GPU_STREAM_TEXTURE_MANAGER_IN_PROCESS_ANDROID_H_
diff --git a/gpu/command_buffer/service/test_helper.cc b/gpu/command_buffer/service/test_helper.cc
new file mode 100644
index 0000000..2f0e9c9
--- /dev/null
+++ b/gpu/command_buffer/service/test_helper.cc
@@ -0,0 +1,729 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/test_helper.h"
+
+#include <algorithm>
+#include <string>
+
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_tokenizer.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::NotNull;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLuint TestHelper::kServiceBlackTexture2dId;
+const GLuint TestHelper::kServiceDefaultTexture2dId;
+const GLuint TestHelper::kServiceBlackTextureCubemapId;
+const GLuint TestHelper::kServiceDefaultTextureCubemapId;
+const GLuint TestHelper::kServiceBlackExternalTextureId;
+const GLuint TestHelper::kServiceDefaultExternalTextureId;
+const GLuint TestHelper::kServiceBlackRectangleTextureId;
+const GLuint TestHelper::kServiceDefaultRectangleTextureId;
+
+const GLint TestHelper::kMaxSamples;
+const GLint TestHelper::kMaxRenderbufferSize;
+const GLint TestHelper::kMaxTextureSize;
+const GLint TestHelper::kMaxCubeMapTextureSize;
+const GLint TestHelper::kNumVertexAttribs;
+const GLint TestHelper::kNumTextureUnits;
+const GLint TestHelper::kMaxTextureImageUnits;
+const GLint TestHelper::kMaxVertexTextureImageUnits;
+const GLint TestHelper::kMaxFragmentUniformVectors;
+const GLint TestHelper::kMaxFragmentUniformComponents;
+const GLint TestHelper::kMaxVaryingVectors;
+const GLint TestHelper::kMaxVaryingFloats;
+const GLint TestHelper::kMaxVertexUniformVectors;
+const GLint TestHelper::kMaxVertexUniformComponents;
+#endif
+
+void TestHelper::SetupTextureInitializationExpectations(
+ ::gfx::MockGLInterface* gl,
+ GLenum target,
+ bool use_default_textures) {
+ InSequence sequence;
+
+ bool needs_initialization = (target != GL_TEXTURE_EXTERNAL_OES);
+ bool needs_faces = (target == GL_TEXTURE_CUBE_MAP);
+
+ static GLuint texture_2d_ids[] = {
+ kServiceBlackTexture2dId,
+ kServiceDefaultTexture2dId };
+ static GLuint texture_cube_map_ids[] = {
+ kServiceBlackTextureCubemapId,
+ kServiceDefaultTextureCubemapId };
+ static GLuint texture_external_oes_ids[] = {
+ kServiceBlackExternalTextureId,
+ kServiceDefaultExternalTextureId };
+ static GLuint texture_rectangle_arb_ids[] = {
+ kServiceBlackRectangleTextureId,
+ kServiceDefaultRectangleTextureId };
+
+ const GLuint* texture_ids = NULL;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ texture_ids = &texture_2d_ids[0];
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ texture_ids = &texture_cube_map_ids[0];
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ texture_ids = &texture_external_oes_ids[0];
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ texture_ids = &texture_rectangle_arb_ids[0];
+ break;
+ default:
+ NOTREACHED();
+ }
+
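+  // The "black" texture id is always generated; the "default" texture id is
+  // only generated when default textures are in use, hence one or two ids.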
+ int array_size = use_default_textures ? 2 : 1;
+
+ EXPECT_CALL(*gl, GenTextures(array_size, _))
+ .WillOnce(SetArrayArgument<1>(texture_ids,
+ texture_ids + array_size))
+ .RetiresOnSaturation();
+ for (int ii = 0; ii < array_size; ++ii) {
+ EXPECT_CALL(*gl, BindTexture(target, texture_ids[ii]))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (needs_initialization) {
+ if (needs_faces) {
+ static GLenum faces[] = {
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ };
+ for (size_t ii = 0; ii < arraysize(faces); ++ii) {
+ EXPECT_CALL(*gl, TexImage2D(faces[ii], 0, GL_RGBA, 1, 1, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ } else {
+ EXPECT_CALL(*gl, TexImage2D(target, 0, GL_RGBA, 1, 1, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ }
+ }
+ EXPECT_CALL(*gl, BindTexture(target, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+void TestHelper::SetupTextureManagerInitExpectations(
+ ::gfx::MockGLInterface* gl,
+ const char* extensions,
+ bool use_default_textures) {
+ InSequence sequence;
+
+ SetupTextureInitializationExpectations(
+ gl, GL_TEXTURE_2D, use_default_textures);
+ SetupTextureInitializationExpectations(
+ gl, GL_TEXTURE_CUBE_MAP, use_default_textures);
+
+ bool ext_image_external = false;
+ bool arb_texture_rectangle = false;
+ base::CStringTokenizer t(extensions, extensions + strlen(extensions), " ");
+ while (t.GetNext()) {
+ if (t.token() == "GL_OES_EGL_image_external") {
+ ext_image_external = true;
+ break;
+ }
+ if (t.token() == "GL_ARB_texture_rectangle") {
+ arb_texture_rectangle = true;
+ break;
+ }
+ }
+
+ if (ext_image_external) {
+ SetupTextureInitializationExpectations(
+ gl, GL_TEXTURE_EXTERNAL_OES, use_default_textures);
+ }
+ if (arb_texture_rectangle) {
+ SetupTextureInitializationExpectations(
+ gl, GL_TEXTURE_RECTANGLE_ARB, use_default_textures);
+ }
+}
+
+void TestHelper::SetupTextureDestructionExpectations(
+ ::gfx::MockGLInterface* gl,
+ GLenum target,
+ bool use_default_textures) {
+ if (!use_default_textures)
+ return;
+
+ GLuint texture_id = 0;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ texture_id = kServiceDefaultTexture2dId;
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ texture_id = kServiceDefaultTextureCubemapId;
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ texture_id = kServiceDefaultExternalTextureId;
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ texture_id = kServiceDefaultRectangleTextureId;
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ EXPECT_CALL(*gl, DeleteTextures(1, Pointee(texture_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+void TestHelper::SetupTextureManagerDestructionExpectations(
+ ::gfx::MockGLInterface* gl,
+ const char* extensions,
+ bool use_default_textures) {
+ SetupTextureDestructionExpectations(gl, GL_TEXTURE_2D, use_default_textures);
+ SetupTextureDestructionExpectations(
+ gl, GL_TEXTURE_CUBE_MAP, use_default_textures);
+
+ bool ext_image_external = false;
+ bool arb_texture_rectangle = false;
+ base::CStringTokenizer t(extensions, extensions + strlen(extensions), " ");
+ while (t.GetNext()) {
+ if (t.token() == "GL_OES_EGL_image_external") {
+ ext_image_external = true;
+ break;
+ }
+ if (t.token() == "GL_ARB_texture_rectangle") {
+ arb_texture_rectangle = true;
+ break;
+ }
+ }
+
+ if (ext_image_external) {
+ SetupTextureDestructionExpectations(
+ gl, GL_TEXTURE_EXTERNAL_OES, use_default_textures);
+ }
+ if (arb_texture_rectangle) {
+ SetupTextureDestructionExpectations(
+ gl, GL_TEXTURE_RECTANGLE_ARB, use_default_textures);
+ }
+
+ EXPECT_CALL(*gl, DeleteTextures(4, _))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+void TestHelper::SetupContextGroupInitExpectations(
+ ::gfx::MockGLInterface* gl,
+ const DisallowedFeatures& disallowed_features,
+ const char* extensions,
+ const char* gl_version,
+ bool bind_generates_resource) {
+ InSequence sequence;
+
+ SetupFeatureInfoInitExpectationsWithGLVersion(gl, extensions, "", gl_version);
+
+ std::string l_version(base::StringToLowerASCII(std::string(gl_version)));
+ bool is_es3 = (l_version.substr(0, 12) == "opengl es 3.");
+
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_RENDERBUFFER_SIZE, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxRenderbufferSize))
+ .RetiresOnSaturation();
+ if (strstr(extensions, "GL_EXT_framebuffer_multisample") ||
+ strstr(extensions, "GL_EXT_multisampled_render_to_texture") || is_es3) {
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_SAMPLES, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxSamples))
+ .RetiresOnSaturation();
+ } else if (strstr(extensions, "GL_IMG_multisampled_render_to_texture")) {
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_SAMPLES_IMG, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxSamples))
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_VERTEX_ATTRIBS, _))
+ .WillOnce(SetArgumentPointee<1>(kNumVertexAttribs))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, _))
+ .WillOnce(SetArgumentPointee<1>(kNumTextureUnits))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_TEXTURE_SIZE, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxTextureSize))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxCubeMapTextureSize))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxTextureImageUnits))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxVertexTextureImageUnits))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxFragmentUniformComponents))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_VARYING_FLOATS, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxVaryingFloats))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_MAX_VERTEX_UNIFORM_COMPONENTS, _))
+ .WillOnce(SetArgumentPointee<1>(kMaxVertexUniformComponents))
+ .RetiresOnSaturation();
+
+ bool use_default_textures = bind_generates_resource;
+ SetupTextureManagerInitExpectations(gl, extensions, use_default_textures);
+}
+
+void TestHelper::SetupFeatureInfoInitExpectations(
+ ::gfx::MockGLInterface* gl, const char* extensions) {
+ SetupFeatureInfoInitExpectationsWithGLVersion(gl, extensions, "", "");
+}
+
+void TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
+ ::gfx::MockGLInterface* gl,
+ const char* extensions,
+ const char* gl_renderer,
+ const char* gl_version) {
+ InSequence sequence;
+
+ EXPECT_CALL(*gl, GetString(GL_EXTENSIONS))
+ .WillOnce(Return(reinterpret_cast<const uint8*>(extensions)))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetString(GL_RENDERER))
+ .WillOnce(Return(reinterpret_cast<const uint8*>(gl_renderer)))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetString(GL_VERSION))
+ .WillOnce(Return(reinterpret_cast<const uint8*>(gl_version)))
+ .RetiresOnSaturation();
+
+ std::string l_version(base::StringToLowerASCII(std::string(gl_version)));
+ bool is_es3 = (l_version.substr(0, 12) == "opengl es 3.");
+
+ if (strstr(extensions, "GL_ARB_texture_float") ||
+ (is_es3 && strstr(extensions, "GL_EXT_color_buffer_float"))) {
+ static const GLuint gl_ids[] = {101, 102};
+ const GLsizei width = 16;
+ EXPECT_CALL(*gl, GetIntegerv(GL_FRAMEBUFFER_BINDING, _))
+ .WillOnce(SetArgumentPointee<1>(gl_ids[0]))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetIntegerv(GL_TEXTURE_BINDING_2D, _))
+ .WillOnce(SetArgumentPointee<1>(gl_ids[0]))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GenTextures(1, _))
+ .WillOnce(SetArrayArgument<1>(gl_ids + 1, gl_ids + 2))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GenFramebuffersEXT(1, _))
+ .WillOnce(SetArrayArgument<1>(gl_ids + 1, gl_ids + 2))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BindTexture(GL_TEXTURE_2D, gl_ids[1]))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, width, 0,
+ GL_RGBA, GL_FLOAT, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BindFramebufferEXT(GL_FRAMEBUFFER, gl_ids[1]))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, FramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, gl_ids[1], 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, TexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, width, width, 0,
+ GL_RGB, GL_FLOAT, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (is_es3) {
+ EXPECT_CALL(*gl, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT))
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
+ .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl, DeleteFramebuffersEXT(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BindFramebufferEXT(GL_FRAMEBUFFER, gl_ids[0]))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BindTexture(GL_TEXTURE_2D, gl_ids[0]))
+ .Times(1)
+ .RetiresOnSaturation();
+#if DCHECK_IS_ON
+ EXPECT_CALL(*gl, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+#endif
+ }
+}
+
+void TestHelper::SetupExpectationsForClearingUniforms(
+ ::gfx::MockGLInterface* gl, UniformInfo* uniforms, size_t num_uniforms) {
+ for (size_t ii = 0; ii < num_uniforms; ++ii) {
+ const UniformInfo& info = uniforms[ii];
+ switch (info.type) {
+ case GL_FLOAT:
+ EXPECT_CALL(*gl, Uniform1fv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_VEC2:
+ EXPECT_CALL(*gl, Uniform2fv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_VEC3:
+ EXPECT_CALL(*gl, Uniform3fv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_VEC4:
+ EXPECT_CALL(*gl, Uniform4fv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_INT:
+ case GL_BOOL:
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_3D_OES:
+ case GL_SAMPLER_2D_RECT_ARB:
+ EXPECT_CALL(*gl, Uniform1iv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_INT_VEC2:
+ case GL_BOOL_VEC2:
+ EXPECT_CALL(*gl, Uniform2iv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_INT_VEC3:
+ case GL_BOOL_VEC3:
+ EXPECT_CALL(*gl, Uniform3iv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_INT_VEC4:
+ case GL_BOOL_VEC4:
+ EXPECT_CALL(*gl, Uniform4iv(info.real_location, info.size, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_MAT2:
+ EXPECT_CALL(*gl, UniformMatrix2fv(
+ info.real_location, info.size, false, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_MAT3:
+ EXPECT_CALL(*gl, UniformMatrix3fv(
+ info.real_location, info.size, false, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ case GL_FLOAT_MAT4:
+ EXPECT_CALL(*gl, UniformMatrix4fv(
+ info.real_location, info.size, false, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ }
+}
+
+void TestHelper::SetupProgramSuccessExpectations(
+ ::gfx::MockGLInterface* gl,
+ AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint service_id) {
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_LINK_STATUS, _))
+ .WillOnce(SetArgumentPointee<2>(1))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_INFO_LOG_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(0))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_ACTIVE_ATTRIBUTES, _))
+ .WillOnce(SetArgumentPointee<2>(num_attribs))
+ .RetiresOnSaturation();
+ size_t max_attrib_len = 0;
+ for (size_t ii = 0; ii < num_attribs; ++ii) {
+ size_t len = strlen(attribs[ii].name) + 1;
+ max_attrib_len = std::max(max_attrib_len, len);
+ }
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(max_attrib_len))
+ .RetiresOnSaturation();
+
+ for (size_t ii = 0; ii < num_attribs; ++ii) {
+ const AttribInfo& info = attribs[ii];
+ EXPECT_CALL(*gl,
+ GetActiveAttrib(service_id, ii,
+ max_attrib_len, _, _, _, _))
+ .WillOnce(DoAll(
+ SetArgumentPointee<3>(strlen(info.name)),
+ SetArgumentPointee<4>(info.size),
+ SetArgumentPointee<5>(info.type),
+ SetArrayArgument<6>(info.name,
+ info.name + strlen(info.name) + 1)))
+ .RetiresOnSaturation();
+ if (!ProgramManager::IsInvalidPrefix(info.name, strlen(info.name))) {
+ EXPECT_CALL(*gl, GetAttribLocation(service_id, StrEq(info.name)))
+ .WillOnce(Return(info.location))
+ .RetiresOnSaturation();
+ }
+ }
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_ACTIVE_UNIFORMS, _))
+ .WillOnce(SetArgumentPointee<2>(num_uniforms))
+ .RetiresOnSaturation();
+
+ size_t max_uniform_len = 0;
+ for (size_t ii = 0; ii < num_uniforms; ++ii) {
+ size_t len = strlen(uniforms[ii].name) + 1;
+ max_uniform_len = std::max(max_uniform_len, len);
+ }
+ EXPECT_CALL(*gl,
+ GetProgramiv(service_id, GL_ACTIVE_UNIFORM_MAX_LENGTH, _))
+ .WillOnce(SetArgumentPointee<2>(max_uniform_len))
+ .RetiresOnSaturation();
+ for (size_t ii = 0; ii < num_uniforms; ++ii) {
+ const UniformInfo& info = uniforms[ii];
+ EXPECT_CALL(*gl,
+ GetActiveUniform(service_id, ii,
+ max_uniform_len, _, _, _, _))
+ .WillOnce(DoAll(
+ SetArgumentPointee<3>(strlen(info.name)),
+ SetArgumentPointee<4>(info.size),
+ SetArgumentPointee<5>(info.type),
+ SetArrayArgument<6>(info.name,
+ info.name + strlen(info.name) + 1)))
+ .RetiresOnSaturation();
+ }
+
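+  // Two passes over the uniforms: base locations are queried in pass 0, and
+  // array element locations are expanded in pass 0 for uniforms with an
+  // explicit desired location and in pass 1 for the remaining uniforms.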
+ for (int pass = 0; pass < 2; ++pass) {
+ for (size_t ii = 0; ii < num_uniforms; ++ii) {
+ const UniformInfo& info = uniforms[ii];
+ if (ProgramManager::IsInvalidPrefix(info.name, strlen(info.name))) {
+ continue;
+ }
+ if (pass == 0) {
+ EXPECT_CALL(*gl, GetUniformLocation(service_id, StrEq(info.name)))
+ .WillOnce(Return(info.real_location))
+ .RetiresOnSaturation();
+ }
+ if ((pass == 0 && info.desired_location >= 0) ||
+ (pass == 1 && info.desired_location < 0)) {
+ if (info.size > 1) {
+ std::string base_name = info.name;
+ size_t array_pos = base_name.rfind("[0]");
+ if (base_name.size() > 3 && array_pos == base_name.size() - 3) {
+ base_name = base_name.substr(0, base_name.size() - 3);
+ }
+ for (GLsizei jj = 1; jj < info.size; ++jj) {
+ std::string element_name(
+ std::string(base_name) + "[" + base::IntToString(jj) + "]");
+ EXPECT_CALL(*gl, GetUniformLocation(
+ service_id, StrEq(element_name)))
+ .WillOnce(Return(info.real_location + jj * 2))
+ .RetiresOnSaturation();
+ }
+ }
+ }
+ }
+ }
+}
+
+void TestHelper::SetupShader(
+ ::gfx::MockGLInterface* gl,
+ AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint service_id) {
+ InSequence s;
+
+ EXPECT_CALL(*gl,
+ LinkProgram(service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ SetupProgramSuccessExpectations(
+ gl, attribs, num_attribs, uniforms, num_uniforms, service_id);
+}
+
+void TestHelper::DoBufferData(
+ ::gfx::MockGLInterface* gl, MockErrorState* error_state,
+ BufferManager* manager, Buffer* buffer, GLsizeiptr size, GLenum usage,
+ const GLvoid* data, GLenum error) {
+ EXPECT_CALL(*error_state, CopyRealGLErrorsToWrapper(_, _, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (manager->IsUsageClientSideArray(usage)) {
+ EXPECT_CALL(*gl, BufferData(
+ buffer->target(), 0, _, usage))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl, BufferData(
+ buffer->target(), size, _, usage))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*error_state, PeekGLError(_, _, _))
+ .WillOnce(Return(error))
+ .RetiresOnSaturation();
+ manager->DoBufferData(error_state, buffer, size, usage, data);
+}
+
+void TestHelper::SetTexParameteriWithExpectations(
+ ::gfx::MockGLInterface* gl, MockErrorState* error_state,
+ TextureManager* manager, TextureRef* texture_ref,
+ GLenum pname, GLint value, GLenum error) {
+ if (error == GL_NO_ERROR) {
+ if (pname != GL_TEXTURE_POOL_CHROMIUM) {
+ EXPECT_CALL(*gl, TexParameteri(texture_ref->texture()->target(),
+ pname, value))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ } else if (error == GL_INVALID_ENUM) {
+ EXPECT_CALL(*error_state, SetGLErrorInvalidEnum(_, _, _, value, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*error_state, SetGLErrorInvalidParami(_, _, error, _, _, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ manager->SetParameteri("", error_state, texture_ref, pname, value);
+}
+
+// static
+void TestHelper::SetShaderStates(
+ ::gfx::MockGLInterface* gl, Shader* shader,
+ bool expected_valid,
+ const std::string* const expected_log_info,
+ const std::string* const expected_translated_source,
+ const ShaderTranslatorInterface::VariableMap* const expected_attrib_map,
+ const ShaderTranslatorInterface::VariableMap* const expected_uniform_map,
+ const ShaderTranslatorInterface::VariableMap* const expected_varying_map,
+ const ShaderTranslatorInterface::NameMap* const expected_name_map) {
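+  // Expected outputs are only honored when they are consistent with the
+  // expected compile result: the log info applies to failed compiles, while
+  // the translated source and variable maps apply to successful ones;
+  // anything else falls back to an empty value for the mocked translator.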
+ const std::string empty_log_info;
+ const std::string* log_info = (expected_log_info && !expected_valid) ?
+ expected_log_info : &empty_log_info;
+ const std::string empty_translated_source;
+ const std::string* translated_source =
+ (expected_translated_source && expected_valid) ?
+ expected_translated_source : &empty_translated_source;
+ const ShaderTranslatorInterface::VariableMap empty_attrib_map;
+ const ShaderTranslatorInterface::VariableMap* attrib_map =
+ (expected_attrib_map && expected_valid) ?
+ expected_attrib_map : &empty_attrib_map;
+ const ShaderTranslatorInterface::VariableMap empty_uniform_map;
+ const ShaderTranslatorInterface::VariableMap* uniform_map =
+ (expected_uniform_map && expected_valid) ?
+ expected_uniform_map : &empty_uniform_map;
+ const ShaderTranslatorInterface::VariableMap empty_varying_map;
+ const ShaderTranslatorInterface::VariableMap* varying_map =
+ (expected_varying_map && expected_valid) ?
+ expected_varying_map : &empty_varying_map;
+ const ShaderTranslatorInterface::NameMap empty_name_map;
+ const ShaderTranslatorInterface::NameMap* name_map =
+ (expected_name_map && expected_valid) ?
+ expected_name_map : &empty_name_map;
+
+ MockShaderTranslator translator;
+ EXPECT_CALL(translator, Translate(_,
+ NotNull(), // log_info
+ NotNull(), // translated_source
+ NotNull(), // attrib_map
+ NotNull(), // uniform_map
+ NotNull(), // varying_map
+ NotNull())) // name_map
+ .WillOnce(DoAll(SetArgumentPointee<1>(*log_info),
+ SetArgumentPointee<2>(*translated_source),
+ SetArgumentPointee<3>(*attrib_map),
+ SetArgumentPointee<4>(*uniform_map),
+ SetArgumentPointee<5>(*varying_map),
+ SetArgumentPointee<6>(*name_map),
+ Return(expected_valid)))
+ .RetiresOnSaturation();
+ if (expected_valid) {
+ EXPECT_CALL(*gl, ShaderSource(shader->service_id(), 1, _, NULL))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, CompileShader(shader->service_id()))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, GetShaderiv(shader->service_id(),
+ GL_COMPILE_STATUS,
+ NotNull())) // status
+ .WillOnce(SetArgumentPointee<2>(GL_TRUE))
+ .RetiresOnSaturation();
+ }
+ shader->DoCompile(&translator, Shader::kGL);
+}
+
+// static
+void TestHelper::SetShaderStates(
+ ::gfx::MockGLInterface* gl, Shader* shader, bool valid) {
+ SetShaderStates(gl, shader, valid, NULL, NULL, NULL, NULL, NULL, NULL);
+}
+
+ScopedGLImplementationSetter::ScopedGLImplementationSetter(
+ gfx::GLImplementation implementation)
+ : old_implementation_(gfx::GetGLImplementation()) {
+ gfx::SetGLImplementation(implementation);
+}
+
+ScopedGLImplementationSetter::~ScopedGLImplementationSetter() {
+ gfx::SetGLImplementation(old_implementation_);
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/test_helper.h b/gpu/command_buffer/service/test_helper.h
new file mode 100644
index 0000000..92e929e
--- /dev/null
+++ b/gpu/command_buffer/service/test_helper.h
@@ -0,0 +1,150 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_TEST_HELPER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_TEST_HELPER_H_
+
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_mock.h"
+
+namespace gpu {
+namespace gles2 {
+
+struct DisallowedFeatures;
+class Buffer;
+class BufferManager;
+class MockErrorState;
+class Shader;
+class TextureRef;
+class TextureManager;
+
+class TestHelper {
+ public:
+ static const GLuint kServiceBlackTexture2dId = 701;
+ static const GLuint kServiceDefaultTexture2dId = 702;
+ static const GLuint kServiceBlackTextureCubemapId = 703;
+ static const GLuint kServiceDefaultTextureCubemapId = 704;
+ static const GLuint kServiceBlackExternalTextureId = 705;
+ static const GLuint kServiceDefaultExternalTextureId = 706;
+ static const GLuint kServiceBlackRectangleTextureId = 707;
+ static const GLuint kServiceDefaultRectangleTextureId = 708;
+
+ static const GLint kMaxSamples = 4;
+ static const GLint kMaxRenderbufferSize = 1024;
+ static const GLint kMaxTextureSize = 2048;
+ static const GLint kMaxCubeMapTextureSize = 256;
+ static const GLint kNumVertexAttribs = 16;
+ static const GLint kNumTextureUnits = 8;
+ static const GLint kMaxTextureImageUnits = 8;
+ static const GLint kMaxVertexTextureImageUnits = 2;
+ static const GLint kMaxFragmentUniformVectors = 16;
+ static const GLint kMaxFragmentUniformComponents =
+ kMaxFragmentUniformVectors * 4;
+ static const GLint kMaxVaryingVectors = 8;
+ static const GLint kMaxVaryingFloats = kMaxVaryingVectors * 4;
+ static const GLint kMaxVertexUniformVectors = 128;
+ static const GLint kMaxVertexUniformComponents = kMaxVertexUniformVectors * 4;
+
+ struct AttribInfo {
+ const char* name;
+ GLint size;
+ GLenum type;
+ GLint location;
+ };
+
+ struct UniformInfo {
+ const char* name;
+ GLint size;
+ GLenum type;
+ GLint fake_location;
+ GLint real_location;
+ GLint desired_location;
+ const char* good_name;
+ };
+
+ static void SetupContextGroupInitExpectations(
+ ::gfx::MockGLInterface* gl,
+ const DisallowedFeatures& disallowed_features,
+ const char* extensions,
+ const char* gl_version,
+ bool bind_generates_resource);
+ static void SetupFeatureInfoInitExpectations(
+ ::gfx::MockGLInterface* gl, const char* extensions);
+ static void SetupFeatureInfoInitExpectationsWithGLVersion(
+ ::gfx::MockGLInterface* gl,
+ const char* extensions,
+ const char* gl_renderer,
+ const char* gl_version);
+ static void SetupTextureManagerInitExpectations(::gfx::MockGLInterface* gl,
+ const char* extensions,
+ bool use_default_textures);
+ static void SetupTextureManagerDestructionExpectations(
+ ::gfx::MockGLInterface* gl,
+ const char* extensions,
+ bool use_default_textures);
+
+ static void SetupExpectationsForClearingUniforms(
+ ::gfx::MockGLInterface* gl, UniformInfo* uniforms, size_t num_uniforms);
+
+ static void SetupShader(
+ ::gfx::MockGLInterface* gl,
+ AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint service_id);
+
+ static void SetupProgramSuccessExpectations(::gfx::MockGLInterface* gl,
+ AttribInfo* attribs, size_t num_attribs,
+ UniformInfo* uniforms, size_t num_uniforms,
+ GLuint service_id);
+
+ static void DoBufferData(
+ ::gfx::MockGLInterface* gl, MockErrorState* error_state,
+ BufferManager* manager, Buffer* buffer, GLsizeiptr size, GLenum usage,
+ const GLvoid* data, GLenum error);
+
+ static void SetTexParameteriWithExpectations(
+ ::gfx::MockGLInterface* gl, MockErrorState* error_state,
+ TextureManager* manager, TextureRef* texture_ref,
+ GLenum pname, GLint value, GLenum error);
+
+ static void SetShaderStates(
+ ::gfx::MockGLInterface* gl, Shader* shader,
+ bool expected_valid,
+ const std::string* const expected_log_info,
+ const std::string* const expected_translated_source,
+ const ShaderTranslatorInterface::VariableMap* const expected_attrib_map,
+ const ShaderTranslatorInterface::VariableMap* const expected_uniform_map,
+ const ShaderTranslatorInterface::VariableMap* const expected_varying_map,
+ const ShaderTranslatorInterface::NameMap* const expected_name_map);
+
+ static void SetShaderStates(
+ ::gfx::MockGLInterface* gl, Shader* shader, bool valid);
+
+ private:
+ static void SetupTextureInitializationExpectations(::gfx::MockGLInterface* gl,
+ GLenum target,
+ bool use_default_textures);
+ static void SetupTextureDestructionExpectations(::gfx::MockGLInterface* gl,
+ GLenum target,
+ bool use_default_textures);
+};
+
+// This object temporarily sets what gfx::GetGLImplementation returns. During
+// testing the GLImplementation is set to kGLImplementationMockGL but lots of
+// code branches based on what gfx::GetGLImplementation returns.
+class ScopedGLImplementationSetter {
+ public:
+ explicit ScopedGLImplementationSetter(gfx::GLImplementation implementation);
+ ~ScopedGLImplementationSetter();
+
+ private:
+ gfx::GLImplementation old_implementation_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_TEST_HELPER_H_
+
diff --git a/gpu/command_buffer/service/texture_definition.cc b/gpu/command_buffer/service/texture_definition.cc
new file mode 100644
index 0000000..393dda0
--- /dev/null
+++ b/gpu/command_buffer/service/texture_definition.cc
@@ -0,0 +1,496 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/texture_definition.h"
+
+#include <list>
+
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/gl_image.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/scoped_binders.h"
+
+#if !defined(OS_MACOSX)
+#include "ui/gl/gl_fence_egl.h"
+#include "ui/gl/gl_surface_egl.h"
+#endif
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+class GLImageSync : public gfx::GLImage {
+ public:
+ explicit GLImageSync(const scoped_refptr<NativeImageBuffer>& buffer,
+ const gfx::Size& size);
+
+ // Implement GLImage.
+ virtual void Destroy(bool have_context) OVERRIDE;
+ virtual gfx::Size GetSize() OVERRIDE;
+ virtual bool BindTexImage(unsigned target) OVERRIDE;
+ virtual void ReleaseTexImage(unsigned target) OVERRIDE;
+ virtual bool CopyTexImage(unsigned target) OVERRIDE;
+ virtual void WillUseTexImage() OVERRIDE;
+ virtual void WillModifyTexImage() OVERRIDE;
+ virtual void DidModifyTexImage() OVERRIDE;
+ virtual void DidUseTexImage() OVERRIDE;
+ virtual bool ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
+ int z_order,
+ gfx::OverlayTransform transform,
+ const gfx::Rect& bounds_rect,
+ const gfx::RectF& crop_rect) OVERRIDE;
+
+ protected:
+ virtual ~GLImageSync();
+
+ private:
+ scoped_refptr<NativeImageBuffer> buffer_;
+ gfx::Size size_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLImageSync);
+};
+
+GLImageSync::GLImageSync(const scoped_refptr<NativeImageBuffer>& buffer,
+ const gfx::Size& size)
+ : buffer_(buffer), size_(size) {
+ if (buffer.get())
+ buffer->AddClient(this);
+}
+
+GLImageSync::~GLImageSync() {
+ if (buffer_.get())
+ buffer_->RemoveClient(this);
+}
+
+void GLImageSync::Destroy(bool have_context) {
+}
+
+gfx::Size GLImageSync::GetSize() {
+ return size_;
+}
+
+bool GLImageSync::BindTexImage(unsigned target) {
+ NOTREACHED();
+ return false;
+}
+
+void GLImageSync::ReleaseTexImage(unsigned target) {
+ NOTREACHED();
+}
+
+bool GLImageSync::CopyTexImage(unsigned target) {
+ return false;
+}
+
+void GLImageSync::WillUseTexImage() {
+ if (buffer_.get())
+ buffer_->WillRead(this);
+}
+
+void GLImageSync::DidUseTexImage() {
+ if (buffer_.get())
+ buffer_->DidRead(this);
+}
+
+void GLImageSync::WillModifyTexImage() {
+ if (buffer_.get())
+ buffer_->WillWrite(this);
+}
+
+void GLImageSync::DidModifyTexImage() {
+ if (buffer_.get())
+ buffer_->DidWrite(this);
+}
+
+bool GLImageSync::ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
+ int z_order,
+ gfx::OverlayTransform transform,
+ const gfx::Rect& bounds_rect,
+ const gfx::RectF& crop_rect) {
+ NOTREACHED();
+ return false;
+}
+
+#if !defined(OS_MACOSX)
+class NativeImageBufferEGL : public NativeImageBuffer {
+ public:
+ static scoped_refptr<NativeImageBufferEGL> Create(GLuint texture_id);
+
+ private:
+ NativeImageBufferEGL(EGLDisplay display, EGLImageKHR image);
+ virtual ~NativeImageBufferEGL();
+ virtual void AddClient(gfx::GLImage* client) OVERRIDE;
+ virtual void RemoveClient(gfx::GLImage* client) OVERRIDE;
+ virtual bool IsClient(gfx::GLImage* client) OVERRIDE;
+ virtual void BindToTexture(GLenum target) OVERRIDE;
+ virtual void WillRead(gfx::GLImage* client) OVERRIDE;
+ virtual void WillWrite(gfx::GLImage* client) OVERRIDE;
+ virtual void DidRead(gfx::GLImage* client) OVERRIDE;
+ virtual void DidWrite(gfx::GLImage* client) OVERRIDE;
+
+ EGLDisplay egl_display_;
+ EGLImageKHR egl_image_;
+
+ base::Lock lock_;
+
+ struct ClientInfo {
+ ClientInfo(gfx::GLImage* client);
+ ~ClientInfo();
+
+ gfx::GLImage* client;
+ bool needs_wait_before_read;
+ linked_ptr<gfx::GLFence> read_fence;
+ };
+ std::list<ClientInfo> client_infos_;
+ scoped_ptr<gfx::GLFence> write_fence_;
+ gfx::GLImage* write_client_;
+
+ DISALLOW_COPY_AND_ASSIGN(NativeImageBufferEGL);
+};
+
+scoped_refptr<NativeImageBufferEGL> NativeImageBufferEGL::Create(
+ GLuint texture_id) {
+ EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
+ EGLContext egl_context = eglGetCurrentContext();
+
+ DCHECK_NE(EGL_NO_CONTEXT, egl_context);
+ DCHECK_NE(EGL_NO_DISPLAY, egl_display);
+ DCHECK(glIsTexture(texture_id));
+
+ DCHECK(gfx::g_driver_egl.ext.b_EGL_KHR_image_base &&
+ gfx::g_driver_egl.ext.b_EGL_KHR_gl_texture_2D_image &&
+ gfx::g_driver_gl.ext.b_GL_OES_EGL_image &&
+ gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync);
+
+ const EGLint egl_attrib_list[] = {
+ EGL_GL_TEXTURE_LEVEL_KHR, 0, EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE};
+ EGLClientBuffer egl_buffer = reinterpret_cast<EGLClientBuffer>(texture_id);
+ EGLenum egl_target = EGL_GL_TEXTURE_2D_KHR; // TODO
+
+ EGLImageKHR egl_image = eglCreateImageKHR(
+ egl_display, egl_context, egl_target, egl_buffer, egl_attrib_list);
+
+ if (egl_image == EGL_NO_IMAGE_KHR)
+ return NULL;
+
+ return new NativeImageBufferEGL(egl_display, egl_image);
+}
+
+NativeImageBufferEGL::ClientInfo::ClientInfo(gfx::GLImage* client)
+ : client(client), needs_wait_before_read(true) {}
+
+NativeImageBufferEGL::ClientInfo::~ClientInfo() {}
+
+NativeImageBufferEGL::NativeImageBufferEGL(EGLDisplay display,
+ EGLImageKHR image)
+ : NativeImageBuffer(),
+ egl_display_(display),
+ egl_image_(image),
+ write_fence_(new gfx::GLFenceEGL(true)),
+ write_client_(NULL) {
+ DCHECK(egl_display_ != EGL_NO_DISPLAY);
+ DCHECK(egl_image_ != EGL_NO_IMAGE_KHR);
+}
+
+NativeImageBufferEGL::~NativeImageBufferEGL() {
+ DCHECK(client_infos_.empty());
+ if (egl_image_ != EGL_NO_IMAGE_KHR)
+ eglDestroyImageKHR(egl_display_, egl_image_);
+}
+
+void NativeImageBufferEGL::AddClient(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ client_infos_.push_back(ClientInfo(client));
+}
+
+void NativeImageBufferEGL::RemoveClient(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ if (write_client_ == client)
+ write_client_ = NULL;
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ if (it->client == client) {
+ client_infos_.erase(it);
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+bool NativeImageBufferEGL::IsClient(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ if (it->client == client)
+ return true;
+ }
+ return false;
+}
+
+void NativeImageBufferEGL::BindToTexture(GLenum target) {
+ DCHECK(egl_image_ != EGL_NO_IMAGE_KHR);
+ glEGLImageTargetTexture2DOES(target, egl_image_);
+ DCHECK_EQ(static_cast<EGLint>(EGL_SUCCESS), eglGetError());
+ DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+}
+
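+// Before a client reads from the image, wait on the most recent write fence
+// unless that client is the one that produced the write.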
+void NativeImageBufferEGL::WillRead(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ if (!write_fence_.get() || write_client_ == client)
+ return;
+
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ if (it->client == client) {
+ if (it->needs_wait_before_read) {
+ it->needs_wait_before_read = false;
+ write_fence_->ServerWait();
+ }
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+void NativeImageBufferEGL::WillWrite(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ if (write_client_ != client)
+ write_fence_->ServerWait();
+
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ if (it->read_fence.get() && it->client != client)
+ it->read_fence->ServerWait();
+ }
+}
+
+void NativeImageBufferEGL::DidRead(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ if (it->client == client) {
+ it->read_fence = make_linked_ptr(new gfx::GLFenceEGL(true));
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+void NativeImageBufferEGL::DidWrite(gfx::GLImage* client) {
+ base::AutoLock lock(lock_);
+ // Sharing semantics require the client to flush in order to make changes
+ // visible to other clients.
+ write_fence_.reset(new gfx::GLFenceEGL(false));
+ write_client_ = client;
+ for (std::list<ClientInfo>::iterator it = client_infos_.begin();
+ it != client_infos_.end();
+ it++) {
+ it->needs_wait_before_read = true;
+ }
+}
+
+#endif
+
+class NativeImageBufferStub : public NativeImageBuffer {
+ public:
+ NativeImageBufferStub() : NativeImageBuffer() {}
+
+ private:
+ virtual ~NativeImageBufferStub() {}
+ virtual void AddClient(gfx::GLImage* client) OVERRIDE {}
+ virtual void RemoveClient(gfx::GLImage* client) OVERRIDE {}
+ virtual bool IsClient(gfx::GLImage* client) OVERRIDE { return true; }
+ virtual void BindToTexture(GLenum target) OVERRIDE {}
+ virtual void WillRead(gfx::GLImage* client) OVERRIDE {}
+ virtual void WillWrite(gfx::GLImage* client) OVERRIDE {}
+ virtual void DidRead(gfx::GLImage* client) OVERRIDE {}
+ virtual void DidWrite(gfx::GLImage* client) OVERRIDE {}
+
+ DISALLOW_COPY_AND_ASSIGN(NativeImageBufferStub);
+};
+
+} // anonymous namespace
+
+// static
+scoped_refptr<NativeImageBuffer> NativeImageBuffer::Create(GLuint texture_id) {
+ switch (gfx::GetGLImplementation()) {
+#if !defined(OS_MACOSX)
+ case gfx::kGLImplementationEGLGLES2:
+ return NativeImageBufferEGL::Create(texture_id);
+#endif
+ case gfx::kGLImplementationMockGL:
+ return new NativeImageBufferStub;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+}
+
+TextureDefinition::LevelInfo::LevelInfo(GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared)
+ : target(target),
+ internal_format(internal_format),
+ width(width),
+ height(height),
+ depth(depth),
+ border(border),
+ format(format),
+ type(type),
+ cleared(cleared) {}
+
+TextureDefinition::LevelInfo::~LevelInfo() {}
+
+TextureDefinition::TextureDefinition(
+ GLenum target,
+ Texture* texture,
+ unsigned int version,
+ const scoped_refptr<NativeImageBuffer>& image_buffer)
+ : version_(version),
+ target_(target),
+ image_buffer_(image_buffer.get()
+ ? image_buffer
+ : NativeImageBuffer::Create(texture->service_id())),
+ min_filter_(texture->min_filter()),
+ mag_filter_(texture->mag_filter()),
+ wrap_s_(texture->wrap_s()),
+ wrap_t_(texture->wrap_t()),
+ usage_(texture->usage()),
+ immutable_(texture->IsImmutable()) {
+ // TODO
+ DCHECK(!texture->level_infos_.empty());
+ DCHECK(!texture->level_infos_[0].empty());
+ DCHECK(!texture->NeedsMips());
+ DCHECK(texture->level_infos_[0][0].width);
+ DCHECK(texture->level_infos_[0][0].height);
+
+ scoped_refptr<gfx::GLImage> gl_image(
+ new GLImageSync(image_buffer_,
+ gfx::Size(texture->level_infos_[0][0].width,
+ texture->level_infos_[0][0].height)));
+ texture->SetLevelImage(NULL, target, 0, gl_image.get());
+
+ // TODO: all levels
+ level_infos_.clear();
+ const Texture::LevelInfo& level = texture->level_infos_[0][0];
+ LevelInfo info(level.target,
+ level.internal_format,
+ level.width,
+ level.height,
+ level.depth,
+ level.border,
+ level.format,
+ level.type,
+ level.cleared);
+ std::vector<LevelInfo> infos;
+ infos.push_back(info);
+ level_infos_.push_back(infos);
+}
+
+TextureDefinition::~TextureDefinition() {
+}
+
+Texture* TextureDefinition::CreateTexture() const {
+ if (!image_buffer_.get())
+ return NULL;
+
+ GLuint texture_id;
+ glGenTextures(1, &texture_id);
+
+ Texture* texture(new Texture(texture_id));
+ UpdateTexture(texture);
+
+ return texture;
+}
+
+void TextureDefinition::UpdateTexture(Texture* texture) const {
+ gfx::ScopedTextureBinder texture_binder(target_, texture->service_id());
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, min_filter_);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, mag_filter_);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrap_s_);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrap_t_);
+ if (image_buffer_.get())
+ image_buffer_->BindToTexture(target_);
+ // We have to make sure the changes are visible to other clients in this share
+ // group. As far as the clients are concerned, the mailbox semantics only
+ // demand a single flush from the client after changes are first made,
+ // and it is not visible to them when another share group boundary is crossed.
+ // We could probably track this and be a bit smarter about when to flush
+ // though.
+ glFlush();
+
+ texture->level_infos_.resize(1);
+ for (size_t i = 0; i < level_infos_.size(); i++) {
+ const LevelInfo& base_info = level_infos_[i][0];
+ const size_t levels_needed = TextureManager::ComputeMipMapCount(
+ base_info.target, base_info.width, base_info.height, base_info.depth);
+ DCHECK(level_infos_.size() <= levels_needed);
+ texture->level_infos_[0].resize(levels_needed);
+ for (size_t n = 0; n < level_infos_.size(); n++) {
+ const LevelInfo& info = level_infos_[i][n];
+ texture->SetLevelInfo(NULL,
+ info.target,
+ i,
+ info.internal_format,
+ info.width,
+ info.height,
+ info.depth,
+ info.border,
+ info.format,
+ info.type,
+ info.cleared);
+ }
+ }
+ if (image_buffer_.get()) {
+ texture->SetLevelImage(
+ NULL,
+ target_,
+ 0,
+ new GLImageSync(
+ image_buffer_,
+ gfx::Size(level_infos_[0][0].width, level_infos_[0][0].height)));
+ }
+
+ texture->target_ = target_;
+ texture->SetImmutable(immutable_);
+ texture->min_filter_ = min_filter_;
+ texture->mag_filter_ = mag_filter_;
+ texture->wrap_s_ = wrap_s_;
+ texture->wrap_t_ = wrap_t_;
+ texture->usage_ = usage_;
+}
+
+bool TextureDefinition::Matches(const Texture* texture) const {
+ DCHECK(target_ == texture->target());
+ if (texture->min_filter_ != min_filter_ ||
+ texture->mag_filter_ != mag_filter_ ||
+ texture->wrap_s_ != wrap_s_ ||
+ texture->wrap_t_ != wrap_t_) {
+ return false;
+ }
+
+ // All structural changes should have orphaned the texture.
+ if (image_buffer_.get() && !texture->GetLevelImage(texture->target(), 0))
+ return false;
+
+ return true;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/texture_definition.h b/gpu/command_buffer/service/texture_definition.h
new file mode 100644
index 0000000..6df4b86
--- /dev/null
+++ b/gpu/command_buffer/service/texture_definition.h
@@ -0,0 +1,105 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_TEXTURE_DEFINITION_H_
+#define GPU_COMMAND_BUFFER_SERVICE_TEXTURE_DEFINITION_H_
+
+#include <vector>
+
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+
+namespace gfx {
+class GLImage;
+}
+
+namespace gpu {
+namespace gles2 {
+
+class Texture;
+
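+// Abstraction over a native buffer that backs a texture image and can be
+// shared across contexts. GLImage clients register themselves via
+// AddClient/RemoveClient and bracket access with the WillRead/WillWrite and
+// DidRead/DidWrite hooks.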
+class NativeImageBuffer : public base::RefCountedThreadSafe<NativeImageBuffer> {
+ public:
+ static scoped_refptr<NativeImageBuffer> Create(GLuint texture_id);
+
+ virtual void AddClient(gfx::GLImage* client) = 0;
+ virtual void RemoveClient(gfx::GLImage* client) = 0;
+ virtual bool IsClient(gfx::GLImage* client) = 0;
+ virtual void BindToTexture(GLenum target) = 0;
+ virtual void WillRead(gfx::GLImage* client) = 0;
+ virtual void WillWrite(gfx::GLImage* client) = 0;
+ virtual void DidRead(gfx::GLImage* client) = 0;
+ virtual void DidWrite(gfx::GLImage* client) = 0;
+
+ protected:
+ friend class base::RefCountedThreadSafe<NativeImageBuffer>;
+ NativeImageBuffer() {}
+ virtual ~NativeImageBuffer() {}
+
+ DISALLOW_COPY_AND_ASSIGN(NativeImageBuffer);
+};
+
+// An immutable description that can be used to create a texture that shares
+// the underlying image buffer(s).
+class TextureDefinition {
+ public:
+ TextureDefinition(GLenum target,
+ Texture* texture,
+ unsigned int version,
+ const scoped_refptr<NativeImageBuffer>& image);
+ virtual ~TextureDefinition();
+
+ Texture* CreateTexture() const;
+ void UpdateTexture(Texture* texture) const;
+
+ unsigned int version() const { return version_; }
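+  // Compares 32-bit version counters with wraparound: |version_| is treated
+  // as older as long as it is less than half the counter range behind
+  // |version|.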
+ bool IsOlderThan(unsigned int version) const {
+ return (version - version_) < 0x80000000;
+ }
+ bool Matches(const Texture* texture) const;
+
+ scoped_refptr<NativeImageBuffer> image() { return image_buffer_; }
+
+ private:
+ struct LevelInfo {
+ LevelInfo(GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared);
+ ~LevelInfo();
+
+ GLenum target;
+ GLenum internal_format;
+ GLsizei width;
+ GLsizei height;
+ GLsizei depth;
+ GLint border;
+ GLenum format;
+ GLenum type;
+ bool cleared;
+ };
+
+ typedef std::vector<std::vector<LevelInfo> > LevelInfos;
+
+ unsigned int version_;
+ GLenum target_;
+ scoped_refptr<NativeImageBuffer> image_buffer_;
+ GLenum min_filter_;
+ GLenum mag_filter_;
+ GLenum wrap_s_;
+ GLenum wrap_t_;
+ GLenum usage_;
+ bool immutable_;
+ LevelInfos level_infos_;
+};
+
+}  // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_TEXTURE_DEFINITION_H_
diff --git a/gpu/command_buffer/service/texture_manager.cc b/gpu/command_buffer/service/texture_manager.cc
new file mode 100644
index 0000000..bfbdcb1
--- /dev/null
+++ b/gpu/command_buffer/service/texture_manager.cc
@@ -0,0 +1,1634 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/texture_manager.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "base/bits.h"
+#include "base/strings/stringprintf.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This should contain everything to uniquely identify a Texture.
+static const char TextureTag[] = "|Texture|";
+struct TextureSignature {
+ GLenum target_;
+ GLint level_;
+ GLenum min_filter_;
+ GLenum mag_filter_;
+ GLenum wrap_s_;
+ GLenum wrap_t_;
+ GLenum usage_;
+ GLenum internal_format_;
+ GLsizei width_;
+ GLsizei height_;
+ GLsizei depth_;
+ GLint border_;
+ GLenum format_;
+ GLenum type_;
+ bool has_image_;
+ bool can_render_;
+ bool can_render_to_;
+ bool npot_;
+
+  // Since we will be hashing this signature structure, the padding must be
+  // zero initialized. Although the C++11 standard specifies this, we use a
+  // constructor with a memset to enforce it rather than relying on compilers
+  // adhering to this deep, dark corner of the specification.
+ TextureSignature(GLenum target,
+ GLint level,
+ GLenum min_filter,
+ GLenum mag_filter,
+ GLenum wrap_s,
+ GLenum wrap_t,
+ GLenum usage,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool has_image,
+ bool can_render,
+ bool can_render_to,
+ bool npot) {
+ memset(this, 0, sizeof(TextureSignature));
+ target_ = target;
+ level_ = level;
+ min_filter_ = min_filter;
+ mag_filter_ = mag_filter;
+ wrap_s_ = wrap_s;
+ wrap_t_ = wrap_t;
+ usage_ = usage;
+ internal_format_ = internal_format;
+ width_ = width;
+ height_ = height;
+ depth_ = depth;
+ border_ = border;
+ format_ = format;
+ type_ = type;
+ has_image_ = has_image;
+ can_render_ = can_render;
+ can_render_to_ = can_render_to;
+ npot_ = npot;
+ }
+};
+
+TextureManager::DestructionObserver::DestructionObserver() {}
+
+TextureManager::DestructionObserver::~DestructionObserver() {}
+
+TextureManager::~TextureManager() {
+ for (unsigned int i = 0; i < destruction_observers_.size(); i++)
+ destruction_observers_[i]->OnTextureManagerDestroying(this);
+
+ DCHECK(textures_.empty());
+
+  // If this triggers, it means something is keeping a reference to
+  // a Texture belonging to this manager.
+ CHECK_EQ(texture_count_, 0u);
+
+ DCHECK_EQ(0, num_unrenderable_textures_);
+ DCHECK_EQ(0, num_unsafe_textures_);
+ DCHECK_EQ(0, num_uncleared_mips_);
+ DCHECK_EQ(0, num_images_);
+}
+
+void TextureManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ textures_.clear();
+ for (int ii = 0; ii < kNumDefaultTextures; ++ii) {
+ default_textures_[ii] = NULL;
+ }
+
+ if (have_context) {
+ glDeleteTextures(arraysize(black_texture_ids_), black_texture_ids_);
+ }
+
+ DCHECK_EQ(0u, memory_tracker_managed_->GetMemRepresented());
+ DCHECK_EQ(0u, memory_tracker_unmanaged_->GetMemRepresented());
+}
+
+Texture::Texture(GLuint service_id)
+ : mailbox_manager_(NULL),
+ memory_tracking_ref_(NULL),
+ service_id_(service_id),
+ cleared_(true),
+ num_uncleared_mips_(0),
+ target_(0),
+ min_filter_(GL_NEAREST_MIPMAP_LINEAR),
+ mag_filter_(GL_LINEAR),
+ wrap_s_(GL_REPEAT),
+ wrap_t_(GL_REPEAT),
+ usage_(GL_NONE),
+ pool_(GL_TEXTURE_POOL_UNMANAGED_CHROMIUM),
+ max_level_set_(-1),
+ texture_complete_(false),
+ cube_complete_(false),
+ npot_(false),
+ has_been_bound_(false),
+ framebuffer_attachment_count_(0),
+ immutable_(false),
+ has_images_(false),
+ estimated_size_(0),
+ can_render_condition_(CAN_RENDER_ALWAYS),
+ texture_max_anisotropy_initialized_(false) {
+}
+
+Texture::~Texture() {
+ if (mailbox_manager_)
+ mailbox_manager_->TextureDeleted(this);
+}
+
+void Texture::AddTextureRef(TextureRef* ref) {
+ DCHECK(refs_.find(ref) == refs_.end());
+ refs_.insert(ref);
+ if (!memory_tracking_ref_) {
+ memory_tracking_ref_ = ref;
+ GetMemTracker()->TrackMemAlloc(estimated_size());
+ }
+}
+
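+// Drops |ref| from the ref set. When the last ref goes away the Texture
+// deletes itself (and the GL texture if a context is still current);
+// otherwise memory accounting is handed off to another remaining ref.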
+void Texture::RemoveTextureRef(TextureRef* ref, bool have_context) {
+ if (memory_tracking_ref_ == ref) {
+ GetMemTracker()->TrackMemFree(estimated_size());
+ memory_tracking_ref_ = NULL;
+ }
+ size_t result = refs_.erase(ref);
+ DCHECK_EQ(result, 1u);
+ if (refs_.empty()) {
+ if (have_context) {
+ GLuint id = service_id();
+ glDeleteTextures(1, &id);
+ }
+ delete this;
+ } else if (memory_tracking_ref_ == NULL) {
+ // TODO(piman): tune ownership semantics for cross-context group shared
+ // textures.
+ memory_tracking_ref_ = *refs_.begin();
+ GetMemTracker()->TrackMemAlloc(estimated_size());
+ }
+}
+
+MemoryTypeTracker* Texture::GetMemTracker() {
+ DCHECK(memory_tracking_ref_);
+ return memory_tracking_ref_->manager()->GetMemTracker(pool_);
+}
+
+Texture::LevelInfo::LevelInfo()
+ : cleared(true),
+ target(0),
+ level(-1),
+ internal_format(0),
+ width(0),
+ height(0),
+ depth(0),
+ border(0),
+ format(0),
+ type(0),
+ estimated_size(0) {
+}
+
+Texture::LevelInfo::LevelInfo(const LevelInfo& rhs)
+ : cleared(rhs.cleared),
+ target(rhs.target),
+ level(rhs.level),
+ internal_format(rhs.internal_format),
+ width(rhs.width),
+ height(rhs.height),
+ depth(rhs.depth),
+ border(rhs.border),
+ format(rhs.format),
+ type(rhs.type),
+ image(rhs.image),
+ estimated_size(rhs.estimated_size) {
+}
+
+Texture::LevelInfo::~LevelInfo() {
+}
+
+Texture::CanRenderCondition Texture::GetCanRenderCondition() const {
+ if (target_ == 0)
+ return CAN_RENDER_ALWAYS;
+
+ if (target_ != GL_TEXTURE_EXTERNAL_OES) {
+ if (level_infos_.empty()) {
+ return CAN_RENDER_NEVER;
+ }
+
+ const Texture::LevelInfo& first_face = level_infos_[0][0];
+ if (first_face.width == 0 ||
+ first_face.height == 0 ||
+ first_face.depth == 0) {
+ return CAN_RENDER_NEVER;
+ }
+ }
+
+ bool needs_mips = NeedsMips();
+ if (needs_mips) {
+ if (!texture_complete())
+ return CAN_RENDER_NEVER;
+ if (target_ == GL_TEXTURE_CUBE_MAP && !cube_complete())
+ return CAN_RENDER_NEVER;
+ }
+
+ bool is_npot_compatible = !needs_mips &&
+ wrap_s_ == GL_CLAMP_TO_EDGE &&
+ wrap_t_ == GL_CLAMP_TO_EDGE;
+
+ if (!is_npot_compatible) {
+ if (target_ == GL_TEXTURE_RECTANGLE_ARB)
+ return CAN_RENDER_NEVER;
+ else if (npot())
+ return CAN_RENDER_ONLY_IF_NPOT;
+ }
+
+ return CAN_RENDER_ALWAYS;
+}
+
+bool Texture::CanRender(const FeatureInfo* feature_info) const {
+ switch (can_render_condition_) {
+ case CAN_RENDER_ALWAYS:
+ return true;
+ case CAN_RENDER_NEVER:
+ return false;
+ case CAN_RENDER_ONLY_IF_NPOT:
+ break;
+ }
+ return feature_info->feature_flags().npot_ok;
+}
+
+void Texture::AddToSignature(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLint level,
+ std::string* signature) const {
+ DCHECK(feature_info);
+ DCHECK(signature);
+ DCHECK_GE(level, 0);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK_LT(static_cast<size_t>(face_index),
+ level_infos_.size());
+ DCHECK_LT(static_cast<size_t>(level),
+ level_infos_[face_index].size());
+
+ const Texture::LevelInfo& info =
+ level_infos_[face_index][level];
+
+ TextureSignature signature_data(target,
+ level,
+ min_filter_,
+ mag_filter_,
+ wrap_s_,
+ wrap_t_,
+ usage_,
+ info.internal_format,
+ info.width,
+ info.height,
+ info.depth,
+ info.border,
+ info.format,
+ info.type,
+ info.image.get() != NULL,
+ CanRender(feature_info),
+ CanRenderTo(),
+ npot_);
+
+ signature->append(TextureTag, sizeof(TextureTag));
+ signature->append(reinterpret_cast<const char*>(&signature_data),
+ sizeof(signature_data));
+}
+
+void Texture::SetMailboxManager(MailboxManager* mailbox_manager) {
+ DCHECK(!mailbox_manager_ || mailbox_manager_ == mailbox_manager);
+ mailbox_manager_ = mailbox_manager;
+}
+
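+// Records the level info that generating mipmaps would produce: each
+// successive level halves the previous dimensions (clamped to 1) and inherits
+// the base level's format and type.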
+bool Texture::MarkMipmapsGenerated(
+ const FeatureInfo* feature_info) {
+ if (!CanGenerateMipmaps(feature_info)) {
+ return false;
+ }
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ const Texture::LevelInfo& info1 = level_infos_[ii][0];
+ GLsizei width = info1.width;
+ GLsizei height = info1.height;
+ GLsizei depth = info1.depth;
+ GLenum target = target_ == GL_TEXTURE_2D ? GL_TEXTURE_2D :
+ GLES2Util::IndexToGLFaceTarget(ii);
+ int num_mips =
+ TextureManager::ComputeMipMapCount(target_, width, height, depth);
+ for (int level = 1; level < num_mips; ++level) {
+ width = std::max(1, width >> 1);
+ height = std::max(1, height >> 1);
+ depth = std::max(1, depth >> 1);
+ SetLevelInfo(feature_info,
+ target,
+ level,
+ info1.internal_format,
+ width,
+ height,
+ depth,
+ info1.border,
+ info1.format,
+ info1.type,
+ true);
+ }
+ }
+
+ return true;
+}
+
+void Texture::SetTarget(
+ const FeatureInfo* feature_info, GLenum target, GLint max_levels) {
+ DCHECK_EQ(0u, target_); // you can only set this once.
+ target_ = target;
+ size_t num_faces = (target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ level_infos_.resize(num_faces);
+ for (size_t ii = 0; ii < num_faces; ++ii) {
+ level_infos_[ii].resize(max_levels);
+ }
+
+ if (target == GL_TEXTURE_EXTERNAL_OES || target == GL_TEXTURE_RECTANGLE_ARB) {
+ min_filter_ = GL_LINEAR;
+ wrap_s_ = wrap_t_ = GL_CLAMP_TO_EDGE;
+ }
+
+ if (target == GL_TEXTURE_EXTERNAL_OES) {
+ immutable_ = true;
+ }
+ Update(feature_info);
+ UpdateCanRenderCondition();
+}
+
+bool Texture::CanGenerateMipmaps(
+ const FeatureInfo* feature_info) const {
+ if ((npot() && !feature_info->feature_flags().npot_ok) ||
+ level_infos_.empty() ||
+ target_ == GL_TEXTURE_EXTERNAL_OES ||
+ target_ == GL_TEXTURE_RECTANGLE_ARB) {
+ return false;
+ }
+
+ // Can't generate mips for depth or stencil textures.
+ const Texture::LevelInfo& first = level_infos_[0][0];
+ uint32 channels = GLES2Util::GetChannelsForFormat(first.format);
+ if (channels & (GLES2Util::kDepth | GLES2Util::kStencil)) {
+ return false;
+ }
+
+ // TODO(gman): Check internal_format, format and type.
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ const LevelInfo& info = level_infos_[ii][0];
+ if ((info.target == 0) || (info.width != first.width) ||
+ (info.height != first.height) || (info.depth != 1) ||
+ (info.format != first.format) ||
+ (info.internal_format != first.internal_format) ||
+ (info.type != first.type) ||
+ feature_info->validators()->compressed_texture_format.IsValid(
+ info.internal_format) ||
+ info.image.get()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void Texture::SetLevelCleared(GLenum target, GLint level, bool cleared) {
+ DCHECK_GE(level, 0);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK_LT(static_cast<size_t>(face_index),
+ level_infos_.size());
+ DCHECK_LT(static_cast<size_t>(level),
+ level_infos_[face_index].size());
+ Texture::LevelInfo& info =
+ level_infos_[face_index][level];
+ UpdateMipCleared(&info, cleared);
+ UpdateCleared();
+}
+
+void Texture::UpdateCleared() {
+ if (level_infos_.empty()) {
+ return;
+ }
+
+ const Texture::LevelInfo& first_face = level_infos_[0][0];
+ int levels_needed = TextureManager::ComputeMipMapCount(
+ target_, first_face.width, first_face.height, first_face.depth);
+ bool cleared = true;
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ for (GLint jj = 0; jj < levels_needed; ++jj) {
+ const Texture::LevelInfo& info = level_infos_[ii][jj];
+ if (info.width > 0 && info.height > 0 && info.depth > 0 &&
+ !info.cleared) {
+ cleared = false;
+ break;
+ }
+ }
+ }
+
+ // If texture is uncleared and is attached to a framebuffer,
+ // that framebuffer must be marked possibly incomplete.
+ if (!cleared && IsAttachedToFramebuffer()) {
+ IncAllFramebufferStateChangeCount();
+ }
+
+ UpdateSafeToRenderFrom(cleared);
+}
+
+void Texture::UpdateSafeToRenderFrom(bool cleared) {
+ if (cleared_ == cleared)
+ return;
+ cleared_ = cleared;
+ int delta = cleared ? -1 : +1;
+ for (RefSet::iterator it = refs_.begin(); it != refs_.end(); ++it)
+ (*it)->manager()->UpdateSafeToRenderFrom(delta);
+}
+
+void Texture::UpdateMipCleared(LevelInfo* info, bool cleared) {
+ if (info->cleared == cleared)
+ return;
+ info->cleared = cleared;
+ int delta = cleared ? -1 : +1;
+ num_uncleared_mips_ += delta;
+ for (RefSet::iterator it = refs_.begin(); it != refs_.end(); ++it)
+ (*it)->manager()->UpdateUnclearedMips(delta);
+}
+
+void Texture::UpdateCanRenderCondition() {
+ CanRenderCondition can_render_condition = GetCanRenderCondition();
+ if (can_render_condition_ == can_render_condition)
+ return;
+ for (RefSet::iterator it = refs_.begin(); it != refs_.end(); ++it)
+ (*it)->manager()->UpdateCanRenderCondition(can_render_condition_,
+ can_render_condition);
+ can_render_condition_ = can_render_condition;
+}
+
+void Texture::UpdateHasImages() {
+ if (level_infos_.empty())
+ return;
+
+ bool has_images = false;
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ for (size_t jj = 0; jj < level_infos_[ii].size(); ++jj) {
+ const Texture::LevelInfo& info = level_infos_[ii][jj];
+ if (info.image.get() != NULL) {
+ has_images = true;
+ break;
+ }
+ }
+ }
+
+ if (has_images_ == has_images)
+ return;
+ has_images_ = has_images;
+ int delta = has_images ? +1 : -1;
+ for (RefSet::iterator it = refs_.begin(); it != refs_.end(); ++it)
+ (*it)->manager()->UpdateNumImages(delta);
+}
+
+void Texture::IncAllFramebufferStateChangeCount() {
+ for (RefSet::iterator it = refs_.begin(); it != refs_.end(); ++it)
+ (*it)->manager()->IncFramebufferStateChangeCount();
+}
+
+void Texture::SetLevelInfo(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared) {
+ DCHECK_GE(level, 0);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK_LT(static_cast<size_t>(face_index),
+ level_infos_.size());
+ DCHECK_LT(static_cast<size_t>(level),
+ level_infos_[face_index].size());
+ DCHECK_GE(width, 0);
+ DCHECK_GE(height, 0);
+ DCHECK_GE(depth, 0);
+ Texture::LevelInfo& info =
+ level_infos_[face_index][level];
+ info.target = target;
+ info.level = level;
+ info.internal_format = internal_format;
+ info.width = width;
+ info.height = height;
+ info.depth = depth;
+ info.border = border;
+ info.format = format;
+ info.type = type;
+ info.image = 0;
+
+ estimated_size_ -= info.estimated_size;
+ GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, 4, &info.estimated_size, NULL, NULL);
+ estimated_size_ += info.estimated_size;
+
+ UpdateMipCleared(&info, cleared);
+ max_level_set_ = std::max(max_level_set_, level);
+ Update(feature_info);
+ UpdateCleared();
+ UpdateCanRenderCondition();
+ UpdateHasImages();
+ if (IsAttachedToFramebuffer()) {
+ // TODO(gman): If textures tracked which framebuffers they were attached to
+ // we could just mark those framebuffers as not complete.
+ IncAllFramebufferStateChangeCount();
+ }
+}
+
+bool Texture::ValidForTexture(
+ GLint target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum type) const {
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (level >= 0 && face_index < level_infos_.size() &&
+ static_cast<size_t>(level) < level_infos_[face_index].size()) {
+ const LevelInfo& info = level_infos_[face_index][level];
+ int32 right;
+ int32 top;
+ return SafeAddInt32(xoffset, width, &right) &&
+ SafeAddInt32(yoffset, height, &top) &&
+ xoffset >= 0 &&
+ yoffset >= 0 &&
+ right <= info.width &&
+ top <= info.height &&
+ type == info.type;
+ }
+ return false;
+}
+
+bool Texture::GetLevelSize(
+ GLint target, GLint level, GLsizei* width, GLsizei* height) const {
+ DCHECK(width);
+ DCHECK(height);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (level >= 0 && face_index < level_infos_.size() &&
+ static_cast<size_t>(level) < level_infos_[face_index].size()) {
+ const LevelInfo& info = level_infos_[face_index][level];
+ if (info.target != 0) {
+ *width = info.width;
+ *height = info.height;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Texture::GetLevelType(
+ GLint target, GLint level, GLenum* type, GLenum* internal_format) const {
+ DCHECK(type);
+ DCHECK(internal_format);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (level >= 0 && face_index < level_infos_.size() &&
+ static_cast<size_t>(level) < level_infos_[face_index].size()) {
+ const LevelInfo& info = level_infos_[face_index][level];
+ if (info.target != 0) {
+ *type = info.type;
+ *internal_format = info.internal_format;
+ return true;
+ }
+ }
+ return false;
+}
+
+GLenum Texture::SetParameteri(
+ const FeatureInfo* feature_info, GLenum pname, GLint param) {
+ DCHECK(feature_info);
+
+ if (target_ == GL_TEXTURE_EXTERNAL_OES ||
+ target_ == GL_TEXTURE_RECTANGLE_ARB) {
+ if (pname == GL_TEXTURE_MIN_FILTER &&
+ (param != GL_NEAREST && param != GL_LINEAR))
+ return GL_INVALID_ENUM;
+ if ((pname == GL_TEXTURE_WRAP_S || pname == GL_TEXTURE_WRAP_T) &&
+ param != GL_CLAMP_TO_EDGE)
+ return GL_INVALID_ENUM;
+ }
+
+ switch (pname) {
+ case GL_TEXTURE_MIN_FILTER:
+ if (!feature_info->validators()->texture_min_filter_mode.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ min_filter_ = param;
+ break;
+ case GL_TEXTURE_MAG_FILTER:
+ if (!feature_info->validators()->texture_mag_filter_mode.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ mag_filter_ = param;
+ break;
+ case GL_TEXTURE_POOL_CHROMIUM:
+ if (!feature_info->validators()->texture_pool.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ GetMemTracker()->TrackMemFree(estimated_size());
+ pool_ = param;
+ GetMemTracker()->TrackMemAlloc(estimated_size());
+ break;
+ case GL_TEXTURE_WRAP_S:
+ if (!feature_info->validators()->texture_wrap_mode.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ wrap_s_ = param;
+ break;
+ case GL_TEXTURE_WRAP_T:
+ if (!feature_info->validators()->texture_wrap_mode.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ wrap_t_ = param;
+ break;
+ case GL_TEXTURE_MAX_ANISOTROPY_EXT:
+ if (param < 1) {
+ return GL_INVALID_VALUE;
+ }
+ break;
+ case GL_TEXTURE_USAGE_ANGLE:
+ if (!feature_info->validators()->texture_usage.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ usage_ = param;
+ break;
+ default:
+ NOTREACHED();
+ return GL_INVALID_ENUM;
+ }
+ Update(feature_info);
+ UpdateCleared();
+ UpdateCanRenderCondition();
+ return GL_NO_ERROR;
+}
+
+GLenum Texture::SetParameterf(
+ const FeatureInfo* feature_info, GLenum pname, GLfloat param) {
+ switch (pname) {
+ case GL_TEXTURE_MIN_FILTER:
+ case GL_TEXTURE_MAG_FILTER:
+ case GL_TEXTURE_POOL_CHROMIUM:
+ case GL_TEXTURE_WRAP_S:
+ case GL_TEXTURE_WRAP_T:
+ case GL_TEXTURE_USAGE_ANGLE:
+ {
+ GLint iparam = static_cast<GLint>(param);
+ return SetParameteri(feature_info, pname, iparam);
+ }
+ case GL_TEXTURE_MAX_ANISOTROPY_EXT:
+ if (param < 1.f) {
+ return GL_INVALID_VALUE;
+ }
+ break;
+ default:
+ NOTREACHED();
+ return GL_INVALID_ENUM;
+ }
+ return GL_NO_ERROR;
+}
+
+void Texture::Update(const FeatureInfo* feature_info) {
+ // Update npot status.
+  // Assume GL_TEXTURE_EXTERNAL_OES textures are npot; all others are not
+  // until an NPOT level is found below.
+ npot_ = target_ == GL_TEXTURE_EXTERNAL_OES;
+
+ if (level_infos_.empty()) {
+ texture_complete_ = false;
+ cube_complete_ = false;
+ return;
+ }
+
+  // Check whether the first mip of any face is NPOT.
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ const Texture::LevelInfo& info = level_infos_[ii][0];
+ if (GLES2Util::IsNPOT(info.width) ||
+ GLES2Util::IsNPOT(info.height) ||
+ GLES2Util::IsNPOT(info.depth)) {
+ npot_ = true;
+ break;
+ }
+ }
+
+ // Update texture_complete and cube_complete status.
+ const Texture::LevelInfo& first_face = level_infos_[0][0];
+ int levels_needed = TextureManager::ComputeMipMapCount(
+ target_, first_face.width, first_face.height, first_face.depth);
+ texture_complete_ =
+ max_level_set_ >= (levels_needed - 1) && max_level_set_ >= 0;
+ cube_complete_ = (level_infos_.size() == 6) &&
+ (first_face.width == first_face.height);
+
+ if (first_face.width == 0 || first_face.height == 0) {
+ texture_complete_ = false;
+ }
+ if (first_face.type == GL_FLOAT &&
+ !feature_info->feature_flags().enable_texture_float_linear &&
+ (min_filter_ != GL_NEAREST_MIPMAP_NEAREST ||
+ mag_filter_ != GL_NEAREST)) {
+ texture_complete_ = false;
+ } else if (first_face.type == GL_HALF_FLOAT_OES &&
+ !feature_info->feature_flags().enable_texture_half_float_linear &&
+ (min_filter_ != GL_NEAREST_MIPMAP_NEAREST ||
+ mag_filter_ != GL_NEAREST)) {
+ texture_complete_ = false;
+ }
+ for (size_t ii = 0;
+ ii < level_infos_.size() && (cube_complete_ || texture_complete_);
+ ++ii) {
+ const Texture::LevelInfo& level0 = level_infos_[ii][0];
+ if (level0.target == 0 ||
+ level0.width != first_face.width ||
+ level0.height != first_face.height ||
+ level0.depth != 1 ||
+ level0.internal_format != first_face.internal_format ||
+ level0.format != first_face.format ||
+ level0.type != first_face.type) {
+ cube_complete_ = false;
+ }
+ // Get level0 dimensions
+ GLsizei width = level0.width;
+ GLsizei height = level0.height;
+ GLsizei depth = level0.depth;
+ for (GLint jj = 1; jj < levels_needed; ++jj) {
+ // compute required size for mip.
+ width = std::max(1, width >> 1);
+ height = std::max(1, height >> 1);
+ depth = std::max(1, depth >> 1);
+ const Texture::LevelInfo& info = level_infos_[ii][jj];
+ if (info.target == 0 ||
+ info.width != width ||
+ info.height != height ||
+ info.depth != depth ||
+ info.internal_format != level0.internal_format ||
+ info.format != level0.format ||
+ info.type != level0.type) {
+ texture_complete_ = false;
+ break;
+ }
+ }
+ }
+}
+
+bool Texture::ClearRenderableLevels(GLES2Decoder* decoder) {
+ DCHECK(decoder);
+ if (cleared_) {
+ return true;
+ }
+
+ const Texture::LevelInfo& first_face = level_infos_[0][0];
+ int levels_needed = TextureManager::ComputeMipMapCount(
+ target_, first_face.width, first_face.height, first_face.depth);
+
+ for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
+ for (GLint jj = 0; jj < levels_needed; ++jj) {
+ Texture::LevelInfo& info = level_infos_[ii][jj];
+ if (info.target != 0) {
+ if (!ClearLevel(decoder, info.target, jj)) {
+ return false;
+ }
+ }
+ }
+ }
+ UpdateSafeToRenderFrom(true);
+ return true;
+}
+
+bool Texture::IsLevelCleared(GLenum target, GLint level) const {
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (face_index >= level_infos_.size() ||
+ level >= static_cast<GLint>(level_infos_[face_index].size())) {
+ return true;
+ }
+
+ const Texture::LevelInfo& info = level_infos_[face_index][level];
+
+ return info.cleared;
+}
+
+void Texture::InitTextureMaxAnisotropyIfNeeded(GLenum target) {
+ if (texture_max_anisotropy_initialized_)
+ return;
+ texture_max_anisotropy_initialized_ = true;
+ GLfloat params[] = { 1.0f };
+ glTexParameterfv(target, GL_TEXTURE_MAX_ANISOTROPY_EXT, params);
+}
+
+bool Texture::ClearLevel(
+ GLES2Decoder* decoder, GLenum target, GLint level) {
+ DCHECK(decoder);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (face_index >= level_infos_.size() ||
+ level >= static_cast<GLint>(level_infos_[face_index].size())) {
+ return true;
+ }
+
+ Texture::LevelInfo& info = level_infos_[face_index][level];
+
+ DCHECK(target == info.target);
+
+ if (info.target == 0 ||
+ info.cleared ||
+ info.width == 0 ||
+ info.height == 0 ||
+ info.depth == 0) {
+ return true;
+ }
+
+ // NOTE: It seems kind of gross to call back into the decoder for this
+ // but only the decoder knows all the state (like unpack_alignment_) that's
+ // needed to be able to call GL correctly.
+ bool cleared = decoder->ClearLevel(
+ service_id_, target_, info.target, info.level, info.internal_format,
+ info.format, info.type, info.width, info.height, immutable_);
+ UpdateMipCleared(&info, cleared);
+ return info.cleared;
+}
+
+void Texture::SetLevelImage(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLint level,
+ gfx::GLImage* image) {
+ DCHECK_GE(level, 0);
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK_LT(static_cast<size_t>(face_index),
+ level_infos_.size());
+ DCHECK_LT(static_cast<size_t>(level),
+ level_infos_[face_index].size());
+ Texture::LevelInfo& info =
+ level_infos_[face_index][level];
+ DCHECK_EQ(info.target, target);
+ DCHECK_EQ(info.level, level);
+ info.image = image;
+ UpdateCanRenderCondition();
+ UpdateHasImages();
+}
+
+gfx::GLImage* Texture::GetLevelImage(GLint target, GLint level) const {
+ if (target != GL_TEXTURE_2D && target != GL_TEXTURE_EXTERNAL_OES &&
+ target != GL_TEXTURE_RECTANGLE_ARB) {
+ return NULL;
+ }
+
+ size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
+ if (level >= 0 && face_index < level_infos_.size() &&
+ static_cast<size_t>(level) < level_infos_[face_index].size()) {
+ const LevelInfo& info = level_infos_[face_index][level];
+ if (info.target != 0) {
+ return info.image.get();
+ }
+ }
+ return NULL;
+}
+
+void Texture::OnWillModifyPixels() {
+ gfx::GLImage* image = GetLevelImage(target(), 0);
+ if (image)
+ image->WillModifyTexImage();
+}
+
+void Texture::OnDidModifyPixels() {
+ gfx::GLImage* image = GetLevelImage(target(), 0);
+ if (image)
+ image->DidModifyTexImage();
+}
+
+TextureRef::TextureRef(TextureManager* manager,
+ GLuint client_id,
+ Texture* texture)
+ : manager_(manager),
+ texture_(texture),
+ client_id_(client_id),
+ num_observers_(0) {
+ DCHECK(manager_);
+ DCHECK(texture_);
+ texture_->AddTextureRef(this);
+ manager_->StartTracking(this);
+}
+
+scoped_refptr<TextureRef> TextureRef::Create(TextureManager* manager,
+ GLuint client_id,
+ GLuint service_id) {
+ return new TextureRef(manager, client_id, new Texture(service_id));
+}
+
+TextureRef::~TextureRef() {
+ manager_->StopTracking(this);
+ texture_->RemoveTextureRef(this, manager_->have_context_);
+ manager_ = NULL;
+}
+
+TextureManager::TextureManager(MemoryTracker* memory_tracker,
+ FeatureInfo* feature_info,
+ GLint max_texture_size,
+ GLint max_cube_map_texture_size,
+ bool use_default_textures)
+ : memory_tracker_managed_(
+ new MemoryTypeTracker(memory_tracker, MemoryTracker::kManaged)),
+ memory_tracker_unmanaged_(
+ new MemoryTypeTracker(memory_tracker, MemoryTracker::kUnmanaged)),
+ feature_info_(feature_info),
+ framebuffer_manager_(NULL),
+ max_texture_size_(max_texture_size),
+ max_cube_map_texture_size_(max_cube_map_texture_size),
+ max_levels_(ComputeMipMapCount(GL_TEXTURE_2D,
+ max_texture_size,
+ max_texture_size,
+ max_texture_size)),
+ max_cube_map_levels_(ComputeMipMapCount(GL_TEXTURE_CUBE_MAP,
+ max_cube_map_texture_size,
+ max_cube_map_texture_size,
+ max_cube_map_texture_size)),
+ use_default_textures_(use_default_textures),
+ num_unrenderable_textures_(0),
+ num_unsafe_textures_(0),
+ num_uncleared_mips_(0),
+ num_images_(0),
+ texture_count_(0),
+ have_context_(true) {
+ for (int ii = 0; ii < kNumDefaultTextures; ++ii) {
+ black_texture_ids_[ii] = 0;
+ }
+}
+
+bool TextureManager::Initialize() {
+  // TODO(gman): The default textures have to be real textures, not the 0
+  // texture, because we simulate non-shared resources on top of shared
+  // resources and all contexts that share resources share the same default
+  // texture.
+ default_textures_[kTexture2D] = CreateDefaultAndBlackTextures(
+ GL_TEXTURE_2D, &black_texture_ids_[kTexture2D]);
+ default_textures_[kCubeMap] = CreateDefaultAndBlackTextures(
+ GL_TEXTURE_CUBE_MAP, &black_texture_ids_[kCubeMap]);
+
+ if (feature_info_->feature_flags().oes_egl_image_external) {
+ default_textures_[kExternalOES] = CreateDefaultAndBlackTextures(
+ GL_TEXTURE_EXTERNAL_OES, &black_texture_ids_[kExternalOES]);
+ }
+
+ if (feature_info_->feature_flags().arb_texture_rectangle) {
+ default_textures_[kRectangleARB] = CreateDefaultAndBlackTextures(
+ GL_TEXTURE_RECTANGLE_ARB, &black_texture_ids_[kRectangleARB]);
+ }
+
+ return true;
+}
+
+scoped_refptr<TextureRef>
+ TextureManager::CreateDefaultAndBlackTextures(
+ GLenum target,
+ GLuint* black_texture) {
+ static uint8 black[] = {0, 0, 0, 255};
+
+ // Sampling a texture not associated with any EGLImage sibling will return
+ // black values according to the spec.
+ bool needs_initialization = (target != GL_TEXTURE_EXTERNAL_OES);
+ bool needs_faces = (target == GL_TEXTURE_CUBE_MAP);
+
+ // Make default textures and texture for replacing non-renderable textures.
+ GLuint ids[2];
+ const int num_ids = use_default_textures_ ? 2 : 1;
+ glGenTextures(num_ids, ids);
+ for (int ii = 0; ii < num_ids; ++ii) {
+ glBindTexture(target, ids[ii]);
+ if (needs_initialization) {
+ if (needs_faces) {
+ for (int jj = 0; jj < GLES2Util::kNumFaces; ++jj) {
+ glTexImage2D(GLES2Util::IndexToGLFaceTarget(jj), 0, GL_RGBA, 1, 1, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, black);
+ }
+ } else {
+ glTexImage2D(target, 0, GL_RGBA, 1, 1, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, black);
+ }
+ }
+ }
+ glBindTexture(target, 0);
+
+ scoped_refptr<TextureRef> default_texture;
+ if (use_default_textures_) {
+ default_texture = TextureRef::Create(this, 0, ids[1]);
+ SetTarget(default_texture.get(), target);
+ if (needs_faces) {
+ for (int ii = 0; ii < GLES2Util::kNumFaces; ++ii) {
+ SetLevelInfo(default_texture.get(),
+ GLES2Util::IndexToGLFaceTarget(ii),
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ }
+ } else {
+ if (needs_initialization) {
+ SetLevelInfo(default_texture.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ } else {
+ SetLevelInfo(default_texture.get(),
+ GL_TEXTURE_EXTERNAL_OES,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ }
+ }
+ }
+
+ *black_texture = ids[0];
+ return default_texture;
+}
+
+bool TextureManager::ValidForTarget(
+ GLenum target, GLint level, GLsizei width, GLsizei height, GLsizei depth) {
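+  // Each successive mip level may be at most half the size of the previous
+  // one, so the largest dimension allowed at |level| is the target's maximum
+  // size shifted right by |level|.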
+ GLsizei max_size = MaxSizeForTarget(target) >> level;
+ return level >= 0 &&
+ width >= 0 &&
+ height >= 0 &&
+ depth >= 0 &&
+ level < MaxLevelsForTarget(target) &&
+ width <= max_size &&
+ height <= max_size &&
+ depth <= max_size &&
+ (level == 0 || feature_info_->feature_flags().npot_ok ||
+ (!GLES2Util::IsNPOT(width) &&
+ !GLES2Util::IsNPOT(height) &&
+ !GLES2Util::IsNPOT(depth))) &&
+ (target != GL_TEXTURE_CUBE_MAP || (width == height && depth == 1)) &&
+ (target != GL_TEXTURE_2D || (depth == 1));
+}
+
+void TextureManager::SetTarget(TextureRef* ref, GLenum target) {
+ DCHECK(ref);
+ ref->texture()
+ ->SetTarget(feature_info_.get(), target, MaxLevelsForTarget(target));
+}
+
+void TextureManager::SetLevelCleared(TextureRef* ref,
+ GLenum target,
+ GLint level,
+ bool cleared) {
+ DCHECK(ref);
+ ref->texture()->SetLevelCleared(target, level, cleared);
+}
+
+bool TextureManager::ClearRenderableLevels(
+ GLES2Decoder* decoder, TextureRef* ref) {
+ DCHECK(ref);
+ return ref->texture()->ClearRenderableLevels(decoder);
+}
+
+bool TextureManager::ClearTextureLevel(
+ GLES2Decoder* decoder, TextureRef* ref,
+ GLenum target, GLint level) {
+ DCHECK(ref);
+ Texture* texture = ref->texture();
+ if (texture->num_uncleared_mips() == 0) {
+ return true;
+ }
+ bool result = texture->ClearLevel(decoder, target, level);
+ texture->UpdateCleared();
+ return result;
+}
+
+void TextureManager::SetLevelInfo(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared) {
+ DCHECK(ref);
+ Texture* texture = ref->texture();
+
+ texture->GetMemTracker()->TrackMemFree(texture->estimated_size());
+ texture->SetLevelInfo(feature_info_.get(),
+ target,
+ level,
+ internal_format,
+ width,
+ height,
+ depth,
+ border,
+ format,
+ type,
+ cleared);
+ texture->GetMemTracker()->TrackMemAlloc(texture->estimated_size());
+}
+
+Texture* TextureManager::Produce(TextureRef* ref) {
+ DCHECK(ref);
+ return ref->texture();
+}
+
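+// Wraps an existing Texture (typically one produced by another context, e.g.
+// via the mailbox mechanism) in a new TextureRef for |client_id|.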
+TextureRef* TextureManager::Consume(
+ GLuint client_id,
+ Texture* texture) {
+ DCHECK(client_id);
+ scoped_refptr<TextureRef> ref(new TextureRef(this, client_id, texture));
+ bool result = textures_.insert(std::make_pair(client_id, ref)).second;
+ DCHECK(result);
+ return ref.get();
+}
+
+void TextureManager::SetParameteri(
+ const char* function_name, ErrorState* error_state,
+ TextureRef* ref, GLenum pname, GLint param) {
+ DCHECK(error_state);
+ DCHECK(ref);
+ Texture* texture = ref->texture();
+ GLenum result = texture->SetParameteri(feature_info_.get(), pname, param);
+ if (result != GL_NO_ERROR) {
+ if (result == GL_INVALID_ENUM) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, param, "param");
+ } else {
+ ERRORSTATE_SET_GL_ERROR_INVALID_PARAMI(
+ error_state, result, function_name, pname, param);
+ }
+ } else {
+ // Texture tracking pools exist only for the command decoder, so
+ // do not pass them on to the native GL implementation.
+ if (pname != GL_TEXTURE_POOL_CHROMIUM) {
+ glTexParameteri(texture->target(), pname, param);
+ }
+ }
+}
+
+void TextureManager::SetParameterf(
+ const char* function_name, ErrorState* error_state,
+ TextureRef* ref, GLenum pname, GLfloat param) {
+ DCHECK(error_state);
+ DCHECK(ref);
+ Texture* texture = ref->texture();
+ GLenum result = texture->SetParameterf(feature_info_.get(), pname, param);
+ if (result != GL_NO_ERROR) {
+ if (result == GL_INVALID_ENUM) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, param, "param");
+ } else {
+ ERRORSTATE_SET_GL_ERROR_INVALID_PARAMF(
+ error_state, result, function_name, pname, param);
+ }
+ } else {
+ // Texture tracking pools exist only for the command decoder, so
+ // do not pass them on to the native GL implementation.
+ if (pname != GL_TEXTURE_POOL_CHROMIUM) {
+ glTexParameterf(texture->target(), pname, param);
+ }
+ }
+}
+
+bool TextureManager::MarkMipmapsGenerated(TextureRef* ref) {
+ DCHECK(ref);
+ Texture* texture = ref->texture();
+ texture->GetMemTracker()->TrackMemFree(texture->estimated_size());
+ bool result = texture->MarkMipmapsGenerated(feature_info_.get());
+ texture->GetMemTracker()->TrackMemAlloc(texture->estimated_size());
+ return result;
+}
+
+TextureRef* TextureManager::CreateTexture(
+ GLuint client_id, GLuint service_id) {
+ DCHECK_NE(0u, service_id);
+ scoped_refptr<TextureRef> ref(TextureRef::Create(
+ this, client_id, service_id));
+ std::pair<TextureMap::iterator, bool> result =
+ textures_.insert(std::make_pair(client_id, ref));
+ DCHECK(result.second);
+ return ref.get();
+}
+
+TextureRef* TextureManager::GetTexture(
+ GLuint client_id) const {
+ TextureMap::const_iterator it = textures_.find(client_id);
+ return it != textures_.end() ? it->second.get() : NULL;
+}
+
+void TextureManager::RemoveTexture(GLuint client_id) {
+ TextureMap::iterator it = textures_.find(client_id);
+ if (it != textures_.end()) {
+ it->second->reset_client_id();
+ textures_.erase(it);
+ }
+}
+
+void TextureManager::StartTracking(TextureRef* ref) {
+ Texture* texture = ref->texture();
+ ++texture_count_;
+ num_uncleared_mips_ += texture->num_uncleared_mips();
+ if (!texture->SafeToRenderFrom())
+ ++num_unsafe_textures_;
+ if (!texture->CanRender(feature_info_.get()))
+ ++num_unrenderable_textures_;
+ if (texture->HasImages())
+ ++num_images_;
+}
+
+void TextureManager::StopTracking(TextureRef* ref) {
+ if (ref->num_observers()) {
+ for (unsigned int i = 0; i < destruction_observers_.size(); i++) {
+ destruction_observers_[i]->OnTextureRefDestroying(ref);
+ }
+ DCHECK_EQ(ref->num_observers(), 0);
+ }
+
+ Texture* texture = ref->texture();
+
+ --texture_count_;
+ if (texture->HasImages()) {
+ DCHECK_NE(0, num_images_);
+ --num_images_;
+ }
+ if (!texture->CanRender(feature_info_.get())) {
+ DCHECK_NE(0, num_unrenderable_textures_);
+ --num_unrenderable_textures_;
+ }
+ if (!texture->SafeToRenderFrom()) {
+ DCHECK_NE(0, num_unsafe_textures_);
+ --num_unsafe_textures_;
+ }
+ num_uncleared_mips_ -= texture->num_uncleared_mips();
+ DCHECK_GE(num_uncleared_mips_, 0);
+}
+
+MemoryTypeTracker* TextureManager::GetMemTracker(GLenum tracking_pool) {
+ switch (tracking_pool) {
+ case GL_TEXTURE_POOL_MANAGED_CHROMIUM:
+ return memory_tracker_managed_.get();
+ break;
+ case GL_TEXTURE_POOL_UNMANAGED_CHROMIUM:
+ return memory_tracker_unmanaged_.get();
+ break;
+ default:
+ break;
+ }
+ NOTREACHED();
+ return NULL;
+}
+
+Texture* TextureManager::GetTextureForServiceId(GLuint service_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (TextureMap::const_iterator it = textures_.begin();
+ it != textures_.end(); ++it) {
+ Texture* texture = it->second->texture();
+ if (texture->service_id() == service_id)
+ return texture;
+ }
+ return NULL;
+}
+
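+// Returns the number of levels in a complete mipmap chain for the given
+// dimensions: 1 + floor(log2(largest dimension)). External textures always
+// have exactly one level.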
+GLsizei TextureManager::ComputeMipMapCount(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth) {
+ switch (target) {
+ case GL_TEXTURE_EXTERNAL_OES:
+ return 1;
+ default:
+ return 1 +
+ base::bits::Log2Floor(std::max(std::max(width, height), depth));
+ }
+}
+
+void TextureManager::SetLevelImage(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ gfx::GLImage* image) {
+ DCHECK(ref);
+ ref->texture()->SetLevelImage(feature_info_.get(), target, level, image);
+}
+
+size_t TextureManager::GetSignatureSize() const {
+ return sizeof(TextureTag) + sizeof(TextureSignature);
+}
+
+void TextureManager::AddToSignature(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ std::string* signature) const {
+ ref->texture()->AddToSignature(feature_info_.get(), target, level, signature);
+}
+
+void TextureManager::UpdateSafeToRenderFrom(int delta) {
+ num_unsafe_textures_ += delta;
+ DCHECK_GE(num_unsafe_textures_, 0);
+}
+
+void TextureManager::UpdateUnclearedMips(int delta) {
+ num_uncleared_mips_ += delta;
+ DCHECK_GE(num_uncleared_mips_, 0);
+}
+
+void TextureManager::UpdateCanRenderCondition(
+ Texture::CanRenderCondition old_condition,
+ Texture::CanRenderCondition new_condition) {
+ if (old_condition == Texture::CAN_RENDER_NEVER ||
+ (old_condition == Texture::CAN_RENDER_ONLY_IF_NPOT &&
+ !feature_info_->feature_flags().npot_ok)) {
+ DCHECK_GT(num_unrenderable_textures_, 0);
+ --num_unrenderable_textures_;
+ }
+ if (new_condition == Texture::CAN_RENDER_NEVER ||
+ (new_condition == Texture::CAN_RENDER_ONLY_IF_NPOT &&
+ !feature_info_->feature_flags().npot_ok))
+ ++num_unrenderable_textures_;
+}
+
+void TextureManager::UpdateNumImages(int delta) {
+ num_images_ += delta;
+ DCHECK_GE(num_images_, 0);
+}
+
+void TextureManager::IncFramebufferStateChangeCount() {
+ if (framebuffer_manager_)
+ framebuffer_manager_->IncFramebufferStateChangeCount();
+}
+
+bool TextureManager::ValidateFormatAndTypeCombination(
+ ErrorState* error_state, const char* function_name, GLenum format,
+ GLenum type) {
+ if (!feature_info_->GetTextureFormatValidator(format).IsValid(type)) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ (std::string("invalid type ") +
+ GLES2Util::GetStringEnum(type) + " for format " +
+ GLES2Util::GetStringEnum(format)).c_str());
+ return false;
+ }
+ return true;
+}
+
+bool TextureManager::ValidateTextureParameters(
+ ErrorState* error_state, const char* function_name,
+ GLenum format, GLenum type, GLenum internal_format, GLint level) {
+ const Validators* validators = feature_info_->validators();
+ if (!validators->texture_format.IsValid(format)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, format, "format");
+ return false;
+ }
+ if (!validators->pixel_type.IsValid(type)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, type, "type");
+ return false;
+ }
+ if (format != internal_format &&
+ !((internal_format == GL_RGBA32F && format == GL_RGBA) ||
+ (internal_format == GL_RGB32F && format == GL_RGB))) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ "format != internalformat");
+ return false;
+ }
+ uint32 channels = GLES2Util::GetChannelsForFormat(format);
+ if ((channels & (GLES2Util::kDepth | GLES2Util::kStencil)) != 0 && level) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ (std::string("invalid format ") + GLES2Util::GetStringEnum(format) +
+ " for level != 0").c_str());
+ return false;
+ }
+ return ValidateFormatAndTypeCombination(error_state, function_name,
+ format, type);
+}
+
+// Gets the texture currently bound to the given target on the active texture
+// unit.
+TextureRef* TextureManager::GetTextureInfoForTarget(
+ ContextState* state, GLenum target) {
+ TextureUnit& unit = state->texture_units[state->active_texture_unit];
+ TextureRef* texture = NULL;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ texture = unit.bound_texture_2d.get();
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
+ texture = unit.bound_texture_cube_map.get();
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ texture = unit.bound_texture_external_oes.get();
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ texture = unit.bound_texture_rectangle_arb.get();
+ break;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+ return texture;
+}
+
+TextureRef* TextureManager::GetTextureInfoForTargetUnlessDefault(
+ ContextState* state, GLenum target) {
+ TextureRef* texture = GetTextureInfoForTarget(state, target);
+ if (!texture)
+ return NULL;
+ if (texture == GetDefaultTextureInfo(target))
+ return NULL;
+ return texture;
+}
+
+bool TextureManager::ValidateTexImage2D(
+ ContextState* state,
+ const char* function_name,
+ const DoTextImage2DArguments& args,
+ TextureRef** texture_ref) {
+ ErrorState* error_state = state->GetErrorState();
+ const Validators* validators = feature_info_->validators();
+ if (!validators->texture_target.IsValid(args.target)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, args.target, "target");
+ return false;
+ }
+ if (!validators->texture_internal_format.IsValid(args.internal_format)) {
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
+ error_state, function_name, args.internal_format,
+ "internalformat");
+ return false;
+ }
+ if (!ValidateTextureParameters(
+ error_state, function_name, args.format, args.type,
+ args.internal_format, args.level)) {
+ return false;
+ }
+ if (!ValidForTarget(args.target, args.level, args.width, args.height, 1) ||
+ args.border != 0) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_VALUE, function_name,
+ "dimensions out of range");
+ return false;
+ }
+ if ((GLES2Util::GetChannelsForFormat(args.format) &
+ (GLES2Util::kDepth | GLES2Util::kStencil)) != 0 && args.pixels) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION,
+ function_name, "can not supply data for depth or stencil textures");
+ return false;
+ }
+
+ TextureRef* local_texture_ref = GetTextureInfoForTarget(state, args.target);
+ if (!local_texture_ref) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ "unknown texture for target");
+ return false;
+ }
+ if (local_texture_ref->texture()->IsImmutable()) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ "texture is immutable");
+ return false;
+ }
+
+ if (!memory_tracker_managed_->EnsureGPUMemoryAvailable(args.pixels_size)) {
+ ERRORSTATE_SET_GL_ERROR(error_state, GL_OUT_OF_MEMORY, function_name,
+ "out of memory");
+ return false;
+ }
+
+ // Write the TextureReference since this is valid.
+ *texture_ref = local_texture_ref;
+ return true;
+}
+
+void TextureManager::ValidateAndDoTexImage2D(
+ DecoderTextureState* texture_state,
+ ContextState* state,
+ DecoderFramebufferState* framebuffer_state,
+ const DoTextImage2DArguments& args) {
+ TextureRef* texture_ref;
+ if (!ValidateTexImage2D(state, "glTexImage2D", args, &texture_ref)) {
+ return;
+ }
+
+ DoTexImage2D(texture_state, state->GetErrorState(), framebuffer_state,
+ texture_ref, args);
+}
+
+void TextureManager::DoTexImage2D(
+ DecoderTextureState* texture_state,
+ ErrorState* error_state,
+ DecoderFramebufferState* framebuffer_state,
+ TextureRef* texture_ref,
+ const DoTextImage2DArguments& args) {
+ Texture* texture = texture_ref->texture();
+ GLsizei tex_width = 0;
+ GLsizei tex_height = 0;
+ GLenum tex_type = 0;
+ GLenum tex_format = 0;
+ bool level_is_same =
+ texture->GetLevelSize(args.target, args.level, &tex_width, &tex_height) &&
+ texture->GetLevelType(args.target, args.level, &tex_type, &tex_format) &&
+ args.width == tex_width && args.height == tex_height &&
+ args.type == tex_type && args.format == tex_format;
+
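+  // If the level is already defined with identical dimensions, format and
+  // type, and no pixel data was supplied, there is nothing to upload; just
+  // record the level as uncleared.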
+ if (level_is_same && !args.pixels) {
+ // Just set the level texture but mark the texture as uncleared.
+ SetLevelInfo(
+ texture_ref,
+ args.target, args.level, args.internal_format, args.width, args.height,
+ 1, args.border, args.format, args.type, false);
+ texture_state->tex_image_2d_failed = false;
+ return;
+ }
+
+ if (texture->IsAttachedToFramebuffer()) {
+ framebuffer_state->clear_state_dirty = true;
+ }
+
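+  // When the level is unchanged and pixel data is supplied, prefer
+  // glTexSubImage2D on drivers where it is faster than respecifying the level
+  // with glTexImage2D.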
+ if (texture_state->texsubimage2d_faster_than_teximage2d &&
+ level_is_same && args.pixels) {
+ {
+ ScopedTextureUploadTimer timer(texture_state);
+ glTexSubImage2D(args.target, args.level, 0, 0, args.width, args.height,
+ args.format, args.type, args.pixels);
+ }
+ SetLevelCleared(texture_ref, args.target, args.level, true);
+ texture_state->tex_image_2d_failed = false;
+ return;
+ }
+
+ ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state, "glTexImage2D");
+ {
+ ScopedTextureUploadTimer timer(texture_state);
+ glTexImage2D(
+ args.target, args.level, args.internal_format, args.width, args.height,
+ args.border, args.format, args.type, args.pixels);
+ }
+ GLenum error = ERRORSTATE_PEEK_GL_ERROR(error_state, "glTexImage2D");
+ if (error == GL_NO_ERROR) {
+ SetLevelInfo(
+ texture_ref,
+ args.target, args.level, args.internal_format, args.width, args.height,
+ 1, args.border, args.format, args.type, args.pixels != NULL);
+ texture_state->tex_image_2d_failed = false;
+ }
+}
+
+ScopedTextureUploadTimer::ScopedTextureUploadTimer(
+ DecoderTextureState* texture_state)
+ : texture_state_(texture_state),
+ begin_time_(base::TimeTicks::HighResNow()) {
+}
+
+ScopedTextureUploadTimer::~ScopedTextureUploadTimer() {
+ texture_state_->texture_upload_count++;
+ texture_state_->total_texture_upload_time +=
+ base::TimeTicks::HighResNow() - begin_time_;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/texture_manager.h b/gpu/command_buffer/service/texture_manager.h
new file mode 100644
index 0000000..df00607
--- /dev/null
+++ b/gpu/command_buffer/service/texture_manager.h
@@ -0,0 +1,833 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_TEXTURE_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_TEXTURE_MANAGER_H_
+
+#include <algorithm>
+#include <list>
+#include <set>
+#include <string>
+#include <vector>
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/gpu_export.h"
+#include "ui/gl/gl_image.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2Decoder;
+struct ContextState;
+struct DecoderFramebufferState;
+class Display;
+class ErrorState;
+class FeatureInfo;
+class FramebufferManager;
+class MailboxManager;
+class TextureManager;
+class TextureRef;
+
+// Info about Textures currently in the system.
+// This class wraps a real GL texture, keeping track of its meta-data. It is
+// jointly owned by one or more TextureRefs.
+class GPU_EXPORT Texture {
+ public:
+ explicit Texture(GLuint service_id);
+
+ GLenum min_filter() const {
+ return min_filter_;
+ }
+
+ GLenum mag_filter() const {
+ return mag_filter_;
+ }
+
+ GLenum wrap_s() const {
+ return wrap_s_;
+ }
+
+ GLenum wrap_t() const {
+ return wrap_t_;
+ }
+
+ GLenum usage() const {
+ return usage_;
+ }
+
+ GLenum pool() const {
+ return pool_;
+ }
+
+ int num_uncleared_mips() const {
+ return num_uncleared_mips_;
+ }
+
+ uint32 estimated_size() const {
+ return estimated_size_;
+ }
+
+ bool CanRenderTo() const {
+ return target_ != GL_TEXTURE_EXTERNAL_OES;
+ }
+
+ // The service side OpenGL id of the texture.
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ void SetServiceId(GLuint service_id) {
+ DCHECK(service_id);
+ service_id_ = service_id;
+ }
+
+  // Returns the target this texture was first bound to or 0 if it has not
+ // been bound. Once a texture is bound to a specific target it can never be
+ // bound to a different target.
+ GLenum target() const {
+ return target_;
+ }
+
+ bool SafeToRenderFrom() const {
+ return cleared_;
+ }
+
+ // Get the width and height for a particular level. Returns false if level
+ // does not exist.
+ bool GetLevelSize(
+ GLint target, GLint level, GLsizei* width, GLsizei* height) const;
+
+ // Get the type of a level. Returns false if level does not exist.
+ bool GetLevelType(
+ GLint target, GLint level, GLenum* type, GLenum* internal_format) const;
+
+ // Get the image bound to a particular level. Returns NULL if level
+ // does not exist.
+ gfx::GLImage* GetLevelImage(GLint target, GLint level) const;
+
+ bool HasImages() const {
+ return has_images_;
+ }
+
+  // Returns true if the given dimensions are inside the dimensions of the
+  // level and the type matches the level.
+ bool ValidForTexture(
+ GLint target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum type) const;
+
+ bool IsValid() const {
+ return !!target();
+ }
+
+ bool IsAttachedToFramebuffer() const {
+ return framebuffer_attachment_count_ != 0;
+ }
+
+ void AttachToFramebuffer() {
+ ++framebuffer_attachment_count_;
+ }
+
+ void DetachFromFramebuffer() {
+ DCHECK_GT(framebuffer_attachment_count_, 0);
+ --framebuffer_attachment_count_;
+ }
+
+ void SetImmutable(bool immutable) {
+ immutable_ = immutable;
+ }
+
+ bool IsImmutable() const {
+ return immutable_;
+ }
+
+ // Whether a particular level/face is cleared.
+ bool IsLevelCleared(GLenum target, GLint level) const;
+
+ // Whether the texture has been defined
+ bool IsDefined() const {
+ return estimated_size() > 0;
+ }
+
+ // Initialize TEXTURE_MAX_ANISOTROPY to 1 if we haven't done so yet.
+ void InitTextureMaxAnisotropyIfNeeded(GLenum target);
+
+ void OnWillModifyPixels();
+ void OnDidModifyPixels();
+
+ private:
+ friend class MailboxManager;
+ friend class MailboxManagerTest;
+ friend class TextureDefinition;
+ friend class TextureManager;
+ friend class TextureRef;
+ friend class TextureTestHelper;
+
+ ~Texture();
+ void AddTextureRef(TextureRef* ref);
+ void RemoveTextureRef(TextureRef* ref, bool have_context);
+ MemoryTypeTracker* GetMemTracker();
+
+ // Condition on which this texture is renderable. Can be ONLY_IF_NPOT if it
+ // depends on context support for non-power-of-two textures (i.e. will be
+ // renderable if NPOT support is in the context, otherwise not, e.g. texture
+ // with a NPOT level). ALWAYS means it doesn't depend on context features
+ // (e.g. complete POT), NEVER means it's not renderable regardless (e.g.
+ // incomplete).
+ enum CanRenderCondition {
+ CAN_RENDER_ALWAYS,
+ CAN_RENDER_NEVER,
+ CAN_RENDER_ONLY_IF_NPOT
+ };
+
+ struct LevelInfo {
+ LevelInfo();
+ LevelInfo(const LevelInfo& rhs);
+ ~LevelInfo();
+
+ bool cleared;
+ GLenum target;
+ GLint level;
+ GLenum internal_format;
+ GLsizei width;
+ GLsizei height;
+ GLsizei depth;
+ GLint border;
+ GLenum format;
+ GLenum type;
+ scoped_refptr<gfx::GLImage> image;
+ uint32 estimated_size;
+ };
+
+ // Set the info for a particular level.
+ void SetLevelInfo(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared);
+
+ // In GLES2 "texture complete" means it has all required mips for filtering
+ // down to a 1x1 pixel texture, they are in the correct order, they are all
+ // the same format.
+ bool texture_complete() const {
+ return texture_complete_;
+ }
+
+ // In GLES2 "cube complete" means all 6 faces level 0 are defined, all the
+ // same format, all the same dimensions and all width = height.
+ bool cube_complete() const {
+ return cube_complete_;
+ }
+
+ // Whether or not this texture is a non-power-of-two texture.
+ bool npot() const {
+ return npot_;
+ }
+
+ // Marks a particular level as cleared or uncleared.
+ void SetLevelCleared(GLenum target, GLint level, bool cleared);
+
+ // Updates the cleared flag for this texture by inspecting all the mips.
+ void UpdateCleared();
+
+ // Clears any renderable uncleared levels.
+ // Returns false if a GL error was generated.
+ bool ClearRenderableLevels(GLES2Decoder* decoder);
+
+ // Clears the level.
+ // Returns false if a GL error was generated.
+ bool ClearLevel(GLES2Decoder* decoder, GLenum target, GLint level);
+
+ // Sets a texture parameter.
+ // TODO(gman): Expand to SetParameteriv,fv
+ // Returns GL_NO_ERROR on success. Otherwise the error to generate.
+ GLenum SetParameteri(
+ const FeatureInfo* feature_info, GLenum pname, GLint param);
+ GLenum SetParameterf(
+ const FeatureInfo* feature_info, GLenum pname, GLfloat param);
+
+  // Marks each of the mip levels as though they were generated.
+ bool MarkMipmapsGenerated(const FeatureInfo* feature_info);
+
+ bool NeedsMips() const {
+ return min_filter_ != GL_NEAREST && min_filter_ != GL_LINEAR;
+ }
+
+ // True if this texture meets all the GLES2 criteria for rendering.
+ // See section 3.8.2 of the GLES2 spec.
+ bool CanRender(const FeatureInfo* feature_info) const;
+
+ // Returns true if mipmaps can be generated by GL.
+ bool CanGenerateMipmaps(const FeatureInfo* feature_info) const;
+
+ // Sets the Texture's target
+ // Parameters:
+ // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP or
+ // GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB
+ // max_levels: The maximum levels this type of target can have.
+ void SetTarget(
+ const FeatureInfo* feature_info, GLenum target, GLint max_levels);
+
+ // Update info about this texture.
+ void Update(const FeatureInfo* feature_info);
+
+ // Set the image for a particular level.
+ void SetLevelImage(
+ const FeatureInfo* feature_info,
+ GLenum target,
+ GLint level,
+ gfx::GLImage* image);
+
+ // Appends a signature for the given level.
+ void AddToSignature(
+ const FeatureInfo* feature_info,
+ GLenum target, GLint level, std::string* signature) const;
+
+ void SetMailboxManager(MailboxManager* mailbox_manager);
+
+ // Updates the unsafe textures count in all the managers referencing this
+ // texture.
+ void UpdateSafeToRenderFrom(bool cleared);
+
+ // Updates the uncleared mip count in all the managers referencing this
+ // texture.
+ void UpdateMipCleared(LevelInfo* info, bool cleared);
+
+ // Computes the CanRenderCondition flag.
+ CanRenderCondition GetCanRenderCondition() const;
+
+  // Updates the unrenderable texture count in all the managers referencing
+  // this texture.
+ void UpdateCanRenderCondition();
+
+ // Updates the images count in all the managers referencing this
+ // texture.
+ void UpdateHasImages();
+
+ // Increment the framebuffer state change count in all the managers
+ // referencing this texture.
+ void IncAllFramebufferStateChangeCount();
+
+ MailboxManager* mailbox_manager_;
+
+ // Info about each face and level of texture.
+ std::vector<std::vector<LevelInfo> > level_infos_;
+
+ // The texture refs that point to this Texture.
+ typedef std::set<TextureRef*> RefSet;
+ RefSet refs_;
+
+ // The single TextureRef that accounts for memory for this texture. Must be
+ // one of refs_.
+ TextureRef* memory_tracking_ref_;
+
+  // The service id of the texture.
+ GLuint service_id_;
+
+ // Whether all renderable mips of this texture have been cleared.
+ bool cleared_;
+
+ int num_uncleared_mips_;
+
+  // The target. 0 if unset, otherwise GL_TEXTURE_2D, GL_TEXTURE_CUBE_MAP,
+  // GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB.
+ GLenum target_;
+
+ // Texture parameters.
+ GLenum min_filter_;
+ GLenum mag_filter_;
+ GLenum wrap_s_;
+ GLenum wrap_t_;
+ GLenum usage_;
+ GLenum pool_;
+
+ // The maximum level that has been set.
+ GLint max_level_set_;
+
+ // Whether or not this texture is "texture complete"
+ bool texture_complete_;
+
+ // Whether or not this texture is "cube complete"
+ bool cube_complete_;
+
+ // Whether or not this texture is non-power-of-two
+ bool npot_;
+
+ // Whether this texture has ever been bound.
+ bool has_been_bound_;
+
+ // The number of framebuffers this texture is attached to.
+ int framebuffer_attachment_count_;
+
+ // Whether the texture is immutable and no further changes to the format
+ // or dimensions of the texture object can be made.
+ bool immutable_;
+
+ // Whether or not this texture has images.
+ bool has_images_;
+
+ // Size in bytes this texture is assumed to take in memory.
+ uint32 estimated_size_;
+
+ // Cache of the computed CanRenderCondition flag.
+ CanRenderCondition can_render_condition_;
+
+ // Whether we have initialized TEXTURE_MAX_ANISOTROPY to 1.
+ bool texture_max_anisotropy_initialized_;
+
+ DISALLOW_COPY_AND_ASSIGN(Texture);
+};
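+// Lifetime note (summary added for clarity, not part of the original header):
+// ~Texture() is private, so a Texture stays alive as long as TextureRef
+// objects reference it via AddTextureRef()/RemoveTextureRef(); the unit tests
+// below check that the underlying GL texture is deleted once the last
+// TextureRef is released.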
+
+// This class represents a texture in a client context group. It's mostly 1:1
+// with a client id, though it can outlive the client id if it's still bound to
+// an FBO or another context when destroyed.
+// Multiple TextureRefs can point to the same texture with cross-context
+// sharing.
+class GPU_EXPORT TextureRef : public base::RefCounted<TextureRef> {
+ public:
+ TextureRef(TextureManager* manager, GLuint client_id, Texture* texture);
+ static scoped_refptr<TextureRef> Create(TextureManager* manager,
+ GLuint client_id,
+ GLuint service_id);
+
+ void AddObserver() { num_observers_++; }
+ void RemoveObserver() { num_observers_--; }
+
+ const Texture* texture() const { return texture_; }
+ Texture* texture() { return texture_; }
+ GLuint client_id() const { return client_id_; }
+ GLuint service_id() const { return texture_->service_id(); }
+ GLint num_observers() const { return num_observers_; }
+
+ private:
+ friend class base::RefCounted<TextureRef>;
+ friend class Texture;
+ friend class TextureManager;
+
+ ~TextureRef();
+ const TextureManager* manager() const { return manager_; }
+ TextureManager* manager() { return manager_; }
+ void reset_client_id() { client_id_ = 0; }
+
+ TextureManager* manager_;
+ Texture* texture_;
+ GLuint client_id_;
+ GLint num_observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(TextureRef);
+};
+
+// Holds data that is per gles2_cmd_decoder, but is related to the
+// TextureManager.
+struct DecoderTextureState {
+ // total_texture_upload_time automatically initialized to 0 in default
+ // constructor.
+ explicit DecoderTextureState(bool texsubimage2d_faster_than_teximage2d)
+ : tex_image_2d_failed(false),
+ texture_upload_count(0),
+ texsubimage2d_faster_than_teximage2d(
+ texsubimage2d_faster_than_teximage2d) {}
+
+  // This indicates that all the following texSubImage2D calls that are part
+  // of the failed texImage2D call should be ignored.
+ bool tex_image_2d_failed;
+
+ // Command buffer stats.
+ int texture_upload_count;
+ base::TimeDelta total_texture_upload_time;
+
+ bool texsubimage2d_faster_than_teximage2d;
+};
+
+// This class keeps track of the textures and their sizes so we can do NPOT and
+// texture complete checking.
+//
+// NOTE: To support shared resources an instance of this class will need to be
+// shared by multiple GLES2Decoders.
+class GPU_EXPORT TextureManager {
+ public:
+ class GPU_EXPORT DestructionObserver {
+ public:
+ DestructionObserver();
+ virtual ~DestructionObserver();
+
+ // Called in ~TextureManager.
+ virtual void OnTextureManagerDestroying(TextureManager* manager) = 0;
+
+ // Called via ~TextureRef.
+ virtual void OnTextureRefDestroying(TextureRef* texture) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DestructionObserver);
+ };
+
+ enum DefaultAndBlackTextures {
+ kTexture2D,
+ kCubeMap,
+ kExternalOES,
+ kRectangleARB,
+ kNumDefaultTextures
+ };
+
+ TextureManager(MemoryTracker* memory_tracker,
+ FeatureInfo* feature_info,
+ GLsizei max_texture_size,
+ GLsizei max_cube_map_texture_size,
+ bool use_default_textures);
+ ~TextureManager();
+
+ void set_framebuffer_manager(FramebufferManager* manager) {
+ framebuffer_manager_ = manager;
+ }
+
+ // Init the texture manager.
+ bool Initialize();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+ // Returns the maximum number of levels.
+ GLint MaxLevelsForTarget(GLenum target) const {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return max_levels_;
+ case GL_TEXTURE_EXTERNAL_OES:
+ return 1;
+ default:
+ return max_cube_map_levels_;
+ }
+ }
+
+ // Returns the maximum size.
+ GLsizei MaxSizeForTarget(GLenum target) const {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_EXTERNAL_OES:
+ return max_texture_size_;
+ default:
+ return max_cube_map_texture_size_;
+ }
+ }
+
+  // Returns the maximum number of levels a texture of the given size can
+  // have.
+ static GLsizei ComputeMipMapCount(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth);
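+  // Worked example (added for clarity; not part of the original comment):
+  // for a 16x8 2D texture the full mip chain is 16x8, 8x4, 4x2, 2x1, 1x1,
+  // i.e. 1 + floor(log2(max(width, height, depth))) = 5 levels, matching
+  // kMax2dLevels in the unit tests below for kMaxTextureSize = 16.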
+
+  // Checks if the given dimensions are valid for a given target.
+ bool ValidForTarget(
+ GLenum target, GLint level,
+ GLsizei width, GLsizei height, GLsizei depth);
+
+ // True if this texture meets all the GLES2 criteria for rendering.
+ // See section 3.8.2 of the GLES2 spec.
+ bool CanRender(const TextureRef* ref) const {
+ return ref->texture()->CanRender(feature_info_.get());
+ }
+
+ // Returns true if mipmaps can be generated by GL.
+ bool CanGenerateMipmaps(const TextureRef* ref) const {
+ return ref->texture()->CanGenerateMipmaps(feature_info_.get());
+ }
+
+  // Sets the Texture's target.
+  // Parameters:
+  //   target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP or
+  //           GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB
+ void SetTarget(
+ TextureRef* ref,
+ GLenum target);
+
+  // Set the info for a particular level in a TextureRef.
+ void SetLevelInfo(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ bool cleared);
+
+ // Adapter to call above function.
+ void SetLevelInfoFromParams(TextureRef* ref,
+ const gpu::AsyncTexImage2DParams& params) {
+ SetLevelInfo(
+ ref, params.target, params.level, params.internal_format,
+ params.width, params.height, 1 /* depth */,
+ params.border, params.format,
+ params.type, true /* cleared */);
+ }
+
+ Texture* Produce(TextureRef* ref);
+
+ // Maps an existing texture into the texture manager, at a given client ID.
+ TextureRef* Consume(GLuint client_id, Texture* texture);
+
+ // Sets a mip as cleared.
+ void SetLevelCleared(TextureRef* ref, GLenum target,
+ GLint level, bool cleared);
+
+ // Sets a texture parameter of a Texture
+ // Returns GL_NO_ERROR on success. Otherwise the error to generate.
+ // TODO(gman): Expand to SetParameteriv,fv
+ void SetParameteri(
+ const char* function_name, ErrorState* error_state,
+ TextureRef* ref, GLenum pname, GLint param);
+ void SetParameterf(
+ const char* function_name, ErrorState* error_state,
+ TextureRef* ref, GLenum pname, GLfloat param);
+
+  // Marks each of the mip levels as though they were generated.
+ // Returns false if that's not allowed for the given texture.
+ bool MarkMipmapsGenerated(TextureRef* ref);
+
+ // Clears any uncleared renderable levels.
+ bool ClearRenderableLevels(GLES2Decoder* decoder, TextureRef* ref);
+
+ // Clear a specific level.
+ bool ClearTextureLevel(
+ GLES2Decoder* decoder, TextureRef* ref, GLenum target, GLint level);
+
+ // Creates a new texture info.
+ TextureRef* CreateTexture(GLuint client_id, GLuint service_id);
+
+ // Gets the texture info for the given texture.
+ TextureRef* GetTexture(GLuint client_id) const;
+
+ // Removes a texture info.
+ void RemoveTexture(GLuint client_id);
+
+ // Gets a Texture for a given service id (note: it assumes the texture object
+ // is still mapped in this TextureManager).
+ Texture* GetTextureForServiceId(GLuint service_id) const;
+
+ TextureRef* GetDefaultTextureInfo(GLenum target) {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return default_textures_[kTexture2D].get();
+ case GL_TEXTURE_CUBE_MAP:
+ return default_textures_[kCubeMap].get();
+ case GL_TEXTURE_EXTERNAL_OES:
+ return default_textures_[kExternalOES].get();
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return default_textures_[kRectangleARB].get();
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+ }
+
+ bool HaveUnrenderableTextures() const {
+ return num_unrenderable_textures_ > 0;
+ }
+
+ bool HaveUnsafeTextures() const {
+ return num_unsafe_textures_ > 0;
+ }
+
+ bool HaveUnclearedMips() const {
+ return num_uncleared_mips_ > 0;
+ }
+
+ bool HaveImages() const {
+ return num_images_ > 0;
+ }
+
+ GLuint black_texture_id(GLenum target) const {
+ switch (target) {
+ case GL_SAMPLER_2D:
+ return black_texture_ids_[kTexture2D];
+ case GL_SAMPLER_CUBE:
+ return black_texture_ids_[kCubeMap];
+ case GL_SAMPLER_EXTERNAL_OES:
+ return black_texture_ids_[kExternalOES];
+ case GL_SAMPLER_2D_RECT_ARB:
+ return black_texture_ids_[kRectangleARB];
+ default:
+ NOTREACHED();
+ return 0;
+ }
+ }
+
+ size_t mem_represented() const {
+ return
+ memory_tracker_managed_->GetMemRepresented() +
+ memory_tracker_unmanaged_->GetMemRepresented();
+ }
+
+ void SetLevelImage(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ gfx::GLImage* image);
+
+ size_t GetSignatureSize() const;
+
+ void AddToSignature(
+ TextureRef* ref,
+ GLenum target,
+ GLint level,
+ std::string* signature) const;
+
+ void AddObserver(DestructionObserver* observer) {
+ destruction_observers_.push_back(observer);
+ }
+
+ void RemoveObserver(DestructionObserver* observer) {
+ for (unsigned int i = 0; i < destruction_observers_.size(); i++) {
+ if (destruction_observers_[i] == observer) {
+ std::swap(destruction_observers_[i], destruction_observers_.back());
+ destruction_observers_.pop_back();
+ return;
+ }
+ }
+ NOTREACHED();
+ }
+
+ struct DoTextImage2DArguments {
+ GLenum target;
+ GLint level;
+ GLenum internal_format;
+ GLsizei width;
+ GLsizei height;
+ GLint border;
+ GLenum format;
+ GLenum type;
+ const void* pixels;
+ uint32 pixels_size;
+ };
+
+ bool ValidateTexImage2D(
+ ContextState* state,
+ const char* function_name,
+ const DoTextImage2DArguments& args,
+ // Pointer to TextureRef filled in if validation successful.
+ // Presumes the pointer is valid.
+ TextureRef** texture_ref);
+
+ void ValidateAndDoTexImage2D(
+ DecoderTextureState* texture_state,
+ ContextState* state,
+ DecoderFramebufferState* framebuffer_state,
+ const DoTextImage2DArguments& args);
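+  // Flow sketch (added for clarity; an assumption based on the names above,
+  // not a statement from the original header): the decoder fills in a
+  // DoTextImage2DArguments from the command, then ValidateAndDoTexImage2D()
+  // runs ValidateTexImage2D() and, if that succeeds, performs the upload on
+  // the resolved TextureRef.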
+
+ // TODO(kloveless): Make GetTexture* private once this is no longer called
+ // from gles2_cmd_decoder.
+ TextureRef* GetTextureInfoForTarget(ContextState* state, GLenum target);
+ TextureRef* GetTextureInfoForTargetUnlessDefault(
+ ContextState* state, GLenum target);
+
+ bool ValidateFormatAndTypeCombination(
+ ErrorState* error_state, const char* function_name,
+ GLenum format, GLenum type);
+
+ // Note that internal_format is only checked in relation to the format
+ // parameter, so that this function may be used to validate texSubImage2D.
+ bool ValidateTextureParameters(
+ ErrorState* error_state, const char* function_name,
+ GLenum format, GLenum type, GLenum internal_format, GLint level);
+
+ private:
+ friend class Texture;
+ friend class TextureRef;
+
+ // Helper for Initialize().
+ scoped_refptr<TextureRef> CreateDefaultAndBlackTextures(
+ GLenum target,
+ GLuint* black_texture);
+
+ void DoTexImage2D(
+ DecoderTextureState* texture_state,
+ ErrorState* error_state,
+ DecoderFramebufferState* framebuffer_state,
+ TextureRef* texture_ref,
+ const DoTextImage2DArguments& args);
+
+ void StartTracking(TextureRef* texture);
+ void StopTracking(TextureRef* texture);
+
+ void UpdateSafeToRenderFrom(int delta);
+ void UpdateUnclearedMips(int delta);
+ void UpdateCanRenderCondition(Texture::CanRenderCondition old_condition,
+ Texture::CanRenderCondition new_condition);
+ void UpdateNumImages(int delta);
+ void IncFramebufferStateChangeCount();
+
+ MemoryTypeTracker* GetMemTracker(GLenum texture_pool);
+ scoped_ptr<MemoryTypeTracker> memory_tracker_managed_;
+ scoped_ptr<MemoryTypeTracker> memory_tracker_unmanaged_;
+
+ scoped_refptr<FeatureInfo> feature_info_;
+
+ FramebufferManager* framebuffer_manager_;
+
+ // Info for each texture in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<TextureRef> > TextureMap;
+ TextureMap textures_;
+
+ GLsizei max_texture_size_;
+ GLsizei max_cube_map_texture_size_;
+ GLint max_levels_;
+ GLint max_cube_map_levels_;
+
+ const bool use_default_textures_;
+
+ int num_unrenderable_textures_;
+ int num_unsafe_textures_;
+ int num_uncleared_mips_;
+ int num_images_;
+
+  // Counts the number of Textures allocated with 'this' as their manager.
+  // Allows checking that no Texture will outlive this manager.
+ unsigned int texture_count_;
+
+ bool have_context_;
+
+ // Black (0,0,0,1) textures for when non-renderable textures are used.
+ // NOTE: There is no corresponding Texture for these textures.
+ // TextureInfos are only for textures the client side can access.
+ GLuint black_texture_ids_[kNumDefaultTextures];
+
+ // The default textures for each target (texture name = 0)
+ scoped_refptr<TextureRef> default_textures_[kNumDefaultTextures];
+
+ std::vector<DestructionObserver*> destruction_observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(TextureManager);
+};
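+// Minimal usage sketch, mirroring the unit tests in
+// texture_manager_unittest.cc (illustrative only, not part of the original
+// header):
+//
+//   TextureManager manager(NULL, feature_info, max_texture_size,
+//                          max_cube_map_texture_size, use_default_textures);
+//   manager.Initialize();
+//   manager.CreateTexture(client_id, service_id);
+//   TextureRef* ref = manager.GetTexture(client_id);
+//   manager.SetTarget(ref, GL_TEXTURE_2D);
+//   manager.SetLevelInfo(ref, GL_TEXTURE_2D, 0, GL_RGBA, 4, 4, 1, 0, GL_RGBA,
+//                        GL_UNSIGNED_BYTE, true /* cleared */);
+//   manager.MarkMipmapsGenerated(ref);
+//   manager.Destroy(have_context);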
+
+// This class records texture upload time when in scope.
+class ScopedTextureUploadTimer {
+ public:
+ explicit ScopedTextureUploadTimer(DecoderTextureState* texture_state);
+ ~ScopedTextureUploadTimer();
+
+ private:
+ DecoderTextureState* texture_state_;
+ base::TimeTicks begin_time_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedTextureUploadTimer);
+};
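+// Usage sketch (illustrative only, not part of the original header): while an
+// instance is in scope the elapsed time is attributed to the given
+// DecoderTextureState, e.g.
+//
+//   {
+//     ScopedTextureUploadTimer timer(&texture_state);
+//     // ... issue the glTexImage2D / glTexSubImage2D upload here ...
+//   }
+//   // texture_state.total_texture_upload_time now includes the upload time.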
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_TEXTURE_MANAGER_H_
diff --git a/gpu/command_buffer/service/texture_manager_unittest.cc b/gpu/command_buffer/service/texture_manager_unittest.cc
new file mode 100644
index 0000000..2d509ae
--- /dev/null
+++ b/gpu/command_buffer/service/texture_manager_unittest.cc
@@ -0,0 +1,2509 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/texture_manager.h"
+
+#include <utility>
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_image_stub.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::AtLeast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArgumentPointee;
+using ::testing::StrictMock;
+using ::testing::_;
+
+namespace gpu {
+namespace gles2 {
+
+class TextureTestHelper {
+ public:
+ static bool IsNPOT(const Texture* texture) {
+ return texture->npot();
+ }
+ static bool IsTextureComplete(const Texture* texture) {
+ return texture->texture_complete();
+ }
+ static bool IsCubeComplete(const Texture* texture) {
+ return texture->cube_complete();
+ }
+};
+
+class TextureManagerTest : public GpuServiceTest {
+ public:
+ static const GLint kMaxTextureSize = 16;
+ static const GLint kMaxCubeMapTextureSize = 8;
+ static const GLint kMaxExternalTextureSize = 16;
+ static const GLint kMax2dLevels = 5;
+ static const GLint kMaxCubeMapLevels = 4;
+ static const GLint kMaxExternalLevels = 1;
+ static const bool kUseDefaultTextures = false;
+
+ TextureManagerTest() : feature_info_(new FeatureInfo()) {}
+
+ virtual ~TextureManagerTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ manager_.reset(new TextureManager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures));
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "", kUseDefaultTextures);
+ manager_->Initialize();
+ error_state_.reset(new ::testing::StrictMock<gles2::MockErrorState>());
+ }
+
+ virtual void TearDown() {
+ manager_->Destroy(false);
+ manager_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ void SetParameter(
+ TextureRef* texture_ref, GLenum pname, GLint value, GLenum error) {
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), manager_.get(),
+ texture_ref, pname, value, error);
+ }
+
+ scoped_refptr<FeatureInfo> feature_info_;
+ scoped_ptr<TextureManager> manager_;
+ scoped_ptr<MockErrorState> error_state_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLint TextureManagerTest::kMaxTextureSize;
+const GLint TextureManagerTest::kMaxCubeMapTextureSize;
+const GLint TextureManagerTest::kMaxExternalTextureSize;
+const GLint TextureManagerTest::kMax2dLevels;
+const GLint TextureManagerTest::kMaxCubeMapLevels;
+const GLint TextureManagerTest::kMaxExternalLevels;
+#endif
+
+TEST_F(TextureManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ // Check we can create texture.
+ manager_->CreateTexture(kClient1Id, kService1Id);
+ // Check texture got created.
+ scoped_refptr<TextureRef> texture = manager_->GetTexture(kClient1Id);
+ ASSERT_TRUE(texture.get() != NULL);
+ EXPECT_EQ(kService1Id, texture->service_id());
+ EXPECT_EQ(kClient1Id, texture->client_id());
+ EXPECT_EQ(texture->texture(), manager_->GetTextureForServiceId(
+ texture->service_id()));
+ // Check we get nothing for a non-existent texture.
+ EXPECT_TRUE(manager_->GetTexture(kClient2Id) == NULL);
+  // Check trying to remove a non-existent texture does not crash.
+ manager_->RemoveTexture(kClient2Id);
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(*gl_, DeleteTextures(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ // Check we can't get the texture after we remove it.
+ manager_->RemoveTexture(kClient1Id);
+ EXPECT_TRUE(manager_->GetTexture(kClient1Id) == NULL);
+ EXPECT_EQ(0u, texture->client_id());
+}
+
+TEST_F(TextureManagerTest, SetParameter) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create texture.
+ manager_->CreateTexture(kClient1Id, kService1Id);
+ // Check texture got created.
+ TextureRef* texture_ref = manager_->GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+ manager_->SetTarget(texture_ref, GL_TEXTURE_2D);
+ SetParameter(texture_ref, GL_TEXTURE_MIN_FILTER, GL_NEAREST, GL_NO_ERROR);
+ EXPECT_EQ(static_cast<GLenum>(GL_NEAREST), texture->min_filter());
+ SetParameter(texture_ref, GL_TEXTURE_MAG_FILTER, GL_NEAREST, GL_NO_ERROR);
+ EXPECT_EQ(static_cast<GLenum>(GL_NEAREST), texture->mag_filter());
+ SetParameter(texture_ref, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ EXPECT_EQ(static_cast<GLenum>(GL_CLAMP_TO_EDGE), texture->wrap_s());
+ SetParameter(texture_ref, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ EXPECT_EQ(static_cast<GLenum>(GL_CLAMP_TO_EDGE), texture->wrap_t());
+ SetParameter(texture_ref, GL_TEXTURE_MAX_ANISOTROPY_EXT, 1, GL_NO_ERROR);
+ SetParameter(texture_ref, GL_TEXTURE_MAX_ANISOTROPY_EXT, 2, GL_NO_ERROR);
+ SetParameter(
+ texture_ref, GL_TEXTURE_MIN_FILTER, GL_CLAMP_TO_EDGE, GL_INVALID_ENUM);
+ EXPECT_EQ(static_cast<GLenum>(GL_NEAREST), texture->min_filter());
+ SetParameter(
+ texture_ref, GL_TEXTURE_MAG_FILTER, GL_CLAMP_TO_EDGE, GL_INVALID_ENUM);
+  EXPECT_EQ(static_cast<GLenum>(GL_NEAREST), texture->mag_filter());
+ SetParameter(texture_ref, GL_TEXTURE_WRAP_S, GL_NEAREST, GL_INVALID_ENUM);
+ EXPECT_EQ(static_cast<GLenum>(GL_CLAMP_TO_EDGE), texture->wrap_s());
+ SetParameter(texture_ref, GL_TEXTURE_WRAP_T, GL_NEAREST, GL_INVALID_ENUM);
+ EXPECT_EQ(static_cast<GLenum>(GL_CLAMP_TO_EDGE), texture->wrap_t());
+ SetParameter(texture_ref, GL_TEXTURE_MAX_ANISOTROPY_EXT, 0, GL_INVALID_VALUE);
+}
+
+TEST_F(TextureManagerTest, UseDefaultTexturesTrue) {
+ bool use_default_textures = true;
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "GL_ANGLE_texture_usage", use_default_textures);
+ TextureManager manager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ use_default_textures);
+ manager.Initialize();
+
+ EXPECT_TRUE(manager.GetDefaultTextureInfo(GL_TEXTURE_2D) != NULL);
+ EXPECT_TRUE(manager.GetDefaultTextureInfo(GL_TEXTURE_CUBE_MAP) != NULL);
+
+ // TODO(vmiura): Test GL_TEXTURE_EXTERNAL_OES & GL_TEXTURE_RECTANGLE_ARB.
+
+ manager.Destroy(false);
+}
+
+TEST_F(TextureManagerTest, UseDefaultTexturesFalse) {
+ bool use_default_textures = false;
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "GL_ANGLE_texture_usage", use_default_textures);
+ TextureManager manager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ use_default_textures);
+ manager.Initialize();
+
+ EXPECT_TRUE(manager.GetDefaultTextureInfo(GL_TEXTURE_2D) == NULL);
+ EXPECT_TRUE(manager.GetDefaultTextureInfo(GL_TEXTURE_CUBE_MAP) == NULL);
+
+ // TODO(vmiura): Test GL_TEXTURE_EXTERNAL_OES & GL_TEXTURE_RECTANGLE_ARB.
+
+ manager.Destroy(false);
+}
+
+TEST_F(TextureManagerTest, TextureUsageExt) {
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "GL_ANGLE_texture_usage", kUseDefaultTextures);
+ TextureManager manager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.Initialize();
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ // Check we can create texture.
+ manager.CreateTexture(kClient1Id, kService1Id);
+ // Check texture got created.
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), &manager, texture_ref,
+ GL_TEXTURE_USAGE_ANGLE, GL_FRAMEBUFFER_ATTACHMENT_ANGLE, GL_NO_ERROR);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_ATTACHMENT_ANGLE),
+ texture_ref->texture()->usage());
+ manager.Destroy(false);
+}
+
+TEST_F(TextureManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "", kUseDefaultTextures);
+ TextureManager manager(NULL,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.Initialize();
+ // Check we can create texture.
+ manager.CreateTexture(kClient1Id, kService1Id);
+ // Check texture got created.
+ TextureRef* texture = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture != NULL);
+ EXPECT_CALL(*gl_, DeleteTextures(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ TestHelper::SetupTextureManagerDestructionExpectations(
+ gl_.get(), "", kUseDefaultTextures);
+ manager.Destroy(true);
+ // Check that resources got freed.
+ texture = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture == NULL);
+}
+
+TEST_F(TextureManagerTest, MaxValues) {
+ // Check we get the right values for the max sizes.
+ EXPECT_EQ(kMax2dLevels, manager_->MaxLevelsForTarget(GL_TEXTURE_2D));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_X));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_X));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_Y));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_Z));
+ EXPECT_EQ(kMaxCubeMapLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z));
+ EXPECT_EQ(kMaxExternalLevels,
+ manager_->MaxLevelsForTarget(GL_TEXTURE_EXTERNAL_OES));
+ EXPECT_EQ(kMaxTextureSize, manager_->MaxSizeForTarget(GL_TEXTURE_2D));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_X));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_X));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_Y));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_POSITIVE_Z));
+ EXPECT_EQ(kMaxCubeMapTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z));
+ EXPECT_EQ(kMaxExternalTextureSize,
+ manager_->MaxSizeForTarget(GL_TEXTURE_EXTERNAL_OES));
+}
+
+TEST_F(TextureManagerTest, ValidForTarget) {
+ // check 2d
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, 0, kMaxTextureSize, kMaxTextureSize, 1));
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, kMax2dLevels - 1, 1, 1, 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, kMax2dLevels - 1, 1, 2, 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, kMax2dLevels - 1, 2, 1, 1));
+ // check level out of range.
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, kMax2dLevels, kMaxTextureSize, 1, 1));
+ // check has depth.
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, kMax2dLevels, kMaxTextureSize, 1, 2));
+ // Check NPOT width on level 0
+ EXPECT_TRUE(manager_->ValidForTarget(GL_TEXTURE_2D, 0, 5, 2, 1));
+ // Check NPOT height on level 0
+ EXPECT_TRUE(manager_->ValidForTarget(GL_TEXTURE_2D, 0, 2, 5, 1));
+ // Check NPOT width on level 1
+ EXPECT_FALSE(manager_->ValidForTarget(GL_TEXTURE_2D, 1, 5, 2, 1));
+ // Check NPOT height on level 1
+ EXPECT_FALSE(manager_->ValidForTarget(GL_TEXTURE_2D, 1, 2, 5, 1));
+
+ // check cube
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, 0,
+ kMaxCubeMapTextureSize, kMaxCubeMapTextureSize, 1));
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, kMaxCubeMapLevels - 1, 1, 1, 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, kMaxCubeMapLevels - 1, 2, 2, 1));
+ // check level out of range.
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, kMaxCubeMapLevels,
+ kMaxCubeMapTextureSize, 1, 1));
+ // check not square.
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, kMaxCubeMapLevels,
+ kMaxCubeMapTextureSize, 1, 1));
+ // check has depth.
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, kMaxCubeMapLevels,
+ kMaxCubeMapTextureSize, 1, 2));
+
+ for (GLint level = 0; level < kMax2dLevels; ++level) {
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, level, kMaxTextureSize >> level, 1, 1));
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, level, 1, kMaxTextureSize >> level, 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, level, (kMaxTextureSize >> level) + 1, 1, 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_2D, level, 1, (kMaxTextureSize >> level) + 1, 1));
+ }
+
+ for (GLint level = 0; level < kMaxCubeMapLevels; ++level) {
+ EXPECT_TRUE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, level,
+ kMaxCubeMapTextureSize >> level,
+ kMaxCubeMapTextureSize >> level,
+ 1));
+ EXPECT_FALSE(manager_->ValidForTarget(
+ GL_TEXTURE_CUBE_MAP, level,
+ (kMaxCubeMapTextureSize >> level) * 2,
+ (kMaxCubeMapTextureSize >> level) * 2,
+ 1));
+ }
+}
+
+TEST_F(TextureManagerTest, ValidForTargetNPOT) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_npot");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ // Check NPOT width on level 0
+ EXPECT_TRUE(manager.ValidForTarget(GL_TEXTURE_2D, 0, 5, 2, 1));
+ // Check NPOT height on level 0
+ EXPECT_TRUE(manager.ValidForTarget(GL_TEXTURE_2D, 0, 2, 5, 1));
+ // Check NPOT width on level 1
+ EXPECT_TRUE(manager.ValidForTarget(GL_TEXTURE_2D, 1, 5, 2, 1));
+ // Check NPOT height on level 1
+ EXPECT_TRUE(manager.ValidForTarget(GL_TEXTURE_2D, 1, 2, 5, 1));
+ manager.Destroy(false);
+}
+
+class TextureTestBase : public GpuServiceTest {
+ public:
+ static const GLint kMaxTextureSize = 16;
+ static const GLint kMaxCubeMapTextureSize = 8;
+ static const GLint kMax2dLevels = 5;
+ static const GLint kMaxCubeMapLevels = 4;
+ static const GLuint kClient1Id = 1;
+ static const GLuint kService1Id = 11;
+ static const bool kUseDefaultTextures = false;
+
+ TextureTestBase()
+ : feature_info_(new FeatureInfo()) {
+ }
+ virtual ~TextureTestBase() {
+ texture_ref_ = NULL;
+ }
+
+ protected:
+ void SetUpBase(MemoryTracker* memory_tracker, std::string extensions) {
+ GpuServiceTest::SetUp();
+ if (!extensions.empty()) {
+ TestHelper::SetupFeatureInfoInitExpectations(gl_.get(),
+ extensions.c_str());
+ feature_info_->Initialize();
+ }
+
+ manager_.reset(new TextureManager(memory_tracker,
+ feature_info_.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures));
+ decoder_.reset(new ::testing::StrictMock<gles2::MockGLES2Decoder>());
+ error_state_.reset(new ::testing::StrictMock<gles2::MockErrorState>());
+ manager_->CreateTexture(kClient1Id, kService1Id);
+ texture_ref_ = manager_->GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref_.get() != NULL);
+ }
+
+ virtual void TearDown() {
+ if (texture_ref_.get()) {
+ // If it's not in the manager then setting texture_ref_ to NULL will
+ // delete the texture.
+ if (!texture_ref_->client_id()) {
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(*gl_,
+ DeleteTextures(1, ::testing::Pointee(texture_ref_->service_id())))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ texture_ref_ = NULL;
+ }
+ manager_->Destroy(false);
+ manager_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ void SetParameter(
+ TextureRef* texture_ref, GLenum pname, GLint value, GLenum error) {
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), manager_.get(),
+ texture_ref, pname, value, error);
+ }
+
+ scoped_ptr<MockGLES2Decoder> decoder_;
+ scoped_ptr<MockErrorState> error_state_;
+ scoped_refptr<FeatureInfo> feature_info_;
+ scoped_ptr<TextureManager> manager_;
+ scoped_refptr<TextureRef> texture_ref_;
+};
+
+class TextureTest : public TextureTestBase {
+ protected:
+ virtual void SetUp() {
+ SetUpBase(NULL, std::string());
+ }
+};
+
+class TextureMemoryTrackerTest : public TextureTestBase {
+ protected:
+ virtual void SetUp() {
+ mock_memory_tracker_ = new StrictMock<MockMemoryTracker>();
+ SetUpBase(mock_memory_tracker_.get(), std::string());
+ }
+
+ scoped_refptr<MockMemoryTracker> mock_memory_tracker_;
+};
+
+#define EXPECT_MEMORY_ALLOCATION_CHANGE(old_size, new_size, pool) \
+ EXPECT_CALL(*mock_memory_tracker_.get(), \
+ TrackMemoryAllocatedChange(old_size, new_size, pool)) \
+ .Times(1).RetiresOnSaturation()
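+
+// Note (added for clarity): the byte counts used with this macro below follow
+// from width * height * 4 bytes per RGBA/GL_UNSIGNED_BYTE texel, e.g.
+// 8 * 4 * 4 = 128 for an 8x4 level 0, matching the EstimatedSize tests.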
+
+TEST_F(TextureTest, Basic) {
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(0u, texture->target());
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(texture->IsImmutable());
+ EXPECT_EQ(static_cast<GLenum>(GL_NEAREST_MIPMAP_LINEAR),
+ texture->min_filter());
+ EXPECT_EQ(static_cast<GLenum>(GL_LINEAR), texture->mag_filter());
+ EXPECT_EQ(static_cast<GLenum>(GL_REPEAT), texture->wrap_s());
+ EXPECT_EQ(static_cast<GLenum>(GL_REPEAT), texture->wrap_t());
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_EQ(0u, texture->estimated_size());
+}
+
+TEST_F(TextureTest, SetTargetTexture2D) {
+ Texture* texture = texture_ref_->texture();
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(texture->IsImmutable());
+}
+
+TEST_F(TextureTest, SetTargetTextureExternalOES) {
+ Texture* texture = texture_ref_->texture();
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_EXTERNAL_OES);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(texture->IsImmutable());
+}
+
+TEST_F(TextureTest, ZeroSizeCanNotRender) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 0,
+ 0,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+}
+
+TEST_F(TextureTest, EstimatedSize) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_EQ(8u * 4u * 4u, texture_ref_->texture()->estimated_size());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 2,
+ GL_RGBA,
+ 8,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_EQ(8u * 4u * 4u * 2u, texture_ref_->texture()->estimated_size());
+}
+
+TEST_F(TextureMemoryTrackerTest, EstimatedSize) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 128, MemoryTracker::kUnmanaged);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(128, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 256, MemoryTracker::kUnmanaged);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 2,
+ GL_RGBA,
+ 8,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ // Add expectation for texture deletion.
+ EXPECT_MEMORY_ALLOCATION_CHANGE(256, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kUnmanaged);
+}
+
+TEST_F(TextureMemoryTrackerTest, SetParameterPool) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 128, MemoryTracker::kUnmanaged);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(128, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 128, MemoryTracker::kManaged);
+ SetParameter(texture_ref_.get(),
+ GL_TEXTURE_POOL_CHROMIUM,
+ GL_TEXTURE_POOL_MANAGED_CHROMIUM,
+ GL_NO_ERROR);
+ // Add expectation for texture deletion.
+ EXPECT_MEMORY_ALLOCATION_CHANGE(128, 0, MemoryTracker::kManaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kManaged);
+}
+
+TEST_F(TextureTest, POT2D) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ // Check Setting level 0 to POT
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ // Set filters to something that will work with a single mip.
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_MIN_FILTER, GL_LINEAR, GL_NO_ERROR);
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Set them back.
+ SetParameter(texture_ref_.get(),
+ GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR_MIPMAP_LINEAR,
+ GL_NO_ERROR);
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ // Make mips.
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Change a mip.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ // Set a level past the number of mips that would get generated.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 3,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ // Make mips.
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+}
+
+TEST_F(TextureMemoryTrackerTest, MarkMipmapsGenerated) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 64, MemoryTracker::kUnmanaged);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(64, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 84, MemoryTracker::kUnmanaged);
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_MEMORY_ALLOCATION_CHANGE(84, 0, MemoryTracker::kUnmanaged);
+ EXPECT_MEMORY_ALLOCATION_CHANGE(0, 0, MemoryTracker::kUnmanaged);
+}
+
+TEST_F(TextureTest, UnusedMips) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ // Set level zero to large size.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+  // Set level zero to a smaller size (leaves unused mips).
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Set an unused level to some size
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 4,
+ GL_RGBA,
+ 16,
+ 16,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+}
+
+TEST_F(TextureTest, NPOT2D) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ // Check Setting level 0 to NPOT
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 5,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_MIN_FILTER, GL_LINEAR, GL_NO_ERROR);
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Change it to POT.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+}
+
+TEST_F(TextureTest, NPOT2DNPOTOK) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_npot");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ Texture* texture = texture_ref->texture();
+
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ // Check Setting level 0 to NPOT
+ manager.SetLevelInfo(texture_ref,
+ GL_TEXTURE_2D, 0, GL_RGBA, 4, 5, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, true);
+ EXPECT_TRUE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager.CanGenerateMipmaps(texture_ref));
+ EXPECT_FALSE(manager.CanRender(texture_ref));
+ EXPECT_TRUE(manager.HaveUnrenderableTextures());
+ EXPECT_TRUE(manager.MarkMipmapsGenerated(texture_ref));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(manager.CanRender(texture_ref));
+ EXPECT_FALSE(manager.HaveUnrenderableTextures());
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, POTCubeMap) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_CUBE_MAP);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_CUBE_MAP), texture->target());
+ // Check Setting level 0 each face to POT
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_POSITIVE_X,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+
+ // Make mips.
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+
+ // Change a mip.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ 1,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(TextureTestHelper::IsNPOT(texture));
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(TextureTestHelper::IsCubeComplete(texture));
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ // Set a level past the number of mips that would get generated.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ 3,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(manager_->CanGenerateMipmaps(texture_ref_.get()));
+ // Make mips.
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_TRUE(TextureTestHelper::IsCubeComplete(texture));
+}
+
+TEST_F(TextureTest, GetLevelSize) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 5,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ GLsizei width = -1;
+ GLsizei height = -1;
+ Texture* texture = texture_ref_->texture();
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, -1, &width, &height));
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 1000, &width, &height));
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height));
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(4, width);
+ EXPECT_EQ(5, height);
+ manager_->RemoveTexture(kClient1Id);
+ EXPECT_TRUE(texture->GetLevelSize(GL_TEXTURE_2D, 1, &width, &height));
+ EXPECT_EQ(4, width);
+ EXPECT_EQ(5, height);
+}
+
+TEST_F(TextureTest, GetLevelType) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 5,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ GLenum type = 0;
+ GLenum format = 0;
+ Texture* texture = texture_ref_->texture();
+ EXPECT_FALSE(texture->GetLevelType(GL_TEXTURE_2D, -1, &type, &format));
+ EXPECT_FALSE(texture->GetLevelType(GL_TEXTURE_2D, 1000, &type, &format));
+ EXPECT_FALSE(texture->GetLevelType(GL_TEXTURE_2D, 0, &type, &format));
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &format));
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), format);
+ manager_->RemoveTexture(kClient1Id);
+ EXPECT_TRUE(texture->GetLevelType(GL_TEXTURE_2D, 1, &type, &format));
+ EXPECT_EQ(static_cast<GLenum>(GL_UNSIGNED_BYTE), type);
+ EXPECT_EQ(static_cast<GLenum>(GL_RGBA), format);
+}
+
+TEST_F(TextureTest, ValidForTexture) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 5,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ // Check bad face.
+ Texture* texture = texture_ref_->texture();
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ 1, 0, 0, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad level.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 0, 0, 0, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad xoffset.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, -1, 0, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad xoffset + width > width.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 1, 0, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad yoffset.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, -1, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad yoffset + height > height.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 1, 4, 5, GL_UNSIGNED_BYTE));
+ // Check bad width.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 0, 5, 5, GL_UNSIGNED_BYTE));
+ // Check bad height.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 0, 4, 6, GL_UNSIGNED_BYTE));
+ // Check bad type.
+ EXPECT_FALSE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 0, 4, 5, GL_UNSIGNED_SHORT_4_4_4_4));
+ // Check valid full size
+ EXPECT_TRUE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 0, 4, 5, GL_UNSIGNED_BYTE));
+  // Check valid partial size.
+ EXPECT_TRUE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 1, 1, 2, 3, GL_UNSIGNED_BYTE));
+ manager_->RemoveTexture(kClient1Id);
+ EXPECT_TRUE(texture->ValidForTexture(
+ GL_TEXTURE_2D, 1, 0, 0, 4, 5, GL_UNSIGNED_BYTE));
+}
+
+TEST_F(TextureTest, FloatNotLinear) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_float");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ manager.SetLevelInfo(texture_ref,
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 1, 0, GL_RGBA, GL_FLOAT, true);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), &manager,
+ texture_ref, GL_TEXTURE_MAG_FILTER, GL_NEAREST, GL_NO_ERROR);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), &manager, texture_ref,
+ GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST, GL_NO_ERROR);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, FloatLinear) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_float GL_OES_texture_float_linear");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ manager.SetLevelInfo(texture_ref,
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 1, 0, GL_RGBA, GL_FLOAT, true);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, HalfFloatNotLinear) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_half_float");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ manager.SetLevelInfo(texture_ref,
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 1, 0, GL_RGBA, GL_HALF_FLOAT_OES, true);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), &manager,
+ texture_ref, GL_TEXTURE_MAG_FILTER, GL_NEAREST, GL_NO_ERROR);
+ EXPECT_FALSE(TextureTestHelper::IsTextureComplete(texture));
+ TestHelper::SetTexParameteriWithExpectations(
+ gl_.get(), error_state_.get(), &manager, texture_ref,
+ GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST, GL_NO_ERROR);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, HalfFloatLinear) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_texture_half_float GL_OES_texture_half_float_linear");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ manager.SetLevelInfo(texture_ref,
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 1, 0, GL_RGBA, GL_HALF_FLOAT_OES, true);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, EGLImageExternal) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_OES_EGL_image_external");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_EXTERNAL_OES);
+ Texture* texture = texture_ref->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_EXTERNAL_OES), texture->target());
+ EXPECT_FALSE(manager.CanGenerateMipmaps(texture_ref));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, DepthTexture) {
+ TestHelper::SetupFeatureInfoInitExpectations(
+ gl_.get(), "GL_ANGLE_depth_texture");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->Initialize();
+ TextureManager manager(NULL,
+ feature_info.get(),
+ kMaxTextureSize,
+ kMaxCubeMapTextureSize,
+ kUseDefaultTextures);
+ manager.CreateTexture(kClient1Id, kService1Id);
+ TextureRef* texture_ref = manager.GetTexture(kClient1Id);
+ ASSERT_TRUE(texture_ref != NULL);
+ manager.SetTarget(texture_ref, GL_TEXTURE_2D);
+ manager.SetLevelInfo(
+ texture_ref, GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, 4, 4, 1, 0,
+ GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, false);
+ EXPECT_FALSE(manager.CanGenerateMipmaps(texture_ref));
+ manager.Destroy(false);
+}
+
+TEST_F(TextureTest, SafeUnsafe) {
+ static const GLuint kClient2Id = 2;
+ static const GLuint kService2Id = 12;
+ static const GLuint kClient3Id = 3;
+ static const GLuint kService3Id = 13;
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref_.get(), GL_TEXTURE_2D, 0, true);
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref_.get(), GL_TEXTURE_2D, 1, true);
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(2, texture->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref_.get(), GL_TEXTURE_2D, 0, true);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref_.get(), GL_TEXTURE_2D, 1, true);
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture->num_uncleared_mips());
+ manager_->MarkMipmapsGenerated(texture_ref_.get());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+
+ manager_->CreateTexture(kClient2Id, kService2Id);
+ scoped_refptr<TextureRef> texture_ref2(
+ manager_->GetTexture(kClient2Id));
+ ASSERT_TRUE(texture_ref2.get() != NULL);
+ manager_->SetTarget(texture_ref2.get(), GL_TEXTURE_2D);
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ Texture* texture2 = texture_ref2->texture();
+ EXPECT_EQ(0, texture2->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref2.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture2->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref2.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture2->num_uncleared_mips());
+
+ manager_->CreateTexture(kClient3Id, kService3Id);
+ scoped_refptr<TextureRef> texture_ref3(
+ manager_->GetTexture(kClient3Id));
+ ASSERT_TRUE(texture_ref3.get() != NULL);
+ manager_->SetTarget(texture_ref3.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref3.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ Texture* texture3 = texture_ref3->texture();
+ EXPECT_EQ(1, texture3->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref2.get(), GL_TEXTURE_2D, 0, true);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture2->num_uncleared_mips());
+ manager_->SetLevelCleared(texture_ref3.get(), GL_TEXTURE_2D, 0, true);
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture3->num_uncleared_mips());
+
+ manager_->SetLevelInfo(texture_ref2.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ manager_->SetLevelInfo(texture_ref3.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 8,
+ 8,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture2->num_uncleared_mips());
+ EXPECT_EQ(1, texture3->num_uncleared_mips());
+ manager_->RemoveTexture(kClient3Id);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ manager_->RemoveTexture(kClient2Id);
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_CALL(*gl_, DeleteTextures(1, ::testing::Pointee(kService2Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_ref2 = NULL;
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_CALL(*gl_, DeleteTextures(1, ::testing::Pointee(kService3Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_ref3 = NULL;
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+}
+
+TEST_F(TextureTest, ClearTexture) {
+ EXPECT_CALL(*decoder_, ClearLevel(_, _, _, _, _, _, _, _, _, _))
+ .WillRepeatedly(Return(true));
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(2, texture->num_uncleared_mips());
+ manager_->ClearRenderableLevels(decoder_.get(), texture_ref_.get());
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(2, texture->num_uncleared_mips());
+ manager_->ClearTextureLevel(
+ decoder_.get(), texture_ref_.get(), GL_TEXTURE_2D, 0);
+ EXPECT_FALSE(texture->SafeToRenderFrom());
+ EXPECT_TRUE(manager_->HaveUnsafeTextures());
+ EXPECT_TRUE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(1, texture->num_uncleared_mips());
+ manager_->ClearTextureLevel(
+ decoder_.get(), texture_ref_.get(), GL_TEXTURE_2D, 1);
+ EXPECT_TRUE(texture->SafeToRenderFrom());
+ EXPECT_FALSE(manager_->HaveUnsafeTextures());
+ EXPECT_FALSE(manager_->HaveUnclearedMips());
+ EXPECT_EQ(0, texture->num_uncleared_mips());
+}
+
+TEST_F(TextureTest, UseDeletedTexture) {
+ static const GLuint kClient2Id = 2;
+ static const GLuint kService2Id = 12;
+ // Make the default texture renderable
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ // Make a new texture
+ manager_->CreateTexture(kClient2Id, kService2Id);
+ scoped_refptr<TextureRef> texture_ref(
+ manager_->GetTexture(kClient2Id));
+ manager_->SetTarget(texture_ref.get(), GL_TEXTURE_2D);
+ EXPECT_FALSE(manager_->CanRender(texture_ref.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+ // Remove it.
+ manager_->RemoveTexture(kClient2Id);
+ EXPECT_FALSE(manager_->CanRender(texture_ref.get()));
+ EXPECT_TRUE(manager_->HaveUnrenderableTextures());
+  // Check that we can still manipulate it and it affects the manager.
+ manager_->SetLevelInfo(texture_ref.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_TRUE(manager_->CanRender(texture_ref.get()));
+ EXPECT_FALSE(manager_->HaveUnrenderableTextures());
+ EXPECT_CALL(*gl_, DeleteTextures(1, ::testing::Pointee(kService2Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_ref = NULL;
+}
+
+TEST_F(TextureTest, GetLevelImage) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 1) == NULL);
+ // Set image.
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ manager_->SetLevelImage(texture_ref_.get(), GL_TEXTURE_2D, 1, image.get());
+ EXPECT_FALSE(texture->GetLevelImage(GL_TEXTURE_2D, 1) == NULL);
+ // Remove it.
+ manager_->SetLevelImage(texture_ref_.get(), GL_TEXTURE_2D, 1, NULL);
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 1) == NULL);
+ manager_->SetLevelImage(texture_ref_.get(), GL_TEXTURE_2D, 1, image.get());
+ // Image should be reset when SetLevelInfo is called.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_TRUE(texture->GetLevelImage(GL_TEXTURE_2D, 1) == NULL);
+}
+
+namespace {
+
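+// Inserts |str| into |string_set| and returns true if it was already present,
+// so callers can detect duplicate signatures.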
+bool InSet(std::set<std::string>* string_set, const std::string& str) {
+ std::pair<std::set<std::string>::iterator, bool> result =
+ string_set->insert(str);
+ return !result.second;
+}
+
+} // anonymous namespace
+
+TEST_F(TextureTest, AddToSignature) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ std::string signature1;
+ std::string signature2;
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature1);
+
+ std::set<std::string> string_set;
+ EXPECT_FALSE(InSet(&string_set, signature1));
+
+  // Check that changing one thing makes a different signature.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 4,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ // check putting it back makes the same signature.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_EQ(signature1, signature2);
+
+ // Check setting cleared status does not change signature.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_EQ(signature1, signature2);
+
+ // Check changing other settings changes signature.
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 4,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 2,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_FLOAT,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ // put it back
+ manager_->SetLevelInfo(texture_ref_.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_EQ(signature1, signature2);
+
+ // check changing parameters changes signature.
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_MIN_FILTER, GL_NEAREST, GL_NO_ERROR);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ SetParameter(texture_ref_.get(),
+ GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST_MIPMAP_LINEAR,
+ GL_NO_ERROR);
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_MAG_FILTER, GL_NEAREST, GL_NO_ERROR);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_MAG_FILTER, GL_LINEAR, GL_NO_ERROR);
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+ SetParameter(texture_ref_.get(), GL_TEXTURE_WRAP_S, GL_REPEAT, GL_NO_ERROR);
+ SetParameter(
+ texture_ref_.get(), GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE, GL_NO_ERROR);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_FALSE(InSet(&string_set, signature2));
+
+  // Check that putting it back generates the same signature.
+ SetParameter(texture_ref_.get(), GL_TEXTURE_WRAP_T, GL_REPEAT, GL_NO_ERROR);
+ signature2.clear();
+ manager_->AddToSignature(texture_ref_.get(), GL_TEXTURE_2D, 1, &signature2);
+ EXPECT_EQ(signature1, signature2);
+
+  // Check the set was actually getting different signatures.
+ EXPECT_EQ(11u, string_set.size());
+}
+
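+// Exercises TextureManager::Produce()/Consume(): a Texture produced from one
+// TextureRef can be consumed under another client id with its level info,
+// images and estimated size preserved.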
+class ProduceConsumeTextureTest : public TextureTest,
+ public ::testing::WithParamInterface<GLenum> {
+ public:
+ virtual void SetUp() {
+ TextureTest::SetUpBase(NULL, "GL_OES_EGL_image_external");
+ manager_->CreateTexture(kClient2Id, kService2Id);
+ texture2_ = manager_->GetTexture(kClient2Id);
+
+ EXPECT_CALL(*decoder_.get(), GetErrorState())
+ .WillRepeatedly(Return(error_state_.get()));
+ }
+
+ virtual void TearDown() {
+ if (texture2_.get()) {
+ // If it's not in the manager then setting texture2_ to NULL will
+ // delete the texture.
+ if (!texture2_->client_id()) {
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(
+ *gl_,
+ DeleteTextures(1, ::testing::Pointee(texture2_->service_id())))
+ .Times(1).RetiresOnSaturation();
+ }
+ texture2_ = NULL;
+ }
+ TextureTest::TearDown();
+ }
+
+ protected:
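+  // Snapshot of a single level's parameters, used to compare level state
+  // before Produce() and after Consume().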
+ struct LevelInfo {
+ LevelInfo(GLenum target,
+ GLenum format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum type,
+ bool cleared)
+ : target(target),
+ format(format),
+ width(width),
+ height(height),
+ depth(depth),
+ border(border),
+ type(type),
+ cleared(cleared) {}
+
+ LevelInfo()
+ : target(0),
+ format(0),
+ width(-1),
+ height(-1),
+ depth(1),
+ border(0),
+ type(0),
+ cleared(false) {}
+
+ bool operator==(const LevelInfo& other) const {
+ return target == other.target && format == other.format &&
+ width == other.width && height == other.height &&
+ depth == other.depth && border == other.border &&
+ type == other.type && cleared == other.cleared;
+ }
+
+ GLenum target;
+ GLenum format;
+ GLsizei width;
+ GLsizei height;
+ GLsizei depth;
+ GLint border;
+ GLenum type;
+ bool cleared;
+ };
+
+ void SetLevelInfo(TextureRef* texture_ref,
+ GLint level,
+ const LevelInfo& info) {
+ manager_->SetLevelInfo(texture_ref,
+ info.target,
+ level,
+ info.format,
+ info.width,
+ info.height,
+ info.depth,
+ info.border,
+ info.format,
+ info.type,
+ info.cleared);
+ }
+
+ static LevelInfo GetLevelInfo(const TextureRef* texture_ref,
+ GLint target,
+ GLint level) {
+ const Texture* texture = texture_ref->texture();
+ LevelInfo info;
+ info.target = target;
+ EXPECT_TRUE(texture->GetLevelSize(target, level, &info.width,
+ &info.height));
+ EXPECT_TRUE(texture->GetLevelType(target, level, &info.type,
+ &info.format));
+ info.cleared = texture->IsLevelCleared(target, level);
+ return info;
+ }
+
+ Texture* Produce(TextureRef* texture_ref) {
+ Texture* texture = manager_->Produce(texture_ref);
+ EXPECT_TRUE(texture != NULL);
+ return texture;
+ }
+
+ void Consume(GLuint client_id, Texture* texture) {
+ EXPECT_TRUE(manager_->Consume(client_id, texture));
+ }
+
+ scoped_refptr<TextureRef> texture2_;
+
+ private:
+ static const GLuint kClient2Id;
+ static const GLuint kService2Id;
+};
+
+const GLuint ProduceConsumeTextureTest::kClient2Id = 2;
+const GLuint ProduceConsumeTextureTest::kService2Id = 12;
+
+TEST_F(ProduceConsumeTextureTest, ProduceConsume2D) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D), texture->target());
+ LevelInfo level0(
+ GL_TEXTURE_2D, GL_RGBA, 4, 4, 1, 0, GL_UNSIGNED_BYTE, true);
+ SetLevelInfo(texture_ref_.get(), 0, level0);
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture_ref_.get()));
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ LevelInfo level1 = GetLevelInfo(texture_ref_.get(), GL_TEXTURE_2D, 1);
+ LevelInfo level2 = GetLevelInfo(texture_ref_.get(), GL_TEXTURE_2D, 2);
+ Texture* produced_texture = Produce(texture_ref_.get());
+ EXPECT_EQ(produced_texture, texture);
+
+ // Make this texture bigger with more levels, and make sure they get
+ // clobbered correctly during Consume().
+ manager_->SetTarget(texture2_.get(), GL_TEXTURE_2D);
+ SetLevelInfo(
+ texture2_.get(),
+ 0,
+ LevelInfo(GL_TEXTURE_2D, GL_RGBA, 16, 16, 1, 0, GL_UNSIGNED_BYTE, false));
+ EXPECT_TRUE(manager_->MarkMipmapsGenerated(texture2_.get()));
+ texture = texture2_->texture();
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ EXPECT_EQ(1024U + 256U + 64U + 16U + 4U, texture->estimated_size());
+
+ GLuint client_id = texture2_->client_id();
+ manager_->RemoveTexture(client_id);
+ Consume(client_id, produced_texture);
+ scoped_refptr<TextureRef> restored_texture = manager_->GetTexture(client_id);
+ EXPECT_EQ(produced_texture, restored_texture->texture());
+ EXPECT_EQ(level0, GetLevelInfo(restored_texture.get(), GL_TEXTURE_2D, 0));
+ EXPECT_EQ(level1, GetLevelInfo(restored_texture.get(), GL_TEXTURE_2D, 1));
+ EXPECT_EQ(level2, GetLevelInfo(restored_texture.get(), GL_TEXTURE_2D, 2));
+ texture = restored_texture->texture();
+ EXPECT_EQ(64U + 16U + 4U, texture->estimated_size());
+ GLint w, h;
+ EXPECT_FALSE(texture->GetLevelSize(GL_TEXTURE_2D, 3, &w, &h));
+
+ // However the old texture ref still exists if it was referenced somewhere.
+ EXPECT_EQ(1024U + 256U + 64U + 16U + 4U,
+ texture2_->texture()->estimated_size());
+}
+
+TEST_F(ProduceConsumeTextureTest, ProduceConsumeClearRectangle) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_RECTANGLE_ARB);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_RECTANGLE_ARB), texture->target());
+ LevelInfo level0(
+ GL_TEXTURE_RECTANGLE_ARB, GL_RGBA, 1, 1, 1, 0, GL_UNSIGNED_BYTE, false);
+ SetLevelInfo(texture_ref_.get(), 0, level0);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ Texture* produced_texture = Produce(texture_ref_.get());
+ EXPECT_EQ(produced_texture, texture);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_RECTANGLE_ARB),
+ produced_texture->target());
+
+ GLuint client_id = texture2_->client_id();
+ manager_->RemoveTexture(client_id);
+ Consume(client_id, produced_texture);
+ scoped_refptr<TextureRef> restored_texture = manager_->GetTexture(client_id);
+ EXPECT_EQ(produced_texture, restored_texture->texture());
+
+ // See if we can clear the previously uncleared level now.
+ EXPECT_EQ(level0,
+ GetLevelInfo(restored_texture.get(), GL_TEXTURE_RECTANGLE_ARB, 0));
+ EXPECT_CALL(*decoder_, ClearLevel(_, _, _, _, _, _, _, _, _, _))
+ .WillRepeatedly(Return(true));
+ EXPECT_TRUE(manager_->ClearTextureLevel(
+ decoder_.get(), restored_texture.get(), GL_TEXTURE_RECTANGLE_ARB, 0));
+}
+
+TEST_F(ProduceConsumeTextureTest, ProduceConsumeExternal) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_EXTERNAL_OES);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_EXTERNAL_OES), texture->target());
+ LevelInfo level0(
+ GL_TEXTURE_EXTERNAL_OES, GL_RGBA, 1, 1, 1, 0, GL_UNSIGNED_BYTE, false);
+ SetLevelInfo(texture_ref_.get(), 0, level0);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ Texture* produced_texture = Produce(texture_ref_.get());
+ EXPECT_EQ(produced_texture, texture);
+
+ GLuint client_id = texture2_->client_id();
+ manager_->RemoveTexture(client_id);
+ Consume(client_id, produced_texture);
+ scoped_refptr<TextureRef> restored_texture = manager_->GetTexture(client_id);
+ EXPECT_EQ(produced_texture, restored_texture->texture());
+ EXPECT_EQ(level0,
+ GetLevelInfo(restored_texture.get(), GL_TEXTURE_EXTERNAL_OES, 0));
+}
+
+TEST_P(ProduceConsumeTextureTest, ProduceConsumeTextureWithImage) {
+ GLenum target = GetParam();
+ manager_->SetTarget(texture_ref_.get(), target);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(target), texture->target());
+ scoped_refptr<gfx::GLImage> image(new gfx::GLImageStub);
+ manager_->SetLevelInfo(texture_ref_.get(),
+ target,
+ 0,
+ GL_RGBA,
+ 0,
+ 0,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ manager_->SetLevelImage(texture_ref_.get(), target, 0, image.get());
+ GLuint service_id = texture->service_id();
+ Texture* produced_texture = Produce(texture_ref_.get());
+
+ GLuint client_id = texture2_->client_id();
+ manager_->RemoveTexture(client_id);
+ Consume(client_id, produced_texture);
+ scoped_refptr<TextureRef> restored_texture = manager_->GetTexture(client_id);
+ EXPECT_EQ(produced_texture, restored_texture->texture());
+ EXPECT_EQ(service_id, restored_texture->service_id());
+ EXPECT_EQ(image.get(), restored_texture->texture()->GetLevelImage(target, 0));
+}
+
+static const GLenum kTextureTargets[] = {GL_TEXTURE_2D, GL_TEXTURE_EXTERNAL_OES,
+ GL_TEXTURE_RECTANGLE_ARB, };
+
+INSTANTIATE_TEST_CASE_P(Target,
+ ProduceConsumeTextureTest,
+ ::testing::ValuesIn(kTextureTargets));
+
+TEST_F(ProduceConsumeTextureTest, ProduceConsumeCube) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_CUBE_MAP);
+ Texture* texture = texture_ref_->texture();
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_CUBE_MAP), texture->target());
+ LevelInfo face0(GL_TEXTURE_CUBE_MAP_POSITIVE_X,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_UNSIGNED_BYTE,
+ true);
+ LevelInfo face5(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ GL_RGBA,
+ 3,
+ 3,
+ 1,
+ 0,
+ GL_UNSIGNED_BYTE,
+ true);
+ SetLevelInfo(texture_ref_.get(), 0, face0);
+ SetLevelInfo(texture_ref_.get(), 0, face5);
+ EXPECT_TRUE(TextureTestHelper::IsTextureComplete(texture));
+ Texture* produced_texture = Produce(texture_ref_.get());
+ EXPECT_EQ(produced_texture, texture);
+
+ GLuint client_id = texture2_->client_id();
+ manager_->RemoveTexture(client_id);
+ Consume(client_id, produced_texture);
+ scoped_refptr<TextureRef> restored_texture = manager_->GetTexture(client_id);
+ EXPECT_EQ(produced_texture, restored_texture->texture());
+ EXPECT_EQ(
+ face0,
+ GetLevelInfo(restored_texture.get(), GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0));
+ EXPECT_EQ(
+ face5,
+ GetLevelInfo(restored_texture.get(), GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0));
+}
+
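+// Test double that tallies the bytes charged to each MemoryTracker pool so
+// tests can assert which tracker is accounting for a texture's memory.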
+class CountingMemoryTracker : public MemoryTracker {
+ public:
+ CountingMemoryTracker() {
+ current_size_[0] = 0;
+ current_size_[1] = 0;
+ }
+
+ virtual void TrackMemoryAllocatedChange(size_t old_size,
+ size_t new_size,
+ Pool pool) OVERRIDE {
+ DCHECK_LT(static_cast<size_t>(pool), arraysize(current_size_));
+ current_size_[pool] += new_size - old_size;
+ }
+
+ virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
+ return true;
+ }
+
+ size_t GetSize(Pool pool) {
+ DCHECK_LT(static_cast<size_t>(pool), arraysize(current_size_));
+ return current_size_[pool];
+ }
+
+ private:
+ virtual ~CountingMemoryTracker() {}
+
+ size_t current_size_[2];
+ DISALLOW_COPY_AND_ASSIGN(CountingMemoryTracker);
+};
+
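+// Tests a single Texture shared between two TextureManagers (consumed under a
+// second client id), each manager with its own memory tracker.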
+class SharedTextureTest : public GpuServiceTest {
+ public:
+ static const bool kUseDefaultTextures = false;
+
+ SharedTextureTest() : feature_info_(new FeatureInfo()) {}
+
+ virtual ~SharedTextureTest() {
+ }
+
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ memory_tracker1_ = new CountingMemoryTracker;
+ texture_manager1_.reset(
+ new TextureManager(memory_tracker1_.get(),
+ feature_info_.get(),
+ TextureManagerTest::kMaxTextureSize,
+ TextureManagerTest::kMaxCubeMapTextureSize,
+ kUseDefaultTextures));
+ memory_tracker2_ = new CountingMemoryTracker;
+ texture_manager2_.reset(
+ new TextureManager(memory_tracker2_.get(),
+ feature_info_.get(),
+ TextureManagerTest::kMaxTextureSize,
+ TextureManagerTest::kMaxCubeMapTextureSize,
+ kUseDefaultTextures));
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "", kUseDefaultTextures);
+ texture_manager1_->Initialize();
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), "", kUseDefaultTextures);
+ texture_manager2_->Initialize();
+ }
+
+ virtual void TearDown() {
+ texture_manager2_->Destroy(false);
+ texture_manager2_.reset();
+ texture_manager1_->Destroy(false);
+ texture_manager1_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ protected:
+ scoped_refptr<FeatureInfo> feature_info_;
+ scoped_refptr<CountingMemoryTracker> memory_tracker1_;
+ scoped_ptr<TextureManager> texture_manager1_;
+ scoped_refptr<CountingMemoryTracker> memory_tracker2_;
+ scoped_ptr<TextureManager> texture_manager2_;
+};
+
+TEST_F(SharedTextureTest, DeleteTextures) {
+ scoped_refptr<TextureRef> ref1 = texture_manager1_->CreateTexture(10, 10);
+ scoped_refptr<TextureRef> ref2 =
+ texture_manager2_->Consume(20, ref1->texture());
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(0);
+ ref1 = NULL;
+ texture_manager1_->RemoveTexture(10);
+ testing::Mock::VerifyAndClearExpectations(gl_.get());
+
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ ref2 = NULL;
+ texture_manager2_->RemoveTexture(20);
+ testing::Mock::VerifyAndClearExpectations(gl_.get());
+}
+
+TEST_F(SharedTextureTest, TextureSafetyAccounting) {
+ EXPECT_FALSE(texture_manager1_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnclearedMips());
+ EXPECT_FALSE(texture_manager2_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnclearedMips());
+
+ // Newly created texture is renderable.
+ scoped_refptr<TextureRef> ref1 = texture_manager1_->CreateTexture(10, 10);
+ EXPECT_FALSE(texture_manager1_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnclearedMips());
+
+  // Associate the new texture ref with the other texture manager; it should
+  // account for the texture too.
+ scoped_refptr<TextureRef> ref2 =
+ texture_manager2_->Consume(20, ref1->texture());
+ EXPECT_FALSE(texture_manager2_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnclearedMips());
+
+  // Make the texture renderable but uncleared on one texture manager; this
+  // should affect the other one.
+ texture_manager1_->SetTarget(ref1.get(), GL_TEXTURE_2D);
+ EXPECT_TRUE(texture_manager1_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnclearedMips());
+ EXPECT_TRUE(texture_manager2_->HaveUnrenderableTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnclearedMips());
+
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+ EXPECT_FALSE(texture_manager1_->HaveUnrenderableTextures());
+ EXPECT_TRUE(texture_manager1_->HaveUnsafeTextures());
+ EXPECT_TRUE(texture_manager1_->HaveUnclearedMips());
+ EXPECT_FALSE(texture_manager2_->HaveUnrenderableTextures());
+ EXPECT_TRUE(texture_manager2_->HaveUnsafeTextures());
+ EXPECT_TRUE(texture_manager2_->HaveUnclearedMips());
+
+  // Mark the texture cleared on one texture manager; this should affect the
+  // other one.
+ texture_manager1_->SetLevelCleared(ref1.get(), GL_TEXTURE_2D, 0, true);
+ EXPECT_FALSE(texture_manager1_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager1_->HaveUnclearedMips());
+ EXPECT_FALSE(texture_manager2_->HaveUnsafeTextures());
+ EXPECT_FALSE(texture_manager2_->HaveUnclearedMips());
+
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_manager1_->RemoveTexture(10);
+ texture_manager2_->RemoveTexture(20);
+}
+
+TEST_F(SharedTextureTest, FBOCompletenessCheck) {
+ const GLenum kCompleteValue = GL_FRAMEBUFFER_COMPLETE;
+ FramebufferManager framebuffer_manager1(1, 1);
+ texture_manager1_->set_framebuffer_manager(&framebuffer_manager1);
+ FramebufferManager framebuffer_manager2(1, 1);
+ texture_manager2_->set_framebuffer_manager(&framebuffer_manager2);
+
+ scoped_refptr<TextureRef> ref1 = texture_manager1_->CreateTexture(10, 10);
+ framebuffer_manager1.CreateFramebuffer(10, 10);
+ scoped_refptr<Framebuffer> framebuffer1 =
+ framebuffer_manager1.GetFramebuffer(10);
+ framebuffer1->AttachTexture(
+ GL_COLOR_ATTACHMENT0, ref1.get(), GL_TEXTURE_2D, 0, 0);
+ EXPECT_FALSE(framebuffer_manager1.IsComplete(framebuffer1.get()));
+ EXPECT_NE(kCompleteValue, framebuffer1->IsPossiblyComplete());
+
+ // Make FBO complete in manager 1.
+ texture_manager1_->SetTarget(ref1.get(), GL_TEXTURE_2D);
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_EQ(kCompleteValue, framebuffer1->IsPossiblyComplete());
+ framebuffer_manager1.MarkAsComplete(framebuffer1.get());
+ EXPECT_TRUE(framebuffer_manager1.IsComplete(framebuffer1.get()));
+
+ // Share texture with manager 2.
+ scoped_refptr<TextureRef> ref2 =
+ texture_manager2_->Consume(20, ref1->texture());
+ framebuffer_manager2.CreateFramebuffer(20, 20);
+ scoped_refptr<Framebuffer> framebuffer2 =
+ framebuffer_manager2.GetFramebuffer(20);
+ framebuffer2->AttachTexture(
+ GL_COLOR_ATTACHMENT0, ref2.get(), GL_TEXTURE_2D, 0, 0);
+ EXPECT_FALSE(framebuffer_manager2.IsComplete(framebuffer2.get()));
+ EXPECT_EQ(kCompleteValue, framebuffer2->IsPossiblyComplete());
+ framebuffer_manager2.MarkAsComplete(framebuffer2.get());
+ EXPECT_TRUE(framebuffer_manager2.IsComplete(framebuffer2.get()));
+
+ // Change level for texture, both FBOs should be marked incomplete
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1,
+ 1,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(framebuffer_manager1.IsComplete(framebuffer1.get()));
+ EXPECT_EQ(kCompleteValue, framebuffer1->IsPossiblyComplete());
+ framebuffer_manager1.MarkAsComplete(framebuffer1.get());
+ EXPECT_TRUE(framebuffer_manager1.IsComplete(framebuffer1.get()));
+ EXPECT_FALSE(framebuffer_manager2.IsComplete(framebuffer2.get()));
+ EXPECT_EQ(kCompleteValue, framebuffer2->IsPossiblyComplete());
+ framebuffer_manager2.MarkAsComplete(framebuffer2.get());
+ EXPECT_TRUE(framebuffer_manager2.IsComplete(framebuffer2.get()));
+
+ EXPECT_CALL(*gl_, DeleteFramebuffersEXT(1, _))
+ .Times(2)
+ .RetiresOnSaturation();
+ framebuffer_manager1.RemoveFramebuffer(10);
+ framebuffer_manager2.RemoveFramebuffer(20);
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_manager1_->RemoveTexture(10);
+ texture_manager2_->RemoveTexture(20);
+}
+
+TEST_F(SharedTextureTest, Memory) {
+ size_t initial_memory1 = memory_tracker1_->GetSize(MemoryTracker::kUnmanaged);
+ size_t initial_memory2 = memory_tracker2_->GetSize(MemoryTracker::kUnmanaged);
+
+ // Newly created texture is unrenderable.
+ scoped_refptr<TextureRef> ref1 = texture_manager1_->CreateTexture(10, 10);
+ texture_manager1_->SetTarget(ref1.get(), GL_TEXTURE_2D);
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 10,
+ 10,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ false);
+
+ EXPECT_LT(0u, ref1->texture()->estimated_size());
+ EXPECT_EQ(initial_memory1 + ref1->texture()->estimated_size(),
+ memory_tracker1_->GetSize(MemoryTracker::kUnmanaged));
+
+  // Associate a new texture ref with the other texture manager; it does not
+  // account for the texture memory, so the first memory tracker still has it.
+ scoped_refptr<TextureRef> ref2 =
+ texture_manager2_->Consume(20, ref1->texture());
+ EXPECT_EQ(initial_memory1 + ref1->texture()->estimated_size(),
+ memory_tracker1_->GetSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(initial_memory2,
+ memory_tracker2_->GetSize(MemoryTracker::kUnmanaged));
+
+ // Delete the texture, memory should go to the remaining tracker.
+ texture_manager1_->RemoveTexture(10);
+ ref1 = NULL;
+ EXPECT_EQ(initial_memory1,
+ memory_tracker1_->GetSize(MemoryTracker::kUnmanaged));
+ EXPECT_EQ(initial_memory2 + ref2->texture()->estimated_size(),
+ memory_tracker2_->GetSize(MemoryTracker::kUnmanaged));
+
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ ref2 = NULL;
+ texture_manager2_->RemoveTexture(20);
+ EXPECT_EQ(initial_memory2,
+ memory_tracker2_->GetSize(MemoryTracker::kUnmanaged));
+}
+
+TEST_F(SharedTextureTest, Images) {
+ scoped_refptr<TextureRef> ref1 = texture_manager1_->CreateTexture(10, 10);
+ scoped_refptr<TextureRef> ref2 =
+ texture_manager2_->Consume(20, ref1->texture());
+
+ texture_manager1_->SetTarget(ref1.get(), GL_TEXTURE_2D);
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(ref1->texture()->HasImages());
+ EXPECT_FALSE(ref2->texture()->HasImages());
+ EXPECT_FALSE(texture_manager1_->HaveImages());
+ EXPECT_FALSE(texture_manager2_->HaveImages());
+ scoped_refptr<gfx::GLImage> image1(new gfx::GLImageStub);
+ texture_manager1_->SetLevelImage(ref1.get(), GL_TEXTURE_2D, 1, image1.get());
+ EXPECT_TRUE(ref1->texture()->HasImages());
+ EXPECT_TRUE(ref2->texture()->HasImages());
+ EXPECT_TRUE(texture_manager1_->HaveImages());
+ EXPECT_TRUE(texture_manager2_->HaveImages());
+ scoped_refptr<gfx::GLImage> image2(new gfx::GLImageStub);
+ texture_manager1_->SetLevelImage(ref1.get(), GL_TEXTURE_2D, 1, image2.get());
+ EXPECT_TRUE(ref1->texture()->HasImages());
+ EXPECT_TRUE(ref2->texture()->HasImages());
+ EXPECT_TRUE(texture_manager1_->HaveImages());
+ EXPECT_TRUE(texture_manager2_->HaveImages());
+ texture_manager1_->SetLevelInfo(ref1.get(),
+ GL_TEXTURE_2D,
+ 1,
+ GL_RGBA,
+ 2,
+ 2,
+ 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ true);
+ EXPECT_FALSE(ref1->texture()->HasImages());
+ EXPECT_FALSE(ref2->texture()->HasImages());
+ EXPECT_FALSE(texture_manager1_->HaveImages());
+ EXPECT_FALSE(texture_manager1_->HaveImages());
+
+ EXPECT_CALL(*gl_, DeleteTextures(1, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ texture_manager1_->RemoveTexture(10);
+ texture_manager2_->RemoveTexture(20);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/service/transfer_buffer_manager.cc b/gpu/command_buffer/service/transfer_buffer_manager.cc
new file mode 100644
index 0000000..4404a9e
--- /dev/null
+++ b/gpu/command_buffer/service/transfer_buffer_manager.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/debug/trace_event.h"
+#include "base/process/process_handle.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+
+using ::base::SharedMemory;
+
+namespace gpu {
+
+TransferBufferManagerInterface::~TransferBufferManagerInterface() {
+}
+
+TransferBufferManager::TransferBufferManager()
+ : shared_memory_bytes_allocated_(0) {
+}
+
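+// Drops any buffers that were never explicitly destroyed and unwinds their
+// byte accounting; the DCHECK verifies nothing was leaked.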
+TransferBufferManager::~TransferBufferManager() {
+ while (!registered_buffers_.empty()) {
+ BufferMap::iterator it = registered_buffers_.begin();
+ DCHECK(shared_memory_bytes_allocated_ >= it->second->size());
+ shared_memory_bytes_allocated_ -= it->second->size();
+ registered_buffers_.erase(it);
+ }
+ DCHECK(!shared_memory_bytes_allocated_);
+}
+
+bool TransferBufferManager::Initialize() {
+ return true;
+}
+
+bool TransferBufferManager::RegisterTransferBuffer(
+ int32 id,
+ scoped_ptr<BufferBacking> buffer_backing) {
+ if (id <= 0) {
+ DVLOG(0) << "Cannot register transfer buffer with non-positive ID.";
+ return false;
+ }
+
+ // Fail if the ID is in use.
+ if (registered_buffers_.find(id) != registered_buffers_.end()) {
+ DVLOG(0) << "Buffer ID already in use.";
+ return false;
+ }
+
+ // Register the shared memory with the ID.
+ scoped_refptr<Buffer> buffer(new gpu::Buffer(buffer_backing.Pass()));
+
+ // Check buffer alignment is sane.
+ DCHECK(!(reinterpret_cast<uintptr_t>(buffer->memory()) &
+ (kCommandBufferEntrySize - 1)));
+
+ shared_memory_bytes_allocated_ += buffer->size();
+ TRACE_COUNTER_ID1(
+ "gpu", "GpuTransferBufferMemory", this, shared_memory_bytes_allocated_);
+
+ registered_buffers_[id] = buffer;
+
+ return true;
+}
+
+void TransferBufferManager::DestroyTransferBuffer(int32 id) {
+ BufferMap::iterator it = registered_buffers_.find(id);
+ if (it == registered_buffers_.end()) {
+ DVLOG(0) << "Transfer buffer ID was not registered.";
+ return;
+ }
+
+ DCHECK(shared_memory_bytes_allocated_ >= it->second->size());
+ shared_memory_bytes_allocated_ -= it->second->size();
+ TRACE_COUNTER_ID1(
+ "gpu", "GpuTransferBufferMemory", this, shared_memory_bytes_allocated_);
+
+ registered_buffers_.erase(it);
+}
+
+scoped_refptr<Buffer> TransferBufferManager::GetTransferBuffer(int32 id) {
+ if (id == 0)
+ return NULL;
+
+ BufferMap::iterator it = registered_buffers_.find(id);
+ if (it == registered_buffers_.end())
+ return NULL;
+
+ return it->second;
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/service/transfer_buffer_manager.h b/gpu/command_buffer/service/transfer_buffer_manager.h
new file mode 100644
index 0000000..d8bb3bb
--- /dev/null
+++ b/gpu/command_buffer/service/transfer_buffer_manager.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_TRANSFER_BUFFER_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_TRANSFER_BUFFER_MANAGER_H_
+
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/shared_memory.h"
+#include "gpu/command_buffer/common/command_buffer_shared.h"
+
+namespace gpu {
+
+class GPU_EXPORT TransferBufferManagerInterface {
+ public:
+ virtual ~TransferBufferManagerInterface();
+
+ virtual bool RegisterTransferBuffer(int32 id,
+ scoped_ptr<BufferBacking> buffer) = 0;
+ virtual void DestroyTransferBuffer(int32 id) = 0;
+ virtual scoped_refptr<Buffer> GetTransferBuffer(int32 id) = 0;
+};
+
+class GPU_EXPORT TransferBufferManager
+ : public TransferBufferManagerInterface {
+ public:
+ TransferBufferManager();
+
+ bool Initialize();
+ virtual bool RegisterTransferBuffer(int32 id,
+ scoped_ptr<BufferBacking> buffer_backing)
+ OVERRIDE;
+ virtual void DestroyTransferBuffer(int32 id) OVERRIDE;
+ virtual scoped_refptr<Buffer> GetTransferBuffer(int32 id) OVERRIDE;
+
+ private:
+ virtual ~TransferBufferManager();
+
+ typedef base::hash_map<int32, scoped_refptr<Buffer> > BufferMap;
+ BufferMap registered_buffers_;
+ size_t shared_memory_bytes_allocated_;
+
+ DISALLOW_COPY_AND_ASSIGN(TransferBufferManager);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_TRANSFER_BUFFER_MANAGER_H_
diff --git a/gpu/command_buffer/service/transfer_buffer_manager_unittest.cc b/gpu/command_buffer/service/transfer_buffer_manager_unittest.cc
new file mode 100644
index 0000000..b047978
--- /dev/null
+++ b/gpu/command_buffer/service/transfer_buffer_manager_unittest.cc
@@ -0,0 +1,113 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using base::SharedMemory;
+
+namespace gpu {
+
+const static size_t kBufferSize = 1024;
+
+class TransferBufferManagerTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ ASSERT_TRUE(manager->Initialize());
+ }
+
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+};
+
+TEST_F(TransferBufferManagerTest, ZeroHandleMapsToNull) {
+ EXPECT_TRUE(NULL == transfer_buffer_manager_->GetTransferBuffer(0).get());
+}
+
+TEST_F(TransferBufferManagerTest, NegativeHandleMapsToNull) {
+ EXPECT_TRUE(NULL == transfer_buffer_manager_->GetTransferBuffer(-1).get());
+}
+
+TEST_F(TransferBufferManagerTest, OutOfRangeHandleMapsToNull) {
+ EXPECT_TRUE(NULL == transfer_buffer_manager_->GetTransferBuffer(1).get());
+}
+
+TEST_F(TransferBufferManagerTest, CanRegisterTransferBuffer) {
+ scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
+ shm->CreateAndMapAnonymous(kBufferSize);
+ base::SharedMemory* shm_raw_pointer = shm.get();
+ scoped_ptr<SharedMemoryBufferBacking> backing(
+ new SharedMemoryBufferBacking(shm.Pass(), kBufferSize));
+ SharedMemoryBufferBacking* backing_raw_ptr = backing.get();
+
+ EXPECT_TRUE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, backing.PassAs<BufferBacking>()));
+ scoped_refptr<Buffer> registered =
+ transfer_buffer_manager_->GetTransferBuffer(1);
+
+  // Shared-memory ownership is transferred. It should be the same memory.
+ EXPECT_EQ(backing_raw_ptr, registered->backing());
+ EXPECT_EQ(shm_raw_pointer, backing_raw_ptr->shared_memory());
+}
+
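+// Minimal BufferBacking that reports a fixed dummy pointer and size; the
+// registration tests below never dereference the memory.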
+class FakeBufferBacking : public BufferBacking {
+ public:
+ virtual void* GetMemory() const OVERRIDE {
+ return reinterpret_cast<void*>(0xBADF00D0);
+ }
+ virtual size_t GetSize() const OVERRIDE { return 42; }
+ static scoped_ptr<BufferBacking> Make() {
+ return scoped_ptr<BufferBacking>(new FakeBufferBacking);
+ }
+};
+
+TEST_F(TransferBufferManagerTest, CanDestroyTransferBuffer) {
+ EXPECT_TRUE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, scoped_ptr<BufferBacking>(new FakeBufferBacking)));
+ transfer_buffer_manager_->DestroyTransferBuffer(1);
+ scoped_refptr<Buffer> registered =
+ transfer_buffer_manager_->GetTransferBuffer(1);
+
+ scoped_refptr<Buffer> null_buffer;
+ EXPECT_EQ(null_buffer, registered);
+}
+
+TEST_F(TransferBufferManagerTest, CannotRegregisterTransferBufferId) {
+ EXPECT_TRUE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, FakeBufferBacking::Make()));
+ EXPECT_FALSE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, FakeBufferBacking::Make()));
+ EXPECT_FALSE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, FakeBufferBacking::Make()));
+}
+
+TEST_F(TransferBufferManagerTest, CanReuseTransferBufferIdAfterDestroying) {
+ EXPECT_TRUE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, FakeBufferBacking::Make()));
+ transfer_buffer_manager_->DestroyTransferBuffer(1);
+ EXPECT_TRUE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 1, FakeBufferBacking::Make()));
+}
+
+TEST_F(TransferBufferManagerTest, DestroyUnusedTransferBufferIdDoesNotCrash) {
+ transfer_buffer_manager_->DestroyTransferBuffer(1);
+}
+
+TEST_F(TransferBufferManagerTest, CannotRegisterNullTransferBuffer) {
+ EXPECT_FALSE(transfer_buffer_manager_->RegisterTransferBuffer(
+ 0, FakeBufferBacking::Make()));
+}
+
+TEST_F(TransferBufferManagerTest, CannotRegisterNegativeTransferBufferId) {
+ scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
+ shm->CreateAndMapAnonymous(kBufferSize);
+ EXPECT_FALSE(transfer_buffer_manager_->RegisterTransferBuffer(
+ -1, FakeBufferBacking::Make()));
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/service/vertex_array_manager.cc b/gpu/command_buffer/service/vertex_array_manager.cc
new file mode 100644
index 0000000..1560c04
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_array_manager.cc
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+VertexArrayManager::VertexArrayManager()
+ : vertex_attrib_manager_count_(0),
+ have_context_(true) {
+}
+
+VertexArrayManager::~VertexArrayManager() {
+ DCHECK(vertex_attrib_managers_.empty());
+ CHECK_EQ(vertex_attrib_manager_count_, 0u);
+}
+
+void VertexArrayManager::Destroy(bool have_context) {
+ have_context_ = have_context;
+ vertex_attrib_managers_.clear();
+}
+
+scoped_refptr<VertexAttribManager>
+VertexArrayManager::CreateVertexAttribManager(GLuint client_id,
+ GLuint service_id,
+ uint32 num_vertex_attribs,
+ bool client_visible) {
+ scoped_refptr<VertexAttribManager> vertex_attrib_manager(
+ new VertexAttribManager(this, service_id, num_vertex_attribs));
+
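+  // Managers created with client_visible == false are not added to the map,
+  // so they cannot be looked up by client id.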
+ if (client_visible) {
+ std::pair<VertexAttribManagerMap::iterator, bool> result =
+ vertex_attrib_managers_.insert(
+ std::make_pair(client_id, vertex_attrib_manager));
+ DCHECK(result.second);
+ }
+
+ return vertex_attrib_manager;
+}
+
+VertexAttribManager* VertexArrayManager::GetVertexAttribManager(
+ GLuint client_id) {
+ VertexAttribManagerMap::iterator it = vertex_attrib_managers_.find(client_id);
+ return it != vertex_attrib_managers_.end() ? it->second.get() : NULL;
+}
+
+void VertexArrayManager::RemoveVertexAttribManager(GLuint client_id) {
+ VertexAttribManagerMap::iterator it = vertex_attrib_managers_.find(client_id);
+ if (it != vertex_attrib_managers_.end()) {
+ VertexAttribManager* vertex_attrib_manager = it->second.get();
+ vertex_attrib_manager->MarkAsDeleted();
+ vertex_attrib_managers_.erase(it);
+ }
+}
+
+void VertexArrayManager::StartTracking(
+ VertexAttribManager* /* vertex_attrib_manager */) {
+ ++vertex_attrib_manager_count_;
+}
+
+void VertexArrayManager::StopTracking(
+ VertexAttribManager* /* vertex_attrib_manager */) {
+ --vertex_attrib_manager_count_;
+}
+
+bool VertexArrayManager::GetClientId(
+ GLuint service_id, GLuint* client_id) const {
+ // This doesn't need to be fast. It's only used during slow queries.
+ for (VertexAttribManagerMap::const_iterator it =
+ vertex_attrib_managers_.begin();
+ it != vertex_attrib_managers_.end(); ++it) {
+ if (it->second->service_id() == service_id) {
+ *client_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/vertex_array_manager.h b/gpu/command_buffer/service/vertex_array_manager.h
new file mode 100644
index 0000000..97ecc1a
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_array_manager.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_VERTEX_ARRAY_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_VERTEX_ARRAY_MANAGER_H_
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class VertexAttribManager;
+
+// This class keeps track of the vertex arrays and their sizes so we can do
+// bounds checking.
+class GPU_EXPORT VertexArrayManager {
+ public:
+ VertexArrayManager();
+ ~VertexArrayManager();
+
+ // Must call before destruction.
+ void Destroy(bool have_context);
+
+  // Creates a VertexAttribManager and, if client_visible is true, maps it to
+  // the client_id.
+ scoped_refptr<VertexAttribManager> CreateVertexAttribManager(
+ GLuint client_id,
+ GLuint service_id,
+ uint32 num_vertex_attribs,
+ bool client_visible);
+
+ // Gets the vertex attrib manager for the given vertex array.
+ VertexAttribManager* GetVertexAttribManager(GLuint client_id);
+
+ // Removes the vertex attrib manager for the given vertex array.
+ void RemoveVertexAttribManager(GLuint client_id);
+
+ // Gets a client id for a given service id.
+ bool GetClientId(GLuint service_id, GLuint* client_id) const;
+
+ private:
+ friend class VertexAttribManager;
+
+ void StartTracking(VertexAttribManager* vertex_attrib_manager);
+ void StopTracking(VertexAttribManager* vertex_attrib_manager);
+
+ // Info for each vertex array in the system.
+ typedef base::hash_map<GLuint, scoped_refptr<VertexAttribManager> >
+ VertexAttribManagerMap;
+ VertexAttribManagerMap vertex_attrib_managers_;
+
+  // Counts the number of VertexAttribManagers allocated with 'this' as their
+  // manager. Allows checking that no VertexAttribManager will outlive this.
+ unsigned int vertex_attrib_manager_count_;
+
+ bool have_context_;
+
+ DISALLOW_COPY_AND_ASSIGN(VertexArrayManager);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_VERTEX_ARRAY_MANAGER_H_
diff --git a/gpu/command_buffer/service/vertex_array_manager_unittest.cc b/gpu/command_buffer/service/vertex_array_manager_unittest.cc
new file mode 100644
index 0000000..aa2df35
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_array_manager_unittest.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::Pointee;
+using ::testing::_;
+
+namespace gpu {
+namespace gles2 {
+
+class VertexArrayManagerTest : public GpuServiceTest {
+ public:
+ static const uint32 kNumVertexAttribs = 8;
+
+ VertexArrayManagerTest() {
+ }
+
+ virtual ~VertexArrayManagerTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+ manager_.reset(new VertexArrayManager());
+ }
+
+ virtual void TearDown() {
+ manager_.reset();
+ GpuServiceTest::TearDown();
+ }
+
+ scoped_ptr<VertexArrayManager> manager_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const uint32 VertexArrayManagerTest::kNumVertexAttribs;
+#endif
+
+TEST_F(VertexArrayManagerTest, Basic) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ const GLuint kClient2Id = 2;
+
+ // Check we can create
+ manager_->CreateVertexAttribManager(
+ kClient1Id, kService1Id, kNumVertexAttribs, true);
+ // Check creation success
+ VertexAttribManager* info1 = manager_->GetVertexAttribManager(kClient1Id);
+ ASSERT_TRUE(info1 != NULL);
+ EXPECT_EQ(kService1Id, info1->service_id());
+ GLuint client_id = 0;
+ EXPECT_TRUE(manager_->GetClientId(info1->service_id(), &client_id));
+ EXPECT_EQ(kClient1Id, client_id);
+ // Check we get nothing for a non-existent name.
+ EXPECT_TRUE(manager_->GetVertexAttribManager(kClient2Id) == NULL);
+  // Check that trying to remove a non-existent name does not crash.
+ manager_->RemoveVertexAttribManager(kClient2Id);
+ // Check that it gets deleted when the last reference is released.
+ EXPECT_CALL(*gl_, DeleteVertexArraysOES(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+  // Check we can't get the vertex attrib manager after we remove it.
+ manager_->RemoveVertexAttribManager(kClient1Id);
+ EXPECT_TRUE(manager_->GetVertexAttribManager(kClient1Id) == NULL);
+}
+
+TEST_F(VertexArrayManagerTest, Destroy) {
+ const GLuint kClient1Id = 1;
+ const GLuint kService1Id = 11;
+ VertexArrayManager manager;
+ // Check we can create
+ manager.CreateVertexAttribManager(
+ kClient1Id, kService1Id, kNumVertexAttribs, true);
+ // Check creation success
+ VertexAttribManager* info1 = manager.GetVertexAttribManager(kClient1Id);
+ ASSERT_TRUE(info1 != NULL);
+ EXPECT_CALL(*gl_, DeleteVertexArraysOES(1, ::testing::Pointee(kService1Id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ manager.Destroy(true);
+ // Check that resources got freed.
+ info1 = manager.GetVertexAttribManager(kClient1Id);
+ ASSERT_TRUE(info1 == NULL);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/service/vertex_attrib_manager.cc b/gpu/command_buffer/service/vertex_attrib_manager.cc
new file mode 100644
index 0000000..8725c4f
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_attrib_manager.cc
@@ -0,0 +1,278 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+
+#include <list>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "build/build_config.h"
+#define GLES2_GPU_SERVICE 1
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/vertex_array_manager.h"
+
+namespace gpu {
+namespace gles2 {
+
+VertexAttrib::VertexAttrib()
+ : index_(0),
+ enabled_(false),
+ size_(4),
+ type_(GL_FLOAT),
+ offset_(0),
+ normalized_(GL_FALSE),
+ gl_stride_(0),
+ real_stride_(16),
+ divisor_(0),
+ is_client_side_array_(false),
+ list_(NULL) {
+}
+
+VertexAttrib::~VertexAttrib() {
+}
+
+void VertexAttrib::SetInfo(
+ Buffer* buffer,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei gl_stride,
+ GLsizei real_stride,
+ GLsizei offset) {
+ DCHECK_GT(real_stride, 0);
+ buffer_ = buffer;
+ size_ = size;
+ type_ = type;
+ normalized_ = normalized;
+ gl_stride_ = gl_stride;
+ real_stride_ = real_stride;
+ offset_ = offset;
+}
+
+void VertexAttrib::Unbind(Buffer* buffer) {
+ if (buffer_.get() == buffer) {
+ buffer_ = NULL;
+ }
+}
+
+bool VertexAttrib::CanAccess(GLuint index) const {
+ if (!enabled_) {
+ return true;
+ }
+
+ if (!buffer_.get() || buffer_->IsDeleted()) {
+ return false;
+ }
+
+ // The number of elements that can be accessed.
+ GLsizeiptr buffer_size = buffer_->size();
+ if (offset_ > buffer_size || real_stride_ == 0) {
+ return false;
+ }
+
+ uint32 usable_size = buffer_size - offset_;
+ GLuint num_elements = usable_size / real_stride_ +
+ ((usable_size % real_stride_) >=
+ (GLES2Util::GetGLTypeSizeForTexturesAndBuffers(type_) * size_) ? 1 : 0);
+ return index < num_elements;
+}
+
+VertexAttribManager::VertexAttribManager()
+ : num_fixed_attribs_(0),
+ element_array_buffer_(NULL),
+ manager_(NULL),
+ deleted_(false),
+ service_id_(0) {
+}
+
+VertexAttribManager::VertexAttribManager(
+ VertexArrayManager* manager, GLuint service_id, uint32 num_vertex_attribs)
+ : num_fixed_attribs_(0),
+ element_array_buffer_(NULL),
+ manager_(manager),
+ deleted_(false),
+ service_id_(service_id) {
+ manager_->StartTracking(this);
+ Initialize(num_vertex_attribs, false);
+}
+
+VertexAttribManager::~VertexAttribManager() {
+ if (manager_) {
+ if (manager_->have_context_) {
+ if (service_id_ != 0) // 0 indicates an emulated VAO
+ glDeleteVertexArraysOES(1, &service_id_);
+ }
+ manager_->StopTracking(this);
+ manager_ = NULL;
+ }
+}
+
+void VertexAttribManager::Initialize(
+ uint32 max_vertex_attribs, bool init_attribs) {
+ vertex_attribs_.resize(max_vertex_attribs);
+
+ for (uint32 vv = 0; vv < vertex_attribs_.size(); ++vv) {
+ vertex_attribs_[vv].set_index(vv);
+ vertex_attribs_[vv].SetList(&disabled_vertex_attribs_);
+
+ if (init_attribs) {
+ glVertexAttrib4f(vv, 0.0f, 0.0f, 0.0f, 1.0f);
+ }
+ }
+}
+
+void VertexAttribManager::SetElementArrayBuffer(Buffer* buffer) {
+ element_array_buffer_ = buffer;
+}
+
+bool VertexAttribManager::Enable(GLuint index, bool enable) {
+ if (index >= vertex_attribs_.size()) {
+ return false;
+ }
+ VertexAttrib& info = vertex_attribs_[index];
+ if (info.enabled() != enable) {
+ info.set_enabled(enable);
+ info.SetList(enable ? &enabled_vertex_attribs_ : &disabled_vertex_attribs_);
+ }
+ return true;
+}
+
+void VertexAttribManager::Unbind(Buffer* buffer) {
+ if (element_array_buffer_.get() == buffer) {
+ element_array_buffer_ = NULL;
+ }
+ for (uint32 vv = 0; vv < vertex_attribs_.size(); ++vv) {
+ vertex_attribs_[vv].Unbind(buffer);
+ }
+}
+
+bool VertexAttribManager::ValidateBindings(
+ const char* function_name,
+ GLES2Decoder* decoder,
+ FeatureInfo* feature_info,
+ Program* current_program,
+ GLuint max_vertex_accessed,
+ bool instanced,
+ GLsizei primcount) {
+ DCHECK(primcount);
+ ErrorState* error_state = decoder->GetErrorState();
+  // True if any enabled attribute used by the program has a zero divisor.
+ bool divisor0 = false;
+ bool have_enabled_active_attribs = false;
+ const GLuint kInitialBufferId = 0xFFFFFFFFU;
+ GLuint current_buffer_id = kInitialBufferId;
+  bool use_client_side_arrays_for_stream_buffers =
+      feature_info->workarounds().use_client_side_arrays_for_stream_buffers;
+ // Validate all attribs currently enabled. If they are used by the current
+ // program then check that they have enough elements to handle the draw call.
+ // If they are not used by the current program check that they have a buffer
+ // assigned.
+ for (VertexAttribList::iterator it = enabled_vertex_attribs_.begin();
+ it != enabled_vertex_attribs_.end(); ++it) {
+ VertexAttrib* attrib = *it;
+ const Program::VertexAttrib* attrib_info =
+ current_program->GetAttribInfoByLocation(attrib->index());
+ if (attrib_info) {
+ divisor0 |= (attrib->divisor() == 0);
+ have_enabled_active_attribs = true;
+ GLuint count = attrib->MaxVertexAccessed(primcount, max_vertex_accessed);
+ // This attrib is used in the current program.
+ if (!attrib->CanAccess(count)) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ (std::string(
+ "attempt to access out of range vertices in attribute ") +
+ base::IntToString(attrib->index())).c_str());
+ return false;
+ }
+ if (use_client_side_arrays_for_stream_buffers) {
+ Buffer* buffer = attrib->buffer();
+ glEnableVertexAttribArray(attrib->index());
+ if (buffer->IsClientSideArray()) {
+ if (current_buffer_id != 0) {
+ current_buffer_id = 0;
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ }
+ attrib->set_is_client_side_array(true);
+ const void* ptr = buffer->GetRange(attrib->offset(), 0);
+ DCHECK(ptr);
+ glVertexAttribPointer(
+ attrib->index(),
+ attrib->size(),
+ attrib->type(),
+ attrib->normalized(),
+ attrib->gl_stride(),
+ ptr);
+ } else if (attrib->is_client_side_array()) {
+ attrib->set_is_client_side_array(false);
+ GLuint new_buffer_id = buffer->service_id();
+ if (new_buffer_id != current_buffer_id) {
+ current_buffer_id = new_buffer_id;
+ glBindBuffer(GL_ARRAY_BUFFER, current_buffer_id);
+ }
+ const void* ptr = reinterpret_cast<const void*>(attrib->offset());
+ glVertexAttribPointer(
+ attrib->index(),
+ attrib->size(),
+ attrib->type(),
+ attrib->normalized(),
+ attrib->gl_stride(),
+ ptr);
+ }
+ }
+ } else {
+ // This attrib is not used in the current program.
+ if (!attrib->buffer()) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ (std::string(
+ "attempt to render with no buffer attached to "
+ "enabled attribute ") +
+ base::IntToString(attrib->index())).c_str());
+ return false;
+ } else if (use_client_side_arrays_for_stream_buffers) {
+ Buffer* buffer = attrib->buffer();
+        // Disable client side arrays for unused attributes, else we'll
+        // read bad memory.
+ if (buffer->IsClientSideArray()) {
+ // Don't disable attrib 0 since it's special.
+ if (attrib->index() > 0) {
+ glDisableVertexAttribArray(attrib->index());
+ }
+ }
+ }
+ }
+ }
+
+ // Instanced drawing needs at least one enabled attribute with divisor zero.
+ // Non-instanced drawing is fine with having no attributes at all, but if
+ // there are attributes, at least one should have divisor zero.
+ // (See ANGLE_instanced_arrays spec)
+ if (!divisor0 && (instanced || have_enabled_active_attribs)) {
+ ERRORSTATE_SET_GL_ERROR(
+ error_state, GL_INVALID_OPERATION, function_name,
+ "attempt to draw with all attributes having non-zero divisors");
+ return false;
+ }
+
+ if (current_buffer_id != kInitialBufferId) {
+ // Restore the buffer binding.
+ decoder->RestoreBufferBindings();
+ }
+
+ return true;
+}
+
+} // namespace gles2
+} // namespace gpu
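The bounds check in VertexAttrib::CanAccess() above boils down to stride
arithmetic over the bound buffer. A standalone sketch of the same computation;
the helper name and example numbers are illustrative only:

    #include <stdint.h>

    // How many whole elements fit in the buffer: full strides, plus one more
    // if the trailing partial stride still holds a complete element.
    uint32_t NumAccessibleElements(uint32_t buffer_size, uint32_t offset,
                                   uint32_t real_stride,
                                   uint32_t element_size) {
      if (offset > buffer_size || real_stride == 0)
        return 0;
      uint32_t usable = buffer_size - offset;
      return usable / real_stride +
             ((usable % real_stride) >= element_size ? 1 : 0);
    }

    // Example matching the unit tests: a 32-byte buffer, offset 0, stride 16
    // and 4 floats (16 bytes) per element gives 2 elements, so attribute
    // indices 0 and 1 are accessible and index 2 is not.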
diff --git a/gpu/command_buffer/service/vertex_attrib_manager.h b/gpu/command_buffer/service/vertex_attrib_manager.h
new file mode 100644
index 0000000..73fa480
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_attrib_manager.h
@@ -0,0 +1,296 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_VERTEX_ATTRIB_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_VERTEX_ATTRIB_MANAGER_H_
+
+#include <list>
+#include <vector>
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "build/build_config.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/gl_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class FeatureInfo;
+class GLES2Decoder;
+class Program;
+class VertexArrayManager;
+
+// Info about a Vertex Attribute. This is used to track what the user currently
+// has bound on each Vertex Attribute so that checking can be done at
+// glDrawXXX time.
+class GPU_EXPORT VertexAttrib {
+ public:
+ typedef std::list<VertexAttrib*> VertexAttribList;
+
+ VertexAttrib();
+ ~VertexAttrib();
+
+ // Returns true if this VertexAttrib can access index.
+ bool CanAccess(GLuint index) const;
+
+ Buffer* buffer() const { return buffer_.get(); }
+
+ GLsizei offset() const {
+ return offset_;
+ }
+
+ GLuint index() const {
+ return index_;
+ }
+
+ GLint size() const {
+ return size_;
+ }
+
+ GLenum type() const {
+ return type_;
+ }
+
+ GLboolean normalized() const {
+ return normalized_;
+ }
+
+ GLsizei gl_stride() const {
+ return gl_stride_;
+ }
+
+ GLuint divisor() const {
+ return divisor_;
+ }
+
+ bool enabled() const {
+ return enabled_;
+ }
+
+ // Find the maximum vertex accessed, accounting for instancing.
+ GLuint MaxVertexAccessed(GLsizei primcount,
+ GLuint max_vertex_accessed) const {
+ return divisor_ ? ((primcount - 1) / divisor_) : max_vertex_accessed;
+ }
+
+ bool is_client_side_array() const {
+ return is_client_side_array_;
+ }
+
+ void set_is_client_side_array(bool value) {
+ is_client_side_array_ = value;
+ }
+
+ private:
+ friend class VertexAttribManager;
+
+ void set_enabled(bool enabled) {
+ enabled_ = enabled;
+ }
+
+ void set_index(GLuint index) {
+ index_ = index;
+ }
+
+ void SetList(VertexAttribList* new_list) {
+ DCHECK(new_list);
+
+ if (list_) {
+ list_->erase(it_);
+ }
+
+ it_ = new_list->insert(new_list->end(), this);
+ list_ = new_list;
+ }
+
+ void SetInfo(
+ Buffer* buffer,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei gl_stride,
+ GLsizei real_stride,
+ GLsizei offset);
+
+ void SetDivisor(GLsizei divisor) {
+ divisor_ = divisor;
+ }
+
+ void Unbind(Buffer* buffer);
+
+ // The index of this attrib.
+ GLuint index_;
+
+ // Whether or not this attribute is enabled.
+ bool enabled_;
+
+ // number of components (1, 2, 3, 4)
+ GLint size_;
+
+ // GL_BYTE, GL_FLOAT, etc. See glVertexAttribPointer.
+ GLenum type_;
+
+ // The offset into the buffer.
+ GLsizei offset_;
+
+ GLboolean normalized_;
+
+ // The stride passed to glVertexAttribPointer.
+ GLsizei gl_stride_;
+
+  // The stride that will be used to access the buffer. This is the actual
+  // stride, NOT the GL stride (which can legally be 0). In other words there
+  // is never a stride of 0 here.
+ GLsizei real_stride_;
+
+ GLsizei divisor_;
+
+ // Will be true if this was assigned to a client side array.
+ bool is_client_side_array_;
+
+ // The buffer bound to this attribute.
+ scoped_refptr<Buffer> buffer_;
+
+ // List this info is on.
+ VertexAttribList* list_;
+
+  // Iterator into the list this info is on (enabled or disabled).
+ VertexAttribList::iterator it_;
+};
+
+// Manages vertex attributes.
+// This class also acts as the service-side representation of a
+// vertex array object and its contained state.
+class GPU_EXPORT VertexAttribManager :
+ public base::RefCounted<VertexAttribManager> {
+ public:
+ typedef std::list<VertexAttrib*> VertexAttribList;
+
+ VertexAttribManager();
+
+ void Initialize(uint32 num_vertex_attribs, bool init_attribs);
+
+ bool Enable(GLuint index, bool enable);
+
+ bool HaveFixedAttribs() const {
+ return num_fixed_attribs_ != 0;
+ }
+
+ const VertexAttribList& GetEnabledVertexAttribs() const {
+ return enabled_vertex_attribs_;
+ }
+
+ VertexAttrib* GetVertexAttrib(GLuint index) {
+ if (index < vertex_attribs_.size()) {
+ return &vertex_attribs_[index];
+ }
+ return NULL;
+ }
+
+ void SetAttribInfo(
+ GLuint index,
+ Buffer* buffer,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei gl_stride,
+ GLsizei real_stride,
+ GLsizei offset) {
+ VertexAttrib* attrib = GetVertexAttrib(index);
+ if (attrib) {
+ if (attrib->type() == GL_FIXED) {
+ --num_fixed_attribs_;
+ }
+ if (type == GL_FIXED) {
+ ++num_fixed_attribs_;
+ }
+ attrib->SetInfo(
+ buffer, size, type, normalized, gl_stride, real_stride, offset);
+ }
+ }
+
+ void SetDivisor(GLuint index, GLuint divisor) {
+ VertexAttrib* attrib = GetVertexAttrib(index);
+ if (attrib) {
+ attrib->SetDivisor(divisor);
+ }
+ }
+
+ void SetElementArrayBuffer(Buffer* buffer);
+
+ Buffer* element_array_buffer() const { return element_array_buffer_.get(); }
+
+ GLuint service_id() const {
+ return service_id_;
+ }
+
+ void Unbind(Buffer* buffer);
+
+ bool IsDeleted() const {
+ return deleted_;
+ }
+
+ bool IsValid() const {
+ return !IsDeleted();
+ }
+
+ size_t num_attribs() const {
+ return vertex_attribs_.size();
+ }
+
+ bool ValidateBindings(
+ const char* function_name,
+ GLES2Decoder* decoder,
+ FeatureInfo* feature_info,
+ Program* current_program,
+ GLuint max_vertex_accessed,
+ bool instanced,
+ GLsizei primcount);
+
+ private:
+ friend class VertexArrayManager;
+ friend class VertexArrayManagerTest;
+ friend class base::RefCounted<VertexAttribManager>;
+
+ // Used when creating from a VertexArrayManager
+ VertexAttribManager(VertexArrayManager* manager, GLuint service_id,
+ uint32 num_vertex_attribs);
+
+ ~VertexAttribManager();
+
+ void MarkAsDeleted() {
+ deleted_ = true;
+ }
+
+ // number of attribs using type GL_FIXED.
+ int num_fixed_attribs_;
+
+ // Info for each vertex attribute saved so we can check at glDrawXXX time
+ // if it is safe to draw.
+ std::vector<VertexAttrib> vertex_attribs_;
+
+ // The currently bound element array buffer. If this is 0 it is illegal
+ // to call glDrawElements.
+ scoped_refptr<Buffer> element_array_buffer_;
+
+  // Lists of which vertex attribs are enabled and which are disabled.
+ VertexAttribList enabled_vertex_attribs_;
+ VertexAttribList disabled_vertex_attribs_;
+
+ // The VertexArrayManager that owns this VertexAttribManager
+ VertexArrayManager* manager_;
+
+ // True if deleted.
+ bool deleted_;
+
+ // Service side vertex array object id.
+ GLuint service_id_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_VERTEX_ATTRIB_MANAGER_H_
+
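VertexAttrib::MaxVertexAccessed() above folds the instancing divisor into the
bounds check. A hedged restatement of that formula; the example numbers are
made up for illustration:

    // divisor == 0: the attribute advances per vertex, so the draw call's
    // own max_vertex_accessed bounds it.
    // divisor  > 0: the attribute advances once every |divisor| instances,
    // so the highest index read is (primcount - 1) / divisor.
    //
    // e.g. primcount == 7 instances with divisor == 2 reads indices
    // 0, 0, 1, 1, 2, 2, 3 -> highest index 3 == (7 - 1) / 2.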
diff --git a/gpu/command_buffer/service/vertex_attrib_manager_unittest.cc b/gpu/command_buffer/service/vertex_attrib_manager_unittest.cc
new file mode 100644
index 0000000..e7fd690
--- /dev/null
+++ b/gpu/command_buffer/service/vertex_attrib_manager_unittest.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/error_state_mock.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_service_test.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::testing::Pointee;
+using ::testing::_;
+
+namespace gpu {
+namespace gles2 {
+
+class VertexAttribManagerTest : public GpuServiceTest {
+ public:
+ static const uint32 kNumVertexAttribs = 8;
+
+ VertexAttribManagerTest() {
+ }
+
+ virtual ~VertexAttribManagerTest() {
+ }
+
+ protected:
+ virtual void SetUp() {
+ GpuServiceTest::SetUp();
+
+ for (uint32 ii = 0; ii < kNumVertexAttribs; ++ii) {
+ EXPECT_CALL(*gl_, VertexAttrib4f(ii, 0.0f, 0.0f, 0.0f, 1.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+
+ manager_ = new VertexAttribManager();
+ manager_->Initialize(kNumVertexAttribs, true);
+ }
+
+ scoped_refptr<VertexAttribManager> manager_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const uint32 VertexAttribManagerTest::kNumVertexAttribs;
+#endif
+
+TEST_F(VertexAttribManagerTest, Basic) {
+ EXPECT_TRUE(manager_->GetVertexAttrib(kNumVertexAttribs) == NULL);
+ EXPECT_FALSE(manager_->HaveFixedAttribs());
+
+ const VertexAttribManager::VertexAttribList& enabled_attribs =
+ manager_->GetEnabledVertexAttribs();
+ EXPECT_EQ(0u, enabled_attribs.size());
+
+ for (uint32 ii = 0; ii < kNumVertexAttribs; ii += kNumVertexAttribs - 1) {
+ VertexAttrib* attrib = manager_->GetVertexAttrib(ii);
+ ASSERT_TRUE(attrib != NULL);
+ EXPECT_EQ(ii, attrib->index());
+ EXPECT_TRUE(attrib->buffer() == NULL);
+ EXPECT_EQ(0, attrib->offset());
+ EXPECT_EQ(4, attrib->size());
+ EXPECT_EQ(static_cast<GLenum>(GL_FLOAT), attrib->type());
+ EXPECT_EQ(GL_FALSE, attrib->normalized());
+ EXPECT_EQ(0, attrib->gl_stride());
+ EXPECT_FALSE(attrib->enabled());
+ manager_->Enable(ii, true);
+ EXPECT_TRUE(attrib->enabled());
+ }
+}
+
+TEST_F(VertexAttribManagerTest, Enable) {
+ const VertexAttribManager::VertexAttribList& enabled_attribs =
+ manager_->GetEnabledVertexAttribs();
+
+ VertexAttrib* attrib1 = manager_->GetVertexAttrib(1);
+ VertexAttrib* attrib2 = manager_->GetVertexAttrib(3);
+
+ manager_->Enable(1, true);
+ ASSERT_EQ(1u, enabled_attribs.size());
+ EXPECT_TRUE(attrib1->enabled());
+ manager_->Enable(3, true);
+ ASSERT_EQ(2u, enabled_attribs.size());
+ EXPECT_TRUE(attrib2->enabled());
+
+ manager_->Enable(1, false);
+ ASSERT_EQ(1u, enabled_attribs.size());
+ EXPECT_FALSE(attrib1->enabled());
+
+ manager_->Enable(3, false);
+ ASSERT_EQ(0u, enabled_attribs.size());
+ EXPECT_FALSE(attrib2->enabled());
+}
+
+TEST_F(VertexAttribManagerTest, SetAttribInfo) {
+ BufferManager buffer_manager(NULL, NULL);
+ buffer_manager.CreateBuffer(1, 2);
+ Buffer* buffer = buffer_manager.GetBuffer(1);
+ ASSERT_TRUE(buffer != NULL);
+
+ VertexAttrib* attrib = manager_->GetVertexAttrib(1);
+
+ manager_->SetAttribInfo(1, buffer, 3, GL_SHORT, GL_TRUE, 32, 32, 4);
+
+ EXPECT_EQ(buffer, attrib->buffer());
+ EXPECT_EQ(4, attrib->offset());
+ EXPECT_EQ(3, attrib->size());
+ EXPECT_EQ(static_cast<GLenum>(GL_SHORT), attrib->type());
+ EXPECT_EQ(GL_TRUE, attrib->normalized());
+ EXPECT_EQ(32, attrib->gl_stride());
+
+ // The VertexAttribManager must be destroyed before the BufferManager
+ // so it releases its buffers.
+ manager_ = NULL;
+ buffer_manager.Destroy(false);
+}
+
+TEST_F(VertexAttribManagerTest, HaveFixedAttribs) {
+ EXPECT_FALSE(manager_->HaveFixedAttribs());
+ manager_->SetAttribInfo(1, NULL, 4, GL_FIXED, GL_FALSE, 0, 16, 0);
+ EXPECT_TRUE(manager_->HaveFixedAttribs());
+ manager_->SetAttribInfo(3, NULL, 4, GL_FIXED, GL_FALSE, 0, 16, 0);
+ EXPECT_TRUE(manager_->HaveFixedAttribs());
+ manager_->SetAttribInfo(1, NULL, 4, GL_FLOAT, GL_FALSE, 0, 16, 0);
+ EXPECT_TRUE(manager_->HaveFixedAttribs());
+ manager_->SetAttribInfo(3, NULL, 4, GL_FLOAT, GL_FALSE, 0, 16, 0);
+ EXPECT_FALSE(manager_->HaveFixedAttribs());
+}
+
+TEST_F(VertexAttribManagerTest, CanAccess) {
+ MockErrorState error_state;
+ BufferManager buffer_manager(NULL, NULL);
+ buffer_manager.CreateBuffer(1, 2);
+ Buffer* buffer = buffer_manager.GetBuffer(1);
+ ASSERT_TRUE(buffer != NULL);
+
+ VertexAttrib* attrib = manager_->GetVertexAttrib(1);
+
+ EXPECT_TRUE(attrib->CanAccess(0));
+ manager_->Enable(1, true);
+ EXPECT_FALSE(attrib->CanAccess(0));
+
+ manager_->SetAttribInfo(1, buffer, 4, GL_FLOAT, GL_FALSE, 0, 16, 0);
+ EXPECT_FALSE(attrib->CanAccess(0));
+
+ EXPECT_TRUE(buffer_manager.SetTarget(buffer, GL_ARRAY_BUFFER));
+ TestHelper::DoBufferData(
+ gl_.get(), &error_state, &buffer_manager, buffer, 15, GL_STATIC_DRAW,
+ NULL, GL_NO_ERROR);
+
+ EXPECT_FALSE(attrib->CanAccess(0));
+ TestHelper::DoBufferData(
+ gl_.get(), &error_state, &buffer_manager, buffer, 16, GL_STATIC_DRAW,
+ NULL, GL_NO_ERROR);
+ EXPECT_TRUE(attrib->CanAccess(0));
+ EXPECT_FALSE(attrib->CanAccess(1));
+
+ manager_->SetAttribInfo(1, buffer, 4, GL_FLOAT, GL_FALSE, 0, 16, 1);
+ EXPECT_FALSE(attrib->CanAccess(0));
+
+ TestHelper::DoBufferData(
+ gl_.get(), &error_state, &buffer_manager, buffer, 32, GL_STATIC_DRAW,
+ NULL, GL_NO_ERROR);
+ EXPECT_TRUE(attrib->CanAccess(0));
+ EXPECT_FALSE(attrib->CanAccess(1));
+ manager_->SetAttribInfo(1, buffer, 4, GL_FLOAT, GL_FALSE, 0, 16, 0);
+ EXPECT_TRUE(attrib->CanAccess(1));
+ manager_->SetAttribInfo(1, buffer, 4, GL_FLOAT, GL_FALSE, 0, 20, 0);
+ EXPECT_TRUE(attrib->CanAccess(0));
+ EXPECT_FALSE(attrib->CanAccess(1));
+
+ // The VertexAttribManager must be destroyed before the BufferManager
+ // so it releases its buffers.
+ manager_ = NULL;
+ buffer_manager.Destroy(false);
+}
+
+TEST_F(VertexAttribManagerTest, Unbind) {
+ BufferManager buffer_manager(NULL, NULL);
+ buffer_manager.CreateBuffer(1, 2);
+ buffer_manager.CreateBuffer(3, 4);
+ Buffer* buffer1 = buffer_manager.GetBuffer(1);
+ Buffer* buffer2 = buffer_manager.GetBuffer(3);
+ ASSERT_TRUE(buffer1 != NULL);
+ ASSERT_TRUE(buffer2 != NULL);
+
+ VertexAttrib* attrib1 = manager_->GetVertexAttrib(1);
+ VertexAttrib* attrib3 = manager_->GetVertexAttrib(3);
+
+ // Attach to 2 buffers.
+ manager_->SetAttribInfo(1, buffer1, 3, GL_SHORT, GL_TRUE, 32, 32, 4);
+ manager_->SetAttribInfo(3, buffer1, 3, GL_SHORT, GL_TRUE, 32, 32, 4);
+ // Check they were attached.
+ EXPECT_EQ(buffer1, attrib1->buffer());
+ EXPECT_EQ(buffer1, attrib3->buffer());
+ // Unbind unattached buffer.
+ manager_->Unbind(buffer2);
+ // Should be no-op.
+ EXPECT_EQ(buffer1, attrib1->buffer());
+ EXPECT_EQ(buffer1, attrib3->buffer());
+ // Unbind buffer.
+ manager_->Unbind(buffer1);
+ // Check they were detached
+ EXPECT_TRUE(NULL == attrib1->buffer());
+ EXPECT_TRUE(NULL == attrib3->buffer());
+
+ // The VertexAttribManager must be destroyed before the BufferManager
+ // so it releases its buffers.
+ manager_ = NULL;
+ buffer_manager.Destroy(false);
+}
+
+// TODO(gman): Test ValidateBindings
+// TODO(gman): Test ValidateBindings with client side arrays.
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/tests/compressed_texture_test.cc b/gpu/command_buffer/tests/compressed_texture_test.cc
new file mode 100644
index 0000000..8c214b2
--- /dev/null
+++ b/gpu/command_buffer/tests/compressed_texture_test.cc
@@ -0,0 +1,255 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "base/basictypes.h"
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define SHADER(src) #src
+
+namespace gpu {
+
+static const uint16 kRedMask = 0xF800;
+static const uint16 kGreenMask = 0x07E0;
+static const uint16 kBlueMask = 0x001F;
+
+// Color palette in 565 format.
+static const uint16 kPalette[] = {
+ kGreenMask | kBlueMask, // Cyan.
+ kBlueMask | kRedMask, // Magenta.
+ kRedMask | kGreenMask, // Yellow.
+ 0x0000, // Black.
+ kRedMask, // Red.
+ kGreenMask, // Green.
+ kBlueMask, // Blue.
+ 0xFFFF, // White.
+};
+static const unsigned kBlockSize = 4;
+static const unsigned kPaletteSize = sizeof(kPalette) / sizeof(kPalette[0]);
+static const unsigned kTextureWidth = kBlockSize * kPaletteSize;
+static const unsigned kTextureHeight = kBlockSize;
+
+static const char* extension(GLenum format) {
+  switch (format) {
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ return "GL_EXT_texture_compression_dxt1";
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ return "GL_CHROMIUM_texture_compression_dxt3";
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ return "GL_CHROMIUM_texture_compression_dxt5";
+ default:
+ NOTREACHED();
+ }
+ return NULL;
+}
+
+// Index that chooses the given colors (color_0 and color_1),
+// not the interpolated colors (color_2 and color_3).
+static const uint16 kColor0 = 0x0000;
+static const uint16 kColor1 = 0x5555;
+
+static GLuint LoadCompressedTexture(const void* data,
+ GLsizeiptr size,
+ GLenum format,
+ GLsizei width,
+ GLsizei height) {
+ GLuint texture;
+ glGenTextures(1, &texture);
+ glBindTexture(GL_TEXTURE_2D, texture);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glCompressedTexImage2D(
+ GL_TEXTURE_2D, 0, format, width, height, 0, size, data);
+ return texture;
+}
+
+GLuint LoadTextureDXT1(bool alpha) {
+ const unsigned kStride = 4;
+ uint16 data[kStride * kPaletteSize];
+ for (unsigned i = 0; i < kPaletteSize; ++i) {
+ // Each iteration defines a 4x4 block of texture.
+ unsigned j = kStride * i;
+ data[j++] = kPalette[i]; // color_0.
+ data[j++] = kPalette[i]; // color_1.
+ data[j++] = kColor0; // color index.
+ data[j++] = kColor1; // color index.
+ }
+ GLenum format = alpha ?
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT : GL_COMPRESSED_RGB_S3TC_DXT1_EXT;
+ return LoadCompressedTexture(
+ data, sizeof(data), format, kTextureWidth, kTextureHeight);
+}
+
+GLuint LoadTextureDXT3() {
+ const unsigned kStride = 8;
+ const uint16 kOpaque = 0xFFFF;
+ uint16 data[kStride * kPaletteSize];
+ for (unsigned i = 0; i < kPaletteSize; ++i) {
+ // Each iteration defines a 4x4 block of texture.
+ unsigned j = kStride * i;
+ data[j++] = kOpaque; // alpha row 0.
+ data[j++] = kOpaque; // alpha row 1.
+ data[j++] = kOpaque; // alpha row 2.
+ data[j++] = kOpaque; // alpha row 3.
+ data[j++] = kPalette[i]; // color_0.
+ data[j++] = kPalette[i]; // color_1.
+ data[j++] = kColor0; // color index.
+ data[j++] = kColor1; // color index.
+ }
+ return LoadCompressedTexture(data,
+ sizeof(data),
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,
+ kTextureWidth,
+ kTextureHeight);
+}
+
+GLuint LoadTextureDXT5() {
+ const unsigned kStride = 8;
+ const uint16 kClear = 0x0000;
+ const uint16 kAlpha7 = 0xFFFF; // Opaque alpha index.
+ uint16 data[kStride * kPaletteSize];
+ for (unsigned i = 0; i < kPaletteSize; ++i) {
+ // Each iteration defines a 4x4 block of texture.
+ unsigned j = kStride * i;
+ data[j++] = kClear; // alpha_0 | alpha_1.
+ data[j++] = kAlpha7; // alpha index.
+ data[j++] = kAlpha7; // alpha index.
+ data[j++] = kAlpha7; // alpha index.
+ data[j++] = kPalette[i]; // color_0.
+ data[j++] = kPalette[i]; // color_1.
+ data[j++] = kColor0; // color index.
+ data[j++] = kColor1; // color index.
+ }
+ return LoadCompressedTexture(data,
+ sizeof(data),
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,
+ kTextureWidth,
+ kTextureHeight);
+}
+
+static void ToRGB888(uint16 rgb565, uint8 rgb888[]) {
+ uint8 r5 = (rgb565 & kRedMask) >> 11;
+ uint8 g6 = (rgb565 & kGreenMask) >> 5;
+ uint8 b5 = (rgb565 & kBlueMask);
+ // Replicate upper bits to lower empty bits.
+ rgb888[0] = (r5 << 3) | (r5 >> 2);
+ rgb888[1] = (g6 << 2) | (g6 >> 4);
+ rgb888[2] = (b5 << 3) | (b5 >> 2);
+}
+
+class CompressedTextureTest : public ::testing::TestWithParam<GLenum> {
+ protected:
+ virtual void SetUp() {
+ GLManager::Options options;
+ options.size = gfx::Size(kTextureWidth, kTextureHeight);
+ gl_.Initialize(options);
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ GLuint LoadProgram() {
+ const char* v_shader_src = SHADER(
+ attribute vec2 a_position;
+ varying vec2 v_texcoord;
+ void main() {
+ gl_Position = vec4(a_position, 0.0, 1.0);
+ v_texcoord = (a_position + 1.0) * 0.5;
+ }
+ );
+ const char* f_shader_src = SHADER(
+ precision mediump float;
+ uniform sampler2D u_texture;
+ varying vec2 v_texcoord;
+ void main() {
+ gl_FragColor = texture2D(u_texture, v_texcoord);
+ }
+ );
+ return GLTestHelper::LoadProgram(v_shader_src, f_shader_src);
+ }
+
+ GLuint LoadTexture(GLenum format) {
+ switch (format) {
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT: return LoadTextureDXT1(false);
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT: return LoadTextureDXT1(true);
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT: return LoadTextureDXT3();
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: return LoadTextureDXT5();
+ default: NOTREACHED();
+ }
+ return 0;
+ }
+
+ private:
+ GLManager gl_;
+};
+
+// The test draws a texture in the given format and verifies that the drawn
+// pixels are of the same color as the texture.
+// The texture consists of 4x4 blocks of texels (same as DXT), one for each
+// color defined in kPalette.
+TEST_P(CompressedTextureTest, Draw) {
+ GLenum format = GetParam();
+
+ // This test is only valid if compressed texture extension is supported.
+ const char* ext = extension(format);
+ if (!GLTestHelper::HasExtension(ext))
+ return;
+
+ // Load shader program.
+ GLuint program = LoadProgram();
+ ASSERT_NE(program, 0u);
+ GLint position_loc = glGetAttribLocation(program, "a_position");
+ GLint texture_loc = glGetUniformLocation(program, "u_texture");
+ ASSERT_NE(position_loc, -1);
+ ASSERT_NE(texture_loc, -1);
+ glUseProgram(program);
+
+ // Load geometry.
+ GLuint vbo = GLTestHelper::SetupUnitQuad(position_loc);
+ ASSERT_NE(vbo, 0u);
+
+ // Load texture.
+ GLuint texture = LoadTexture(format);
+ ASSERT_NE(texture, 0u);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, texture);
+ glUniform1i(texture_loc, 0);
+
+ // Draw.
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ glFlush();
+
+ // Verify results.
+ int origin[] = {0, 0};
+ uint8 expected_rgba[] = {0, 0, 0, 255};
+ for (unsigned i = 0; i < kPaletteSize; ++i) {
+ origin[0] = kBlockSize * i;
+ ToRGB888(kPalette[i], expected_rgba);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(origin[0], origin[1],
+ kBlockSize, kBlockSize,
+ 0, expected_rgba));
+ }
+ GLTestHelper::CheckGLError("CompressedTextureTest.Draw", __LINE__);
+}
+
+static const GLenum kFormats[] = {
+ GL_COMPRESSED_RGB_S3TC_DXT1_EXT,
+ GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,
+ GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,
+ GL_COMPRESSED_RGBA_S3TC_DXT5_EXT
+};
+INSTANTIATE_TEST_CASE_P(Format,
+ CompressedTextureTest,
+ ::testing::ValuesIn(kFormats));
+
+} // namespace gpu
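ToRGB888() above widens a 565 color by replicating each channel's high bits
into the vacated low bits, so a full-intensity channel maps to 255 rather than
248 or 252. A quick check of that arithmetic:

    // Red:   kRedMask   -> r5 = 31 -> (31 << 3) | (31 >> 2) = 248 | 7 = 255
    // Green: kGreenMask -> g6 = 63 -> (63 << 2) | (63 >> 4) = 252 | 3 = 255
    // Blue:  kBlueMask  -> b5 = 31 -> (31 << 3) | (31 >> 2) = 248 | 7 = 255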
diff --git a/gpu/command_buffer/tests/gl_bind_uniform_location_unittest.cc b/gpu/command_buffer/tests/gl_bind_uniform_location_unittest.cc
new file mode 100644
index 0000000..c313273
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_bind_uniform_location_unittest.cc
@@ -0,0 +1,224 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define SHADER(Src) #Src
+
+namespace gpu {
+
+class BindUniformLocationTest : public testing::Test {
+ protected:
+ static const GLsizei kResolution = 4;
+ virtual void SetUp() {
+ GLManager::Options options;
+ options.size = gfx::Size(kResolution, kResolution);
+ gl_.Initialize(options);
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ GLManager gl_;
+};
+
+TEST_F(BindUniformLocationTest, Basic) {
+ ASSERT_TRUE(
+ GLTestHelper::HasExtension("GL_CHROMIUM_bind_uniform_location"));
+
+ static const char* v_shader_str = SHADER(
+ attribute vec4 a_position;
+ void main()
+ {
+ gl_Position = a_position;
+ }
+ );
+ static const char* f_shader_str = SHADER(
+ precision mediump float;
+ uniform vec4 u_colorC;
+ uniform vec4 u_colorB[2];
+ uniform vec4 u_colorA;
+ void main()
+ {
+ gl_FragColor = u_colorA + u_colorB[0] + u_colorB[1] + u_colorC;
+ }
+ );
+
+ GLint color_a_location = 3;
+ GLint color_b_location = 10;
+ GLint color_c_location = 5;
+
+ GLuint vertex_shader = GLTestHelper::LoadShader(
+ GL_VERTEX_SHADER, v_shader_str);
+ GLuint fragment_shader = GLTestHelper::LoadShader(
+ GL_FRAGMENT_SHADER, f_shader_str);
+
+ GLuint program = glCreateProgram();
+
+ glBindUniformLocationCHROMIUM(program, color_a_location, "u_colorA");
+ glBindUniformLocationCHROMIUM(program, color_b_location, "u_colorB[0]");
+ glBindUniformLocationCHROMIUM(program, color_c_location, "u_colorC");
+
+ glAttachShader(program, vertex_shader);
+ glAttachShader(program, fragment_shader);
+ // Link the program
+ glLinkProgram(program);
+ // Check the link status
+ GLint linked = 0;
+ glGetProgramiv(program, GL_LINK_STATUS, &linked);
+ EXPECT_EQ(1, linked);
+
+ GLint position_loc = glGetAttribLocation(program, "a_position");
+
+ GLTestHelper::SetupUnitQuad(position_loc);
+
+ glUseProgram(program);
+
+ static const float color_b[] = {
+ 0.0f, 0.50f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.75f, 0.0f,
+ };
+
+ glUniform4f(color_a_location, 0.25f, 0.0f, 0.0f, 0.0f);
+ glUniform4fv(color_b_location, 2, color_b);
+ glUniform4f(color_c_location, 0.0f, 0.0f, 0.0f, 1.0f);
+
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+
+ static const uint8 expected[] = { 64, 128, 192, 255 };
+ EXPECT_TRUE(
+ GLTestHelper::CheckPixels(0, 0, kResolution, kResolution, 1, expected));
+
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+TEST_F(BindUniformLocationTest, Compositor) {
+ ASSERT_TRUE(
+ GLTestHelper::HasExtension("GL_CHROMIUM_bind_uniform_location"));
+
+ static const char* v_shader_str = SHADER(
+ attribute vec4 a_position;
+ attribute vec2 a_texCoord;
+ uniform mat4 matrix;
+ uniform vec2 color_a[4];
+ uniform vec4 color_b;
+ varying vec4 v_color;
+ void main()
+ {
+ v_color.xy = color_a[0] + color_a[1];
+ v_color.zw = color_a[2] + color_a[3];
+ v_color += color_b;
+ gl_Position = matrix * a_position;
+ }
+ );
+
+ static const char* f_shader_str = SHADER(
+ precision mediump float;
+ varying vec4 v_color;
+ uniform float alpha;
+ uniform vec4 multiplier;
+ uniform vec3 color_c[8];
+ void main()
+ {
+ vec4 color_c_sum = vec4(0.0);
+ color_c_sum.xyz += color_c[0];
+ color_c_sum.xyz += color_c[1];
+ color_c_sum.xyz += color_c[2];
+ color_c_sum.xyz += color_c[3];
+ color_c_sum.xyz += color_c[4];
+ color_c_sum.xyz += color_c[5];
+ color_c_sum.xyz += color_c[6];
+ color_c_sum.xyz += color_c[7];
+ color_c_sum.w = alpha;
+ color_c_sum *= multiplier;
+ gl_FragColor = v_color + color_c_sum;
+ }
+ );
+
+ int counter = 0;
+ int matrix_location = counter++;
+ int color_a_location = counter++;
+ int color_b_location = counter++;
+ int alpha_location = counter++;
+ int multiplier_location = counter++;
+ int color_c_location = counter++;
+
+ GLuint vertex_shader = GLTestHelper::LoadShader(
+ GL_VERTEX_SHADER, v_shader_str);
+ GLuint fragment_shader = GLTestHelper::LoadShader(
+ GL_FRAGMENT_SHADER, f_shader_str);
+
+ GLuint program = glCreateProgram();
+
+ glBindUniformLocationCHROMIUM(program, matrix_location, "matrix");
+ glBindUniformLocationCHROMIUM(program, color_a_location, "color_a");
+ glBindUniformLocationCHROMIUM(program, color_b_location, "color_b");
+ glBindUniformLocationCHROMIUM(program, alpha_location, "alpha");
+ glBindUniformLocationCHROMIUM(program, multiplier_location, "multiplier");
+ glBindUniformLocationCHROMIUM(program, color_c_location, "color_c");
+
+ glAttachShader(program, vertex_shader);
+ glAttachShader(program, fragment_shader);
+ // Link the program
+ glLinkProgram(program);
+ // Check the link status
+ GLint linked = 0;
+ glGetProgramiv(program, GL_LINK_STATUS, &linked);
+ EXPECT_EQ(1, linked);
+
+ GLint position_loc = glGetAttribLocation(program, "a_position");
+
+ GLTestHelper::SetupUnitQuad(position_loc);
+
+ glUseProgram(program);
+
+ static const float color_a[] = {
+ 0.1f, 0.1f, 0.1f, 0.1f,
+ 0.1f, 0.1f, 0.1f, 0.1f,
+ };
+
+ static const float color_c[] = {
+ 0.1f, 0.1f, 0.1f,
+ 0.1f, 0.1f, 0.1f,
+ 0.1f, 0.1f, 0.1f,
+ 0.1f, 0.1f, 0.1f,
+ 0.1f, 0.1f, 0.1f,
+ 0.1f, 0.1f, 0.1f,
+ 0.1f, 0.1f, 0.1f,
+ 0.1f, 0.1f, 0.1f,
+ };
+
+ static const float identity[] = {
+ 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
+ };
+
+ glUniformMatrix4fv(matrix_location, 1, false, identity);
+ glUniform2fv(color_a_location, 4, color_a);
+ glUniform4f(color_b_location, 0.2f, 0.2f, 0.2f, 0.2f);
+ glUniform1f(alpha_location, 0.8f);
+ glUniform4f(multiplier_location, 0.5f, 0.5f, 0.5f, 0.5f);
+ glUniform3fv(color_c_location, 8, color_c);
+
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+
+ static const uint8 expected[] = { 204, 204, 204, 204 };
+ EXPECT_TRUE(
+ GLTestHelper::CheckPixels(0, 0, kResolution, kResolution, 1, expected));
+
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+
+}
+
+} // namespace gpu
+
+
+
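The expected value in the Compositor test follows directly from the shader
arithmetic; recomputing it with the uniform values set above:

    // v_color      = (0.1 + 0.1) per channel from color_a + 0.2 from color_b
    //              = 0.4 in every channel.
    // color_c_sum  = 8 * 0.1 = 0.8 (rgb), alpha = 0.8, all scaled by the
    //                0.5 multiplier -> 0.4 in every channel.
    // gl_FragColor = 0.4 + 0.4 = 0.8 -> 0.8 * 255 = 204, matching the
    //                expected {204, 204, 204, 204} within the +/-1 tolerance.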
diff --git a/gpu/command_buffer/tests/gl_chromium_framebuffer_multisample_unittest.cc b/gpu/command_buffer/tests/gl_chromium_framebuffer_multisample_unittest.cc
new file mode 100644
index 0000000..fe61d51
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_chromium_framebuffer_multisample_unittest.cc
@@ -0,0 +1,161 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class GLChromiumFramebufferMultisampleTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gl_.Initialize(GLManager::Options());
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ GLManager gl_;
+};
+
+// Test that GL is at least minimally working.
+TEST_F(GLChromiumFramebufferMultisampleTest, CachedBindingsTest) {
+ if (!GLTestHelper::HasExtension("GL_CHROMIUM_framebuffer_multisample")) {
+ return;
+ }
+
+ GLuint fbo = 0;
+ glGenFramebuffers(1, &fbo);
+ glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
+ glBindFramebuffer(GL_FRAMEBUFFER, 0);
+
+  // If the caching is bad, the second call to glBindFramebuffer will do
+  // nothing, which means the draw framebuffer will not report
+  // GL_FRAMEBUFFER_COMPLETE and rendering will generate an error.
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ glCheckFramebufferStatus(GL_FRAMEBUFFER));
+
+ glClear(GL_COLOR_BUFFER_BIT);
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+TEST_F(GLChromiumFramebufferMultisampleTest, DrawAndResolve) {
+ if (!GLTestHelper::HasExtension("GL_CHROMIUM_framebuffer_multisample")) {
+ return;
+ }
+
+ static const char* v_shader_str =
+ "attribute vec4 a_Position;\n"
+ "void main()\n"
+ "{\n"
+ " gl_Position = a_Position;\n"
+ "}\n";
+ static const char* f_shader_str =
+ "precision mediump float;\n"
+ "void main()\n"
+ "{\n"
+ " gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);\n"
+ "}\n";
+
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+ glUseProgram(program);
+ GLuint position_loc = glGetAttribLocation(program, "a_Position");
+
+ GLTestHelper::SetupUnitQuad(position_loc);
+
+ const GLuint width = 100;
+ const GLuint height = 100;
+
+ // Create a sample buffer.
+ GLsizei num_samples = 4, max_samples = 0;
+ glGetIntegerv(GL_MAX_SAMPLES, &max_samples);
+ num_samples = std::min(num_samples, max_samples);
+
+ GLuint sample_fbo, sample_rb;
+ glGenRenderbuffers(1, &sample_rb);
+ glBindRenderbuffer(GL_RENDERBUFFER, sample_rb);
+ glRenderbufferStorageMultisampleCHROMIUM(
+ GL_RENDERBUFFER, num_samples, GL_RGBA8_OES, width, height);
+ GLint param = 0;
+ glGetRenderbufferParameteriv(
+      GL_RENDERBUFFER, GL_RENDERBUFFER_SAMPLES, &param);
+ EXPECT_GE(param, num_samples);
+
+ glGenFramebuffers(1, &sample_fbo);
+ glBindFramebuffer(GL_FRAMEBUFFER, sample_fbo);
+ glFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_RENDERBUFFER,
+ sample_rb);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ glCheckFramebufferStatus(GL_FRAMEBUFFER));
+
+ // Create another FBO to resolve the multisample buffer into.
+ GLuint resolve_fbo, resolve_tex;
+ glGenTextures(1, &resolve_tex);
+ glBindTexture(GL_TEXTURE_2D, resolve_tex);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ width,
+ height,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ NULL);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glGenFramebuffers(1, &resolve_fbo);
+ glBindFramebuffer(GL_FRAMEBUFFER, resolve_fbo);
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ resolve_tex,
+ 0);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ glCheckFramebufferStatus(GL_FRAMEBUFFER));
+
+ // Draw one triangle (bottom left half).
+ glViewport(0, 0, width, height);
+ glBindFramebuffer(GL_FRAMEBUFFER, sample_fbo);
+ glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glDrawArrays(GL_TRIANGLES, 0, 3);
+
+ // Resolve.
+ glBindFramebuffer(GL_READ_FRAMEBUFFER, sample_fbo);
+ glBindFramebuffer(GL_DRAW_FRAMEBUFFER, resolve_fbo);
+ glClearColor(1.0f, 0.0f, 0.0f, 0.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glBlitFramebufferCHROMIUM(0,
+ 0,
+ width,
+ height,
+ 0,
+ 0,
+ width,
+ height,
+ GL_COLOR_BUFFER_BIT,
+ GL_NEAREST);
+
+ // Verify.
+ const uint8 green[] = {0, 255, 0, 255};
+ const uint8 black[] = {0, 0, 0, 0};
+ glBindFramebuffer(GL_READ_FRAMEBUFFER, resolve_fbo);
+ EXPECT_TRUE(
+ GLTestHelper::CheckPixels(width / 4, (3 * height) / 4, 1, 1, 0, green));
+ EXPECT_TRUE(GLTestHelper::CheckPixels(width - 1, 0, 1, 1, 0, black));
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/tests/gl_chromium_path_rendering_unittest.cc b/gpu/command_buffer/tests/gl_chromium_path_rendering_unittest.cc
new file mode 100644
index 0000000..1d90053
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_chromium_path_rendering_unittest.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+#include <cmath>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class CHROMIUMPathRenderingTest : public testing::Test {
+ public:
+ static const GLsizei kResolution = 100;
+
+ protected:
+ virtual void SetUp() {
+ GLManager::Options options;
+ options.size = gfx::Size(kResolution, kResolution);
+ gl_.Initialize(options);
+ }
+
+ virtual void TearDown() { gl_.Destroy(); }
+
+ void ExpectEqualMatrix(const GLfloat* expected, const GLfloat* actual) {
+ for (size_t i = 0; i < 16; ++i) {
+ EXPECT_EQ(expected[i], actual[i]);
+ }
+ }
+ void ExpectEqualMatrix(const GLfloat* expected, const GLint* actual) {
+ for (size_t i = 0; i < 16; ++i) {
+ EXPECT_EQ(static_cast<GLint>(round(expected[i])), actual[i]);
+ }
+ }
+ GLManager gl_;
+};
+
+TEST_F(CHROMIUMPathRenderingTest, TestMatrix) {
+ if (!GLTestHelper::HasExtension("GL_CHROMIUM_path_rendering")) {
+ return;
+ }
+ static const GLfloat kIdentityMatrix[16] = {
+ 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f};
+ static const GLfloat kSeqMatrix[16] = {
+ 0.5f, -0.5f, -0.1f, -0.8f, 4.4f, 5.5f, 6.6f, 7.7f,
+ 8.8f, 9.9f, 10.11f, 11.22f, 12.33f, 13.44f, 14.55f, 15.66f};
+ static const GLenum kMatrixModes[] = {GL_PATH_MODELVIEW_CHROMIUM,
+ GL_PATH_PROJECTION_CHROMIUM};
+ static const GLenum kGetMatrixModes[] = {GL_PATH_MODELVIEW_MATRIX_CHROMIUM,
+ GL_PATH_PROJECTION_MATRIX_CHROMIUM};
+
+ for (size_t i = 0; i < arraysize(kMatrixModes); ++i) {
+ GLfloat mf[16];
+ GLint mi[16];
+ memset(mf, 0, sizeof(mf));
+ memset(mi, 0, sizeof(mi));
+ glGetFloatv(kGetMatrixModes[i], mf);
+ glGetIntegerv(kGetMatrixModes[i], mi);
+ ExpectEqualMatrix(kIdentityMatrix, mf);
+ ExpectEqualMatrix(kIdentityMatrix, mi);
+
+ glMatrixLoadfCHROMIUM(kMatrixModes[i], kSeqMatrix);
+ memset(mf, 0, sizeof(mf));
+ memset(mi, 0, sizeof(mi));
+ glGetFloatv(kGetMatrixModes[i], mf);
+ glGetIntegerv(kGetMatrixModes[i], mi);
+ ExpectEqualMatrix(kSeqMatrix, mf);
+ ExpectEqualMatrix(kSeqMatrix, mi);
+
+ glMatrixLoadIdentityCHROMIUM(kMatrixModes[i]);
+ memset(mf, 0, sizeof(mf));
+ memset(mi, 0, sizeof(mi));
+ glGetFloatv(kGetMatrixModes[i], mf);
+ glGetIntegerv(kGetMatrixModes[i], mi);
+ ExpectEqualMatrix(kIdentityMatrix, mf);
+ ExpectEqualMatrix(kIdentityMatrix, mi);
+
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ }
+}
+
+TEST_F(CHROMIUMPathRenderingTest, TestMatrixErrors) {
+ if (!GLTestHelper::HasExtension("GL_CHROMIUM_path_rendering")) {
+ return;
+ }
+ GLfloat mf[16];
+ memset(mf, 0, sizeof(mf));
+
+ // This should fail.
+ glMatrixLoadfCHROMIUM(GL_PATH_MODELVIEW_CHROMIUM - 1, mf);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), glGetError());
+
+ glMatrixLoadfCHROMIUM(GL_PATH_MODELVIEW_CHROMIUM, mf);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ // This should fail.
+ glMatrixLoadIdentityCHROMIUM(GL_PATH_PROJECTION_CHROMIUM + 1);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), glGetError());
+
+ glMatrixLoadIdentityCHROMIUM(GL_PATH_PROJECTION_CHROMIUM);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/tests/gl_copy_texture_CHROMIUM_unittest.cc b/gpu/command_buffer/tests/gl_copy_texture_CHROMIUM_unittest.cc
new file mode 100644
index 0000000..d03b121
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_copy_texture_CHROMIUM_unittest.cc
@@ -0,0 +1,632 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GL_GLEXT_PROTOTYPES
+#define GL_GLEXT_PROTOTYPES
+#endif
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+// A collection of tests that exercise the GL_CHROMIUM_copy_texture extension.
+class GLCopyTextureCHROMIUMTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gl_.Initialize(GLManager::Options());
+
+ glGenTextures(2, textures_);
+ glBindTexture(GL_TEXTURE_2D, textures_[1]);
+
+ // Some drivers (NVidia/SGX) require texture settings to be a certain way or
+ // they won't report FRAMEBUFFER_COMPLETE.
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+ glGenFramebuffers(1, &framebuffer_id_);
+ glBindFramebuffer(GL_FRAMEBUFFER, framebuffer_id_);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+ textures_[1], 0);
+ }
+
+ virtual void TearDown() {
+ glDeleteTextures(2, textures_);
+ glDeleteFramebuffers(1, &framebuffer_id_);
+ gl_.Destroy();
+ }
+
+ GLManager gl_;
+ GLuint textures_[2];
+ GLuint framebuffer_id_;
+};
+
+// Test to ensure that the basic functionality of the extension works.
+TEST_F(GLCopyTextureCHROMIUMTest, Basic) {
+ uint8 pixels[1 * 4] = { 255u, 0u, 0u, 255u };
+
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels);
+
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0, GL_RGBA,
+ GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(glGetError() == GL_NO_ERROR);
+
+ // Check the FB is still bound.
+ GLint value = 0;
+ glGetIntegerv(GL_FRAMEBUFFER_BINDING, &value);
+ GLuint fb_id = value;
+ EXPECT_EQ(framebuffer_id_, fb_id);
+
+ // Check that FB is complete.
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ glCheckFramebufferStatus(GL_FRAMEBUFFER));
+
+ GLTestHelper::CheckPixels(0, 0, 1, 1, 0, pixels);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+TEST_F(GLCopyTextureCHROMIUMTest, InternalFormat) {
+ GLint src_formats[] = {GL_ALPHA, GL_RGB, GL_RGBA,
+ GL_LUMINANCE, GL_LUMINANCE_ALPHA, GL_BGRA_EXT};
+ GLint dest_formats[] = {GL_RGB, GL_RGBA};
+
+ for (size_t src_index = 0; src_index < arraysize(src_formats); src_index++) {
+ for (size_t dest_index = 0; dest_index < arraysize(dest_formats);
+ dest_index++) {
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ src_formats[src_index],
+ 1,
+ 1,
+ 0,
+ src_formats[src_index],
+ GL_UNSIGNED_BYTE,
+ NULL);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D,
+ textures_[0],
+ textures_[1],
+ 0,
+ dest_formats[dest_index],
+ GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError()) << "src_index:" << src_index
+ << " dest_index:" << dest_index;
+ }
+ }
+}
+
+TEST_F(GLCopyTextureCHROMIUMTest, InternalFormatNotSupported) {
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ // Check unsupported format reports error.
+ GLint unsupported_dest_formats[] = {GL_ALPHA, GL_LUMINANCE,
+ GL_LUMINANCE_ALPHA};
+ for (size_t dest_index = 0; dest_index < arraysize(unsupported_dest_formats);
+ dest_index++) {
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D,
+ textures_[0],
+ textures_[1],
+ 0,
+ unsupported_dest_formats[dest_index],
+ GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_INVALID_OPERATION == glGetError())
+ << "dest_index:" << dest_index;
+ }
+}
+
+// Test to ensure that the destination texture is redefined if the properties
+// are different.
+TEST_F(GLCopyTextureCHROMIUMTest, RedefineDestinationTexture) {
+ uint8 pixels[4 * 4] = {255u, 0u, 0u, 255u, 255u, 0u, 0u, 255u,
+ 255u, 0u, 0u, 255u, 255u, 0u, 0u, 255u};
+
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
+
+ glBindTexture(GL_TEXTURE_2D, textures_[1]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_BGRA_EXT,
+ 1,
+ 1,
+ 0,
+ GL_BGRA_EXT,
+ GL_UNSIGNED_BYTE,
+ pixels);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ // GL_INVALID_OPERATION due to "intrinsic format" != "internal format".
+ glTexSubImage2D(
+ GL_TEXTURE_2D, 0, 0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
+ EXPECT_TRUE(GL_INVALID_OPERATION == glGetError());
+ // GL_INVALID_VALUE due to bad dimensions.
+ glTexSubImage2D(
+ GL_TEXTURE_2D, 0, 1, 1, 1, 1, GL_BGRA_EXT, GL_UNSIGNED_BYTE, pixels);
+ EXPECT_TRUE(GL_INVALID_VALUE == glGetError());
+
+ // If the dest texture has different properties, glCopyTextureCHROMIUM()
+ // redefines them.
+ glCopyTextureCHROMIUM(
+ GL_TEXTURE_2D, textures_[0], textures_[1], 0, GL_RGBA, GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ // glTexSubImage2D() succeeds because textures_[1] is redefined into 2x2
+ // dimension and GL_RGBA format.
+ glBindTexture(GL_TEXTURE_2D, textures_[1]);
+ glTexSubImage2D(
+ GL_TEXTURE_2D, 0, 1, 1, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ // Check the FB is still bound.
+ GLint value = 0;
+ glGetIntegerv(GL_FRAMEBUFFER_BINDING, &value);
+ GLuint fb_id = value;
+ EXPECT_EQ(framebuffer_id_, fb_id);
+
+ // Check that FB is complete.
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ glCheckFramebufferStatus(GL_FRAMEBUFFER));
+
+ GLTestHelper::CheckPixels(1, 1, 1, 1, 0, &pixels[12]);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+// Test that the extension respects the flip-y pixel storage setting.
+TEST_F(GLCopyTextureCHROMIUMTest, FlipY) {
+ uint8 pixels[2][2][4];
+ for (int x = 0; x < 2; ++x) {
+ for (int y = 0; y < 2; ++y) {
+ pixels[y][x][0] = x + y;
+ pixels[y][x][1] = x + y;
+ pixels[y][x][2] = x + y;
+ pixels[y][x][3] = 255u;
+ }
+ }
+
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels);
+
+ glPixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0, GL_RGBA,
+ GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ uint8 copied_pixels[2][2][4] = {{{0}}};
+ glReadPixels(0, 0, 2, 2, GL_RGBA, GL_UNSIGNED_BYTE, copied_pixels);
+ for (int x = 0; x < 2; ++x) {
+ for (int y = 0; y < 2; ++y) {
+ EXPECT_EQ(pixels[1-y][x][0], copied_pixels[y][x][0]);
+ EXPECT_EQ(pixels[1-y][x][1], copied_pixels[y][x][1]);
+ EXPECT_EQ(pixels[1-y][x][2], copied_pixels[y][x][2]);
+ EXPECT_EQ(pixels[1-y][x][3], copied_pixels[y][x][3]);
+ }
+ }
+
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+// Test that the extension respects the GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM
+// storage setting.
+TEST_F(GLCopyTextureCHROMIUMTest, PremultiplyAlpha) {
+ uint8 pixels[1 * 4] = { 2, 2, 2, 128 };
+
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels);
+
+ glPixelStorei(GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM, GL_TRUE);
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0, GL_RGBA,
+ GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ uint8 copied_pixels[1 * 4] = {0};
+ glReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, copied_pixels);
+ EXPECT_EQ(1u, copied_pixels[0]);
+ EXPECT_EQ(1u, copied_pixels[1]);
+ EXPECT_EQ(1u, copied_pixels[2]);
+ EXPECT_EQ(128u, copied_pixels[3]);
+
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+// Test that the extension respects the GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM
+// storage setting.
+TEST_F(GLCopyTextureCHROMIUMTest, UnpremultiplyAlpha) {
+ uint8 pixels[1 * 4] = { 16, 16, 16, 128 };
+
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels);
+
+ glPixelStorei(GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM, GL_TRUE);
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0, GL_RGBA,
+ GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ uint8 copied_pixels[1 * 4] = {0};
+ glReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, copied_pixels);
+ EXPECT_EQ(32u, copied_pixels[0]);
+ EXPECT_EQ(32u, copied_pixels[1]);
+ EXPECT_EQ(32u, copied_pixels[2]);
+ EXPECT_EQ(128u, copied_pixels[3]);
+
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+TEST_F(GLCopyTextureCHROMIUMTest, FlipYAndPremultiplyAlpha) {
+ uint8 pixels[2][2][4];
+ for (int x = 0; x < 2; ++x) {
+ for (int y = 0; y < 2; ++y) {
+ uint8 color = 16 * x + 16 * y;
+ pixels[y][x][0] = color;
+ pixels[y][x][1] = color;
+ pixels[y][x][2] = color;
+ pixels[y][x][3] = 128u;
+ }
+ }
+
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels);
+
+ glPixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+ glPixelStorei(GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM, GL_TRUE);
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0, GL_RGBA,
+ GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ uint8 copied_pixels[2][2][4] = {{{0}}};
+ glReadPixels(0, 0, 2, 2, GL_RGBA, GL_UNSIGNED_BYTE, copied_pixels);
+ for (int x = 0; x < 2; ++x) {
+ for (int y = 0; y < 2; ++y) {
+ EXPECT_EQ(pixels[1-y][x][0] / 2, copied_pixels[y][x][0]);
+ EXPECT_EQ(pixels[1-y][x][1] / 2, copied_pixels[y][x][1]);
+ EXPECT_EQ(pixels[1-y][x][2] / 2, copied_pixels[y][x][2]);
+ EXPECT_EQ(pixels[1-y][x][3], copied_pixels[y][x][3]);
+ }
+ }
+
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+TEST_F(GLCopyTextureCHROMIUMTest, FlipYAndUnpremultiplyAlpha) {
+ uint8 pixels[2][2][4];
+ for (int x = 0; x < 2; ++x) {
+ for (int y = 0; y < 2; ++y) {
+ uint8 color = 16 * x + 16 * y;
+ pixels[y][x][0] = color;
+ pixels[y][x][1] = color;
+ pixels[y][x][2] = color;
+ pixels[y][x][3] = 128u;
+ }
+ }
+
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels);
+
+ glPixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+ glPixelStorei(GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM, GL_TRUE);
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0, GL_RGBA,
+ GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ uint8 copied_pixels[2][2][4] = {{{0}}};
+ glReadPixels(0, 0, 2, 2, GL_RGBA, GL_UNSIGNED_BYTE, copied_pixels);
+ for (int x = 0; x < 2; ++x) {
+ for (int y = 0; y < 2; ++y) {
+ EXPECT_EQ(pixels[1-y][x][0] * 2, copied_pixels[y][x][0]);
+ EXPECT_EQ(pixels[1-y][x][1] * 2, copied_pixels[y][x][1]);
+ EXPECT_EQ(pixels[1-y][x][2] * 2, copied_pixels[y][x][2]);
+ EXPECT_EQ(pixels[1-y][x][3], copied_pixels[y][x][3]);
+ }
+ }
+
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+namespace {
+
+void glEnableDisable(GLint param, GLboolean value) {
+ if (value)
+ glEnable(param);
+ else
+ glDisable(param);
+}
+
+} // unnamed namespace
+
+// Validate that some basic GL state is not touched upon execution of
+// the extension.
+TEST_F(GLCopyTextureCHROMIUMTest, BasicStatePreservation) {
+ uint8 pixels[1 * 4] = { 255u, 0u, 0u, 255u };
+
+ glBindFramebuffer(GL_FRAMEBUFFER, 0);
+
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels);
+
+ GLboolean reference_settings[2] = { GL_TRUE, GL_FALSE };
+ for (int x = 0; x < 2; ++x) {
+ GLboolean setting = reference_settings[x];
+ glEnableDisable(GL_DEPTH_TEST, setting);
+ glEnableDisable(GL_SCISSOR_TEST, setting);
+ glEnableDisable(GL_STENCIL_TEST, setting);
+ glEnableDisable(GL_CULL_FACE, setting);
+ glEnableDisable(GL_BLEND, setting);
+ glColorMask(setting, setting, setting, setting);
+ glDepthMask(setting);
+
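+ // Change the active texture unit as well; the checks below verify the
+ // extension leaves it untouched.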
+ glActiveTexture(GL_TEXTURE1 + x);
+
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0,
+ GL_RGBA, GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ EXPECT_EQ(setting, glIsEnabled(GL_DEPTH_TEST));
+ EXPECT_EQ(setting, glIsEnabled(GL_SCISSOR_TEST));
+ EXPECT_EQ(setting, glIsEnabled(GL_STENCIL_TEST));
+ EXPECT_EQ(setting, glIsEnabled(GL_CULL_FACE));
+ EXPECT_EQ(setting, glIsEnabled(GL_BLEND));
+
+ GLboolean bool_array[4] = { GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE };
+ glGetBooleanv(GL_DEPTH_WRITEMASK, bool_array);
+ EXPECT_EQ(setting, bool_array[0]);
+
+ bool_array[0] = GL_FALSE;
+ glGetBooleanv(GL_COLOR_WRITEMASK, bool_array);
+ EXPECT_EQ(setting, bool_array[0]);
+ EXPECT_EQ(setting, bool_array[1]);
+ EXPECT_EQ(setting, bool_array[2]);
+ EXPECT_EQ(setting, bool_array[3]);
+
+ GLint active_texture = 0;
+ glGetIntegerv(GL_ACTIVE_TEXTURE, &active_texture);
+ EXPECT_EQ(GL_TEXTURE1 + x, active_texture);
+ }
+
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+// Verify that invocation of the extension does not modify the bound
+// texture state.
+TEST_F(GLCopyTextureCHROMIUMTest, TextureStatePreserved) {
+ // Setup the texture used for the extension invocation.
+ uint8 pixels[1 * 4] = { 255u, 0u, 0u, 255u };
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels);
+
+ GLuint texture_ids[2];
+ glGenTextures(2, texture_ids);
+
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, texture_ids[0]);
+
+ glActiveTexture(GL_TEXTURE1);
+ glBindTexture(GL_TEXTURE_2D, texture_ids[1]);
+
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0,
+ GL_RGBA, GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ GLint active_texture = 0;
+ glGetIntegerv(GL_ACTIVE_TEXTURE, &active_texture);
+ EXPECT_EQ(GL_TEXTURE1, active_texture);
+
+ GLint bound_texture = 0;
+ glGetIntegerv(GL_TEXTURE_BINDING_2D, &bound_texture);
+ EXPECT_EQ(texture_ids[1], static_cast<GLuint>(bound_texture));
+ glBindTexture(GL_TEXTURE_2D, 0);
+
+ bound_texture = 0;
+ glActiveTexture(GL_TEXTURE0);
+ glGetIntegerv(GL_TEXTURE_BINDING_2D, &bound_texture);
+ EXPECT_EQ(texture_ids[0], static_cast<GLuint>(bound_texture));
+ glBindTexture(GL_TEXTURE_2D, 0);
+
+ glDeleteTextures(2, texture_ids);
+
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+// Verify that invocation of the extension does not perturb the currently
+// bound FBO state.
+TEST_F(GLCopyTextureCHROMIUMTest, FBOStatePreserved) {
+ // Setup the texture used for the extension invocation.
+ uint8 pixels[1 * 4] = { 255u, 0u, 0u, 255u };
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels);
+
+ GLuint texture_id;
+ glGenTextures(1, &texture_id);
+ glBindTexture(GL_TEXTURE_2D, texture_id);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ 0);
+
+ GLuint renderbuffer_id;
+ glGenRenderbuffers(1, &renderbuffer_id);
+ glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer_id);
+ glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, 1, 1);
+
+ GLuint framebuffer_id;
+ glGenFramebuffers(1, &framebuffer_id);
+ glBindFramebuffer(GL_FRAMEBUFFER, framebuffer_id);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+ texture_id, 0);
+ glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER, renderbuffer_id);
+ EXPECT_TRUE(
+ GL_FRAMEBUFFER_COMPLETE == glCheckFramebufferStatus(GL_FRAMEBUFFER));
+
+ // Test that we can write to the bound framebuffer
+ uint8 expected_color[4] = { 255u, 255u, 0, 255u };
+ glClearColor(1.0, 1.0, 0, 1.0);
+ glClear(GL_COLOR_BUFFER_BIT);
+ GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected_color);
+
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0,
+ GL_RGBA, GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ EXPECT_TRUE(glIsFramebuffer(framebuffer_id));
+
+ // Ensure that reading from the framebuffer produces correct pixels.
+ GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected_color);
+
+ uint8 expected_color2[4] = { 255u, 0, 255u, 255u };
+ glClearColor(1.0, 0, 1.0, 1.0);
+ glClear(GL_COLOR_BUFFER_BIT);
+ GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected_color2);
+
+ GLint bound_fbo = 0;
+ glGetIntegerv(GL_FRAMEBUFFER_BINDING, &bound_fbo);
+ EXPECT_EQ(framebuffer_id, static_cast<GLuint>(bound_fbo));
+
+ GLint fbo_params = 0;
+ glGetFramebufferAttachmentParameteriv(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ &fbo_params);
+ EXPECT_EQ(GL_TEXTURE, fbo_params);
+
+ fbo_params = 0;
+ glGetFramebufferAttachmentParameteriv(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
+ &fbo_params);
+ EXPECT_EQ(texture_id, static_cast<GLuint>(fbo_params));
+
+ fbo_params = 0;
+ glGetFramebufferAttachmentParameteriv(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ &fbo_params);
+ EXPECT_EQ(GL_RENDERBUFFER, fbo_params);
+
+ fbo_params = 0;
+ glGetFramebufferAttachmentParameteriv(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
+ &fbo_params);
+ EXPECT_EQ(renderbuffer_id, static_cast<GLuint>(fbo_params));
+
+ glDeleteRenderbuffers(1, &renderbuffer_id);
+ glDeleteTextures(1, &texture_id);
+ glDeleteFramebuffers(1, &framebuffer_id);
+
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+TEST_F(GLCopyTextureCHROMIUMTest, ProgramStatePreservation) {
+ // Unbind the framebuffer created in SetUp().
+ glBindFramebuffer(GL_FRAMEBUFFER, 0);
+ glBindTexture(GL_TEXTURE_2D, 0);
+
+ GLManager gl2;
+ GLManager::Options options;
+ options.size = gfx::Size(16, 16);
+ options.share_group_manager = &gl_;
+ gl2.Initialize(options);
+ gl_.MakeCurrent();
+
+ static const char* v_shader_str =
+ "attribute vec4 g_Position;\n"
+ "void main()\n"
+ "{\n"
+ " gl_Position = g_Position;\n"
+ "}\n";
+ static const char* f_shader_str =
+ "precision mediump float;\n"
+ "void main()\n"
+ "{\n"
+ " gl_FragColor = vec4(0,1,0,1);\n"
+ "}\n";
+
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+ glUseProgram(program);
+ GLuint position_loc = glGetAttribLocation(program, "g_Position");
+ glFlush();
+
+ // Delete program from other context.
+ gl2.MakeCurrent();
+ glDeleteProgram(program);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+ glFlush();
+
+ // Program should still be usable on this context.
+ gl_.MakeCurrent();
+
+ GLTestHelper::SetupUnitQuad(position_loc);
+
+ // Test using the program before the copy.
+ uint8 expected[] = { 0, 255, 0, 255, };
+ uint8 zero[] = { 0, 0, 0, 0, };
+ glClear(GL_COLOR_BUFFER_BIT);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, zero));
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected));
+
+ // Call glCopyTextureCHROMIUM().
+ uint8 pixels[1 * 4] = { 255u, 0u, 0u, 255u };
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels);
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0, GL_RGBA,
+ GL_UNSIGNED_BYTE);
+
+ // Test using the program after the copy.
+ glClear(GL_COLOR_BUFFER_BIT);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, zero));
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected));
+
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ gl2.MakeCurrent();
+ gl2.Destroy();
+ gl_.MakeCurrent();
+}
+
+// Test that glCopyTextureCHROMIUM doesn't leak uninitialized textures.
+TEST_F(GLCopyTextureCHROMIUMTest, UninitializedSource) {
+ const GLsizei kWidth = 64, kHeight = 64;
+ glBindTexture(GL_TEXTURE_2D, textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, kWidth, kHeight,
+ 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
+
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D, textures_[0], textures_[1], 0, GL_RGBA,
+ GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ uint8 pixels[kHeight][kWidth][4] = {{{1}}};
+ glReadPixels(0, 0, kWidth, kHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
+ for (int x = 0; x < kWidth; ++x) {
+ for (int y = 0; y < kHeight; ++y) {
+ EXPECT_EQ(0, pixels[y][x][0]);
+ EXPECT_EQ(0, pixels[y][x][1]);
+ EXPECT_EQ(0, pixels[y][x][2]);
+ EXPECT_EQ(0, pixels[y][x][3]);
+ }
+ }
+
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/tests/gl_depth_texture_unittest.cc b/gpu/command_buffer/tests/gl_depth_texture_unittest.cc
new file mode 100644
index 0000000..83bb84c
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_depth_texture_unittest.cc
@@ -0,0 +1,239 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define SHADER(Src) #Src
+
+namespace gpu {
+
+class DepthTextureTest : public testing::Test {
+ protected:
+ static const GLsizei kResolution = 64;
+ virtual void SetUp() {
+ GLManager::Options options;
+ options.size = gfx::Size(kResolution, kResolution);
+ gl_.Initialize(options);
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ GLuint SetupUnitQuad(GLint position_location);
+
+ GLManager gl_;
+};
+
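+// Sets up a unit quad whose z coordinates vary across the surface, so that
+// rendering it fills the depth buffer with a diagonal gradient.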
+GLuint DepthTextureTest::SetupUnitQuad(GLint position_location) {
+ GLuint vbo = 0;
+ glGenBuffers(1, &vbo);
+ glBindBuffer(GL_ARRAY_BUFFER, vbo);
+ static float vertices[] = {
+ 1.0f, 1.0f, 1.0f,
+ -1.0f, 1.0f, 0.0f,
+ -1.0f, -1.0f, -1.0f,
+ 1.0f, 1.0f, 1.0f,
+ -1.0f, -1.0f, -1.0f,
+ 1.0f, -1.0f, 0.0f,
+ };
+ glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
+ glEnableVertexAttribArray(position_location);
+ glVertexAttribPointer(position_location, 3, GL_FLOAT, GL_FALSE, 0, 0);
+
+ return vbo;
+}
+
+namespace {
+
+struct FormatType {
+ GLenum format;
+ GLenum type;
+};
+
+} // anonymous namespace
+
+TEST_F(DepthTextureTest, RenderTo) {
+ if (!GLTestHelper::HasExtension("GL_CHROMIUM_depth_texture")) {
+ return;
+ }
+
+ bool have_depth_stencil = GLTestHelper::HasExtension(
+ "GL_OES_packed_depth_stencil");
+
+ static const char* v_shader_str = SHADER(
+ attribute vec4 v_position;
+ void main()
+ {
+ gl_Position = v_position;
+ }
+ );
+ static const char* f_shader_str = SHADER(
+ precision mediump float;
+ uniform sampler2D u_texture;
+ uniform vec2 u_resolution;
+ void main()
+ {
+ vec2 texcoord = gl_FragCoord.xy / u_resolution;
+ gl_FragColor = texture2D(u_texture, texcoord);
+ }
+ );
+
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+
+ GLint position_loc = glGetAttribLocation(program, "v_position");
+ GLint resolution_loc = glGetUniformLocation(program, "u_resolution");
+
+ SetupUnitQuad(position_loc);
+
+ // Depth test needs to be on for the depth buffer to be updated.
+ glEnable(GL_DEPTH_TEST);
+
+ // create an fbo
+ GLuint fbo = 0;
+ glGenFramebuffers(1, &fbo);
+ glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+
+ // Create a color texture and a depth texture.
+ GLuint color_texture = 0;
+ GLuint depth_texture = 0;
+
+ glGenTextures(1, &color_texture);
+ glBindTexture(GL_TEXTURE_2D, color_texture);
+ glTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, kResolution, kResolution,
+ 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glFramebufferTexture2D(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, color_texture, 0);
+
+ glGenTextures(1, &depth_texture);
+ glBindTexture(GL_TEXTURE_2D, depth_texture);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glFramebufferTexture2D(
+ GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_texture, 0);
+
+ glUseProgram(program);
+ glUniform2f(resolution_loc, kResolution, kResolution);
+
+ static const FormatType format_types[] = {
+ { GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT },
+ { GL_DEPTH_COMPONENT, GL_UNSIGNED_INT },
+ { GL_DEPTH_STENCIL_OES, GL_UNSIGNED_INT_24_8_OES },
+ };
+ for (size_t ii = 0; ii < arraysize(format_types); ++ii) {
+ const FormatType& format_type = format_types[ii];
+ GLenum format = format_type.format;
+ GLenum type = format_type.type;
+
+ if (format == GL_DEPTH_STENCIL_OES && !have_depth_stencil) {
+ continue;
+ }
+
+ glBindTexture(GL_TEXTURE_2D, depth_texture);
+ glTexImage2D(
+ GL_TEXTURE_2D, 0, format, kResolution, kResolution,
+ 0, format, type, NULL);
+
+ glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+ GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE), status)
+ << "iteration: " << ii;
+ if (status != GL_FRAMEBUFFER_COMPLETE) {
+ continue;
+ }
+
+ if (!GLTestHelper::CheckGLError("no errors after setup", __LINE__)) {
+ continue;
+ }
+
+ glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+
+ // Disconnect the texture so we'll render with the default texture.
+ glBindTexture(GL_TEXTURE_2D, 0);
+
+ // Render to the fbo.
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+
+ if (!GLTestHelper::CheckGLError("no errors after depth draw", __LINE__)) {
+ continue;
+ }
+
+ // Render with the depth texture.
+ glBindFramebuffer(GL_FRAMEBUFFER, 0);
+ glBindTexture(GL_TEXTURE_2D, depth_texture);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+
+ if (!GLTestHelper::CheckGLError("no errors after texture draw", __LINE__)) {
+ continue;
+ }
+
+ uint8 actual_pixels[kResolution * kResolution * 4] = { 0, };
+ glReadPixels(
+ 0, 0, kResolution, kResolution, GL_RGBA, GL_UNSIGNED_BYTE,
+ actual_pixels);
+
+ if (!GLTestHelper::CheckGLError("no errors after readpixels", __LINE__)) {
+ continue;
+ }
+
+ // Check that each pixel's red value is greater than that of the pixel to
+ // its left and the pixel below it; this verifies we rendered a gradient.
+ // No assumption is made about the green, blue and alpha channels since,
+ // according to the GL_CHROMIUM_depth_texture spec, they have undefined
+ // values for depth textures.
+ int bad_count = 0; // Used to avoid spamming the log with too many messages.
+ for (GLint yy = 0; bad_count < 16 && yy < kResolution; ++yy) {
+ for (GLint xx = 0; bad_count < 16 && xx < kResolution; ++xx) {
+ const uint8* actual = &actual_pixels[(yy * kResolution + xx) * 4];
+ const uint8* left = actual - 4;
+ const uint8* down = actual - kResolution * 4;
+
+ // NOTE: On the Qualcomm GPU in the Nexus 4 the rightmost column has the
+ // same values as the next-to-rightmost column (bad interpolator?), so the
+ // last column is skipped.
+ if (xx > 0 && xx < kResolution - 1) {
+ EXPECT_GT(actual[0], left[0])
+ << "pixel at " << xx << ", " << yy
+ << " actual[0] =" << static_cast<unsigned>(actual[0])
+ << " left[0] =" << static_cast<unsigned>(left[0])
+ << " actual =" << reinterpret_cast<const void*>(actual)
+ << " left =" << reinterpret_cast<const void*>(left);
+ bad_count += (actual[0] > left[0] ? 0 : 1);
+ }
+
+ if (yy > 0 && yy < kResolution - 1) {
+ EXPECT_GT(actual[0], down[0]) << "pixel at " << xx << ", " << yy;
+ bad_count += (actual[0] > down[0] ? 0 : 1);
+ }
+ }
+ }
+
+ // Check that the bottom left corner is vastly different from the top right.
+ EXPECT_GT(
+ actual_pixels[(kResolution * kResolution - 1) * 4] - actual_pixels[0],
+ 0xC0);
+
+ GLTestHelper::CheckGLError("no errors after everything", __LINE__);
+ }
+}
+
+} // namespace gpu
+
+
+
+
diff --git a/gpu/command_buffer/tests/gl_gpu_memory_buffer_unittest.cc b/gpu/command_buffer/tests/gl_gpu_memory_buffer_unittest.cc
new file mode 100644
index 0000000..c73cc3d
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_gpu_memory_buffer_unittest.cc
@@ -0,0 +1,125 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2chromium.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/process/process_handle.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_image.h"
+
+using testing::_;
+using testing::IgnoreResult;
+using testing::InvokeWithoutArgs;
+using testing::Invoke;
+using testing::Return;
+using testing::SetArgPointee;
+using testing::StrictMock;
+
+namespace gpu {
+namespace gles2 {
+
+static const int kImageWidth = 32;
+static const int kImageHeight = 32;
+static const int kImageBytesPerPixel = 4;
+
+class GpuMemoryBufferTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gl_.Initialize(GLManager::Options());
+ gl_.MakeCurrent();
+
+ glGenTextures(2, texture_ids_);
+ glBindTexture(GL_TEXTURE_2D, texture_ids_[1]);
+
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+ glGenFramebuffers(1, &framebuffer_id_);
+ glBindFramebuffer(GL_FRAMEBUFFER, framebuffer_id_);
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ texture_ids_[1],
+ 0);
+ }
+
+ virtual void TearDown() {
+ glDeleteTextures(2, texture_ids_);
+ glDeleteFramebuffers(1, &framebuffer_id_);
+
+ gl_.Destroy();
+ }
+
+ GLManager gl_;
+ GLuint texture_ids_[2];
+ GLuint framebuffer_id_;
+};
+
+// An end to end test that tests the whole GpuMemoryBuffer lifecycle.
+TEST_F(GpuMemoryBufferTest, Lifecycle) {
+ uint8 pixels[1 * 4] = { 255u, 0u, 0u, 255u };
+
+ // Create the image. This should add the image ID to the ImageManager.
+ GLuint image_id = glCreateImageCHROMIUM(
+ kImageWidth, kImageHeight, GL_RGBA8_OES, GL_IMAGE_MAP_CHROMIUM);
+ EXPECT_NE(0u, image_id);
+ EXPECT_TRUE(gl_.decoder()->GetImageManager()->LookupImage(image_id) != NULL);
+
+ // Map image for writing.
+ uint8* mapped_buffer = static_cast<uint8*>(glMapImageCHROMIUM(image_id));
+ ASSERT_TRUE(mapped_buffer != NULL);
+
+ // Assign a value to each pixel.
+ int stride = kImageWidth * kImageBytesPerPixel;
+ for (int x = 0; x < kImageWidth; ++x) {
+ for (int y = 0; y < kImageHeight; ++y) {
+ mapped_buffer[y * stride + x * kImageBytesPerPixel + 0] = pixels[0];
+ mapped_buffer[y * stride + x * kImageBytesPerPixel + 1] = pixels[1];
+ mapped_buffer[y * stride + x * kImageBytesPerPixel + 2] = pixels[2];
+ mapped_buffer[y * stride + x * kImageBytesPerPixel + 3] = pixels[3];
+ }
+ }
+
+ // Unmap the image.
+ glUnmapImageCHROMIUM(image_id);
+
+ // Bind the texture and the image.
+ glBindTexture(GL_TEXTURE_2D, texture_ids_[0]);
+ glBindTexImage2DCHROMIUM(GL_TEXTURE_2D, image_id);
+
+ // Copy texture so we can verify result using CheckPixels.
+ glCopyTextureCHROMIUM(GL_TEXTURE_2D,
+ texture_ids_[0],
+ texture_ids_[1],
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE);
+ EXPECT_TRUE(glGetError() == GL_NO_ERROR);
+
+ // Check if pixels match the values that were assigned to the mapped buffer.
+ GLTestHelper::CheckPixels(0, 0, kImageWidth, kImageHeight, 0, pixels);
+ EXPECT_TRUE(GL_NO_ERROR == glGetError());
+
+ // Release the image.
+ glReleaseTexImage2DCHROMIUM(GL_TEXTURE_2D, image_id);
+
+ // Destroy the image.
+ glDestroyImageCHROMIUM(image_id);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/tests/gl_lose_context_chromium_unittest.cc b/gpu/command_buffer/tests/gl_lose_context_chromium_unittest.cc
new file mode 100644
index 0000000..547c1ed
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_lose_context_chromium_unittest.cc
@@ -0,0 +1,70 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "base/logging.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class GLLoseContextTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ GLManager::Options options;
+ gl2_.Initialize(options);
+ options.context_lost_allowed = true;
+ gl1a_.Initialize(options);
+ options.share_group_manager = &gl1a_;
+ gl1b_.Initialize(options);
+ }
+
+ virtual void TearDown() {
+ gl1a_.Destroy();
+ gl1b_.Destroy();
+ gl2_.Destroy();
+ }
+
+ GLManager gl1a_;
+ GLManager gl1b_;
+ GLManager gl2_;
+};
+
+// Test that glLoseContextCHROMIUM loses context in the same
+// share group but not other.
+TEST_F(GLLoseContextTest, ShareGroup) {
+ // If losing the context will cause the process to exit, do not perform this
+ // test as it will cause all subsequent tests to not run.
+ if (gl1a_.workarounds().exit_on_context_lost)
+ return;
+
+ gl1a_.MakeCurrent();
+ glLoseContextCHROMIUM(
+ GL_GUILTY_CONTEXT_RESET_EXT, GL_INNOCENT_CONTEXT_RESET_EXT);
+
+ uint8 expected_no_draw[] = {
+ GLTestHelper::kCheckClearValue,
+ GLTestHelper::kCheckClearValue,
+ GLTestHelper::kCheckClearValue,
+ GLTestHelper::kCheckClearValue,
+ };
+ // Expect the read will fail.
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected_no_draw));
+ gl1b_.MakeCurrent();
+ // Expect the read will fail.
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected_no_draw));
+ gl2_.MakeCurrent();
+ uint8 expected_draw[] = { 0, 0, 0, 0, };
+ // Expect the read will succeed.
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected_draw));
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/tests/gl_manager.cc b/gpu/command_buffer/tests/gl_manager.cc
new file mode 100644
index 0000000..bebf74c
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_manager.cc
@@ -0,0 +1,391 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include <vector>
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+#include "base/memory/ref_counted_memory.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/gles2_lib.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/gl_context_virtual.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image_ref_counted_memory.h"
+#include "ui/gl/gl_share_group.h"
+#include "ui/gl/gl_surface.h"
+
+namespace gpu {
+namespace {
+
+int BytesPerPixel(unsigned internalformat) {
+ switch (internalformat) {
+ case GL_RGBA8_OES:
+ return 4;
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
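+// Minimal in-memory gfx::GpuMemoryBuffer used by the tests; the pixel data
+// lives in a RefCountedBytes buffer that is shared with the service-side
+// GLImage (see CreateGpuMemoryBuffer below).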
+class GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
+ public:
+ GpuMemoryBufferImpl(base::RefCountedBytes* bytes,
+ const gfx::Size& size,
+ unsigned internalformat)
+ : bytes_(bytes),
+ size_(size),
+ internalformat_(internalformat),
+ mapped_(false) {}
+
+ // Overridden from gfx::GpuMemoryBuffer:
+ virtual void* Map() OVERRIDE {
+ mapped_ = true;
+ return &bytes_->data().front();
+ }
+ virtual void Unmap() OVERRIDE { mapped_ = false; }
+ virtual bool IsMapped() const OVERRIDE { return mapped_; }
+ virtual uint32 GetStride() const OVERRIDE {
+ return size_.width() * BytesPerPixel(internalformat_);
+ }
+ virtual gfx::GpuMemoryBufferHandle GetHandle() const OVERRIDE {
+ NOTREACHED();
+ return gfx::GpuMemoryBufferHandle();
+ }
+
+ private:
+ scoped_refptr<base::RefCountedBytes> bytes_;
+ const gfx::Size size_;
+ unsigned internalformat_;
+ bool mapped_;
+};
+
+} // namespace
+
+int GLManager::use_count_;
+scoped_refptr<gfx::GLShareGroup>* GLManager::base_share_group_;
+scoped_refptr<gfx::GLSurface>* GLManager::base_surface_;
+scoped_refptr<gfx::GLContext>* GLManager::base_context_;
+
+GLManager::Options::Options()
+ : size(4, 4),
+ share_group_manager(NULL),
+ share_mailbox_manager(NULL),
+ virtual_manager(NULL),
+ bind_generates_resource(false),
+ lose_context_when_out_of_memory(false),
+ context_lost_allowed(false) {
+}
+
+GLManager::GLManager() : context_lost_allowed_(false) {
+ SetupBaseContext();
+}
+
+GLManager::~GLManager() {
+ --use_count_;
+ if (!use_count_) {
+ if (base_share_group_) {
+ delete base_share_group_;
+ base_share_group_ = NULL;
+ }
+ if (base_surface_) {
+ delete base_surface_;
+ base_surface_ = NULL;
+ }
+ if (base_context_) {
+ delete base_context_;
+ base_context_ = NULL;
+ }
+ }
+}
+
+void GLManager::Initialize(const GLManager::Options& options) {
+ const int32 kCommandBufferSize = 1024 * 1024;
+ const size_t kStartTransferBufferSize = 4 * 1024 * 1024;
+ const size_t kMinTransferBufferSize = 1 * 256 * 1024;
+ const size_t kMaxTransferBufferSize = 16 * 1024 * 1024;
+
+ context_lost_allowed_ = options.context_lost_allowed;
+
+ gles2::MailboxManager* mailbox_manager = NULL;
+ if (options.share_mailbox_manager) {
+ mailbox_manager = options.share_mailbox_manager->mailbox_manager();
+ } else if (options.share_group_manager) {
+ mailbox_manager = options.share_group_manager->mailbox_manager();
+ }
+
+ gfx::GLShareGroup* share_group = NULL;
+ if (options.share_group_manager) {
+ share_group = options.share_group_manager->share_group();
+ } else if (options.share_mailbox_manager) {
+ share_group = options.share_mailbox_manager->share_group();
+ }
+
+ gles2::ContextGroup* context_group = NULL;
+ gles2::ShareGroup* client_share_group = NULL;
+ if (options.share_group_manager) {
+ context_group = options.share_group_manager->decoder_->GetContextGroup();
+ client_share_group =
+ options.share_group_manager->gles2_implementation()->share_group();
+ }
+
+ gfx::GLContext* real_gl_context = NULL;
+ if (options.virtual_manager) {
+ real_gl_context = options.virtual_manager->context();
+ }
+
+ mailbox_manager_ =
+ mailbox_manager ? mailbox_manager : new gles2::MailboxManager;
+ share_group_ =
+ share_group ? share_group : new gfx::GLShareGroup;
+
+ gfx::GpuPreference gpu_preference(gfx::PreferDiscreteGpu);
+ std::vector<int32> attribs;
+ gles2::ContextCreationAttribHelper attrib_helper;
+ attrib_helper.red_size = 8;
+ attrib_helper.green_size = 8;
+ attrib_helper.blue_size = 8;
+ attrib_helper.alpha_size = 8;
+ attrib_helper.depth_size = 16;
+ attrib_helper.Serialize(&attribs);
+
+ if (!context_group) {
+ context_group =
+ new gles2::ContextGroup(mailbox_manager_.get(),
+ NULL,
+ new gpu::gles2::ShaderTranslatorCache,
+ NULL,
+ options.bind_generates_resource);
+ }
+
+ decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group));
+
+ command_buffer_.reset(new CommandBufferService(
+ decoder_->GetContextGroup()->transfer_buffer_manager()));
+ ASSERT_TRUE(command_buffer_->Initialize())
+ << "could not create command buffer service";
+
+ gpu_scheduler_.reset(new GpuScheduler(command_buffer_.get(),
+ decoder_.get(),
+ decoder_.get()));
+
+ decoder_->set_engine(gpu_scheduler_.get());
+
+ surface_ = gfx::GLSurface::CreateOffscreenGLSurface(options.size);
+ ASSERT_TRUE(surface_.get() != NULL) << "could not create offscreen surface";
+
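+ // Wrap the shared Android base context or the caller-supplied real context
+ // in a GLContextVirtual if one is available; otherwise create a new context.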
+ if (base_context_) {
+ context_ = scoped_refptr<gfx::GLContext>(new gpu::GLContextVirtual(
+ share_group_.get(), base_context_->get(), decoder_->AsWeakPtr()));
+ ASSERT_TRUE(context_->Initialize(
+ surface_.get(), gfx::PreferIntegratedGpu));
+ } else {
+ if (real_gl_context) {
+ context_ = scoped_refptr<gfx::GLContext>(new gpu::GLContextVirtual(
+ share_group_.get(), real_gl_context, decoder_->AsWeakPtr()));
+ ASSERT_TRUE(context_->Initialize(
+ surface_.get(), gfx::PreferIntegratedGpu));
+ } else {
+ context_ = gfx::GLContext::CreateGLContext(share_group_.get(),
+ surface_.get(),
+ gpu_preference);
+ }
+ }
+ ASSERT_TRUE(context_.get() != NULL) << "could not create GL context";
+
+ ASSERT_TRUE(context_->MakeCurrent(surface_.get()));
+
+ ASSERT_TRUE(decoder_->Initialize(
+ surface_.get(),
+ context_.get(),
+ true,
+ options.size,
+ ::gpu::gles2::DisallowedFeatures(),
+ attribs)) << "could not initialize decoder";
+
+ command_buffer_->SetPutOffsetChangeCallback(
+ base::Bind(&GLManager::PumpCommands, base::Unretained(this)));
+ command_buffer_->SetGetBufferChangeCallback(
+ base::Bind(&GLManager::GetBufferChanged, base::Unretained(this)));
+
+ // Create the GLES2 helper, which writes the command buffer protocol.
+ gles2_helper_.reset(new gles2::GLES2CmdHelper(command_buffer_.get()));
+ ASSERT_TRUE(gles2_helper_->Initialize(kCommandBufferSize));
+
+ // Create a transfer buffer.
+ transfer_buffer_.reset(new TransferBuffer(gles2_helper_.get()));
+
+ // Create the object exposing the OpenGL API.
+ gles2_implementation_.reset(
+ new gles2::GLES2Implementation(gles2_helper_.get(),
+ client_share_group,
+ transfer_buffer_.get(),
+ options.bind_generates_resource,
+ options.lose_context_when_out_of_memory,
+ this));
+
+ ASSERT_TRUE(gles2_implementation_->Initialize(
+ kStartTransferBufferSize,
+ kMinTransferBufferSize,
+ kMaxTransferBufferSize,
+ gpu::gles2::GLES2Implementation::kNoLimit))
+ << "Could not init GLES2Implementation";
+
+ MakeCurrent();
+}
+
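+// Lazily creates the shared base share group, surface and context (Android
+// only) that Initialize() virtualizes each GLManager's context on top of.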
+void GLManager::SetupBaseContext() {
+ if (use_count_) {
+ #if defined(OS_ANDROID)
+ base_share_group_ = new scoped_refptr<gfx::GLShareGroup>(
+ new gfx::GLShareGroup);
+ gfx::Size size(4, 4);
+ base_surface_ = new scoped_refptr<gfx::GLSurface>(
+ gfx::GLSurface::CreateOffscreenGLSurface(size));
+ gfx::GpuPreference gpu_preference(gfx::PreferDiscreteGpu);
+ base_context_ = new scoped_refptr<gfx::GLContext>(
+ gfx::GLContext::CreateGLContext(base_share_group_->get(),
+ base_surface_->get(),
+ gpu_preference));
+ #endif
+ }
+ ++use_count_;
+}
+
+void GLManager::MakeCurrent() {
+ ::gles2::SetGLContext(gles2_implementation_.get());
+}
+
+void GLManager::SetSurface(gfx::GLSurface* surface) {
+ decoder_->SetSurface(surface);
+}
+
+void GLManager::Destroy() {
+ if (gles2_implementation_.get()) {
+ MakeCurrent();
+ EXPECT_TRUE(glGetError() == GL_NONE);
+ gles2_implementation_->Flush();
+ gles2_implementation_.reset();
+ }
+ transfer_buffer_.reset();
+ gles2_helper_.reset();
+ command_buffer_.reset();
+ if (decoder_.get()) {
+ decoder_->MakeCurrent();
+ decoder_->Destroy(true);
+ decoder_.reset();
+ }
+}
+
+const gpu::gles2::FeatureInfo::Workarounds& GLManager::workarounds() const {
+ return decoder_->GetContextGroup()->feature_info()->workarounds();
+}
+
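+// Called whenever the client advances the put offset: runs the pending
+// commands through the scheduler and fails the test on a command buffer
+// error unless context loss was explicitly allowed.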
+void GLManager::PumpCommands() {
+ decoder_->MakeCurrent();
+ gpu_scheduler_->PutChanged();
+ ::gpu::CommandBuffer::State state = command_buffer_->GetLastState();
+ if (!context_lost_allowed_) {
+ ASSERT_EQ(::gpu::error::kNoError, state.error);
+ }
+}
+
+bool GLManager::GetBufferChanged(int32 transfer_buffer_id) {
+ return gpu_scheduler_->SetGetBuffer(transfer_buffer_id);
+}
+
+Capabilities GLManager::GetCapabilities() {
+ return decoder_->GetCapabilities();
+}
+
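+// Allocates an in-process buffer: the same RefCountedBytes backs both the
+// client-visible GpuMemoryBufferImpl and a GLImageRefCountedMemory registered
+// with the decoder's ImageManager under the new id.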
+gfx::GpuMemoryBuffer* GLManager::CreateGpuMemoryBuffer(
+ size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage,
+ int32* id) {
+ gfx::Size size(width, height);
+
+ *id = -1;
+
+ std::vector<unsigned char> data(
+ size.GetArea() * BytesPerPixel(internalformat), 0);
+ scoped_refptr<base::RefCountedBytes> bytes(new base::RefCountedBytes(data));
+ scoped_ptr<gfx::GpuMemoryBuffer> buffer(
+ new GpuMemoryBufferImpl(bytes.get(), size, internalformat));
+
+ static int32 next_id = 1;
+ int32 new_id = next_id++;
+
+ scoped_refptr<gfx::GLImageRefCountedMemory> image(
+ new gfx::GLImageRefCountedMemory(size, internalformat));
+ if (!image->Initialize(bytes.get()))
+ return NULL;
+
+ gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
+ DCHECK(image_manager);
+ image_manager->AddImage(image.get(), new_id);
+
+ *id = new_id;
+ DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
+ return gpu_memory_buffers_.add(new_id, buffer.Pass()).first->second;
+}
+
+void GLManager::DestroyGpuMemoryBuffer(int32 id) {
+ gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
+ DCHECK(image_manager);
+ image_manager->RemoveImage(id);
+
+ gpu_memory_buffers_.erase(id);
+}
+
+uint32 GLManager::InsertSyncPoint() {
+ NOTIMPLEMENTED();
+ return 0u;
+}
+
+uint32 GLManager::InsertFutureSyncPoint() {
+ NOTIMPLEMENTED();
+ return 0u;
+}
+
+void GLManager::RetireSyncPoint(uint32 sync_point) {
+ NOTIMPLEMENTED();
+}
+
+void GLManager::SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) {
+ NOTIMPLEMENTED();
+}
+
+void GLManager::SignalQuery(uint32 query, const base::Closure& callback) {
+ NOTIMPLEMENTED();
+}
+
+void GLManager::SetSurfaceVisible(bool visible) {
+ NOTIMPLEMENTED();
+}
+
+uint32 GLManager::CreateStreamTexture(uint32 texture_id) {
+ NOTIMPLEMENTED();
+ return 0;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/tests/gl_manager.h b/gpu/command_buffer/tests/gl_manager.h
new file mode 100644
index 0000000..9f58e81
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_manager.h
@@ -0,0 +1,139 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_TESTS_GL_MANAGER_H_
+#define GPU_COMMAND_BUFFER_TESTS_GL_MANAGER_H_
+
+#include "base/containers/scoped_ptr_hash_map.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "ui/gfx/size.h"
+
+namespace gfx {
+
+class GLContext;
+class GLShareGroup;
+class GLSurface;
+
+} // namespace gfx
+
+namespace gpu {
+
+class CommandBufferService;
+class GpuScheduler;
+class TransferBuffer;
+
+namespace gles2 {
+
+class ContextGroup;
+class MailboxManager;
+class GLES2Decoder;
+class GLES2CmdHelper;
+class GLES2Implementation;
+class ImageManager;
+class ShareGroup;
+
+} // namespace gles2
+
+class GLManager : private GpuControl {
+ public:
+ struct Options {
+ Options();
+ // The size of the backbuffer.
+ gfx::Size size;
+ // If not null will share resources with this context.
+ GLManager* share_group_manager;
+ // If not null will share a mailbox manager with this context.
+ GLManager* share_mailbox_manager;
+ // If not null will create a virtual manager based on this context.
+ GLManager* virtual_manager;
+ // Whether or not glBindXXX generates a resource.
+ bool bind_generates_resource;
+ // Whether or not the context is auto-lost when GL_OUT_OF_MEMORY occurs.
+ bool lose_context_when_out_of_memory;
+ // Whether or not it's ok to lose the context.
+ bool context_lost_allowed;
+ };
+ GLManager();
+ virtual ~GLManager();
+
+ void Initialize(const Options& options);
+ void Destroy();
+
+ void MakeCurrent();
+
+ void SetSurface(gfx::GLSurface* surface);
+
+ gles2::GLES2Decoder* decoder() const {
+ return decoder_.get();
+ }
+
+ gles2::MailboxManager* mailbox_manager() const {
+ return mailbox_manager_.get();
+ }
+
+ gfx::GLShareGroup* share_group() const {
+ return share_group_.get();
+ }
+
+ gles2::GLES2Implementation* gles2_implementation() const {
+ return gles2_implementation_.get();
+ }
+
+ gfx::GLContext* context() {
+ return context_.get();
+ }
+
+ const gpu::gles2::FeatureInfo::Workarounds& workarounds() const;
+
+ // GpuControl implementation.
+ virtual Capabilities GetCapabilities() OVERRIDE;
+ virtual gfx::GpuMemoryBuffer* CreateGpuMemoryBuffer(size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage,
+ int32* id) OVERRIDE;
+ virtual void DestroyGpuMemoryBuffer(int32 id) OVERRIDE;
+ virtual uint32 InsertSyncPoint() OVERRIDE;
+ virtual uint32 InsertFutureSyncPoint() OVERRIDE;
+ virtual void RetireSyncPoint(uint32 sync_point) OVERRIDE;
+ virtual void SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) OVERRIDE;
+ virtual void SignalQuery(uint32 query,
+ const base::Closure& callback) OVERRIDE;
+ virtual void SetSurfaceVisible(bool visible) OVERRIDE;
+ virtual uint32 CreateStreamTexture(uint32 texture_id) OVERRIDE;
+
+ private:
+ void PumpCommands();
+ bool GetBufferChanged(int32 transfer_buffer_id);
+ void SetupBaseContext();
+
+ scoped_refptr<gles2::MailboxManager> mailbox_manager_;
+ scoped_refptr<gfx::GLShareGroup> share_group_;
+ scoped_ptr<CommandBufferService> command_buffer_;
+ scoped_ptr<gles2::GLES2Decoder> decoder_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_refptr<gfx::GLSurface> surface_;
+ scoped_refptr<gfx::GLContext> context_;
+ scoped_ptr<gles2::GLES2CmdHelper> gles2_helper_;
+ scoped_ptr<TransferBuffer> transfer_buffer_;
+ scoped_ptr<gles2::GLES2Implementation> gles2_implementation_;
+ bool context_lost_allowed_;
+
+ // Client GpuControl implementation.
+ base::ScopedPtrHashMap<int32, gfx::GpuMemoryBuffer> gpu_memory_buffers_;
+
+ // Used on Android to virtualize GL for all contexts.
+ static int use_count_;
+ static scoped_refptr<gfx::GLShareGroup>* base_share_group_;
+ static scoped_refptr<gfx::GLSurface>* base_surface_;
+ static scoped_refptr<gfx::GLContext>* base_context_;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_TESTS_GL_MANAGER_H_
diff --git a/gpu/command_buffer/tests/gl_pointcoord_unittest.cc b/gpu/command_buffer/tests/gl_pointcoord_unittest.cc
new file mode 100644
index 0000000..fe71eed
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_pointcoord_unittest.cc
@@ -0,0 +1,156 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include <algorithm>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define SHADER(Src) #Src
+
+namespace gpu {
+
+class PointCoordTest : public testing::Test {
+ public:
+ static const GLsizei kResolution = 256;
+
+ protected:
+ virtual void SetUp() {
+ GLManager::Options options;
+ options.size = gfx::Size(kResolution, kResolution);
+ gl_.Initialize(options);
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ GLuint SetupQuad(GLint position_location, GLfloat pixel_offset);
+
+ GLManager gl_;
+};
+
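+// Creates a VBO holding the centers of the four points that the test below
+// draws with GL_POINTS, each shifted by |pixel_offset|.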
+GLuint PointCoordTest::SetupQuad(
+ GLint position_location, GLfloat pixel_offset) {
+ GLuint vbo = 0;
+ glGenBuffers(1, &vbo);
+ glBindBuffer(GL_ARRAY_BUFFER, vbo);
+ float vertices[] = {
+ -0.5f + pixel_offset, -0.5f + pixel_offset,
+ 0.5f + pixel_offset, -0.5f + pixel_offset,
+ -0.5f + pixel_offset, 0.5f + pixel_offset,
+ 0.5f + pixel_offset, 0.5f + pixel_offset,
+ };
+ glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
+ glEnableVertexAttribArray(position_location);
+ glVertexAttribPointer(position_location, 2, GL_FLOAT, GL_FALSE, 0, 0);
+
+ return vbo;
+}
+
+namespace {
+
+struct FormatType {
+ GLenum format;
+ GLenum type;
+};
+
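+// Converts a normalized device coordinate in [-1, 1] to a window coordinate
+// in [0, kResolution].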
+GLfloat s2p(GLfloat s) {
+ return (s + 1.0) * 0.5 * PointCoordTest::kResolution;
+}
+
+} // anonymous namespace
+
+// crbug.com/162976
+// Flaky on Linux ATI bot.
+#if (defined(OS_LINUX) && defined(NDEBUG))
+#define MAYBE_RenderTo DISABLED_RenderTo
+#else
+#define MAYBE_RenderTo RenderTo
+#endif
+
+TEST_F(PointCoordTest, MAYBE_RenderTo) {
+ static const char* v_shader_str = SHADER(
+ attribute vec4 a_position;
+ uniform float u_pointsize;
+ void main()
+ {
+ gl_PointSize = u_pointsize;
+ gl_Position = a_position;
+ }
+ );
+ static const char* f_shader_str = SHADER(
+ precision mediump float;
+ void main()
+ {
+ gl_FragColor = vec4(
+ gl_PointCoord.x,
+ gl_PointCoord.y,
+ 0,
+ 1);
+ }
+ );
+
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+ glUseProgram(program);
+
+ GLint position_loc = glGetAttribLocation(program, "a_position");
+ GLint pointsize_loc = glGetUniformLocation(program, "u_pointsize");
+
+ GLint range[2] = { 0, 0 };
+ glGetIntegerv(GL_ALIASED_POINT_SIZE_RANGE, &range[0]);
+ GLint max_point_size = range[1];
+ EXPECT_GE(max_point_size, 1);
+
+ max_point_size = std::min(max_point_size, 64);
+ GLint point_width = max_point_size / kResolution;
+ GLint point_step = max_point_size / 4;
+ point_step = std::max(1, point_step);
+
+ glUniform1f(pointsize_loc, max_point_size);
+
+ GLfloat pixel_offset = (max_point_size % 2) ? (1.0f / kResolution) : 0;
+
+ SetupQuad(position_loc, pixel_offset);
+
+ glClear(GL_COLOR_BUFFER_BIT);
+ glDrawArrays(GL_POINTS, 0, 4);
+
+ for (GLint py = 0; py < 2; ++py) {
+ for (GLint px = 0; px < 2; ++px) {
+ GLfloat point_x = -0.5 + px + pixel_offset;
+ GLfloat point_y = -0.5 + py + pixel_offset;
+ for (GLint yy = 0; yy < max_point_size; yy += point_step) {
+ for (GLint xx = 0; xx < max_point_size; xx += point_step) {
+ // formula for s and t from OpenGL ES 2.0 spec section 3.3
+ GLfloat xw = s2p(point_x);
+ GLfloat yw = s2p(point_y);
+ GLfloat u = xx / max_point_size * 2 - 1;
+ GLfloat v = yy / max_point_size * 2 - 1;
+ GLint xf = s2p(point_x + u * point_width);
+ GLint yf = s2p(point_y + v * point_width);
+ GLfloat s = 0.5 + (xf + 0.5 - xw) / max_point_size;
+ GLfloat t = 0.5 + (yf + 0.5 - yw) / max_point_size;
+ uint8 color[4] = {
+ static_cast<uint8>(s * 255),
+ static_cast<uint8>((1 - t) * 255),
+ 0,
+ 255,
+ };
+ GLTestHelper::CheckPixels(xf, yf, 1, 1, 4, color);
+ }
+ }
+ }
+ }
+
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+} // namespace gpu
+
+
+
diff --git a/gpu/command_buffer/tests/gl_program_unittest.cc b/gpu/command_buffer/tests/gl_program_unittest.cc
new file mode 100644
index 0000000..b99aa58
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_program_unittest.cc
@@ -0,0 +1,166 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define SHADER(Src) #Src
+
+namespace gpu {
+
+class GLProgramTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gl_.Initialize(GLManager::Options());
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ GLManager gl_;
+};
+
+TEST_F(GLProgramTest, GetSetUniform) {
+ static const char* v_shader_str = SHADER(
+ attribute vec4 a_vertex;
+ attribute vec3 a_normal;
+
+ uniform mat4 u_modelViewProjMatrix;
+
+ struct MyStruct
+ {
+ int x;
+ int y;
+ };
+
+ uniform MyStruct u_struct;
+ uniform float u_array[4];
+
+ varying vec3 v_normal;
+
+ void main()
+ {
+ v_normal = a_normal;
+ gl_Position = u_modelViewProjMatrix * a_vertex +
+ vec4(u_struct.x, u_struct.y, 0, 1) +
+ vec4(u_array[0], u_array[1], u_array[2], u_array[3]);
+ }
+ );
+ static const char* f_shader_str = SHADER(
+ varying mediump vec3 v_normal;
+
+ void main()
+ {
+ gl_FragColor = vec4(v_normal/2.0+vec3(0.5), 1);
+ }
+ );
+
+ // Load the program.
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+ glUseProgram(program);
+ // Relink program.
+ glLinkProgram(program);
+
+ // These tests will fail on NVidia if not worked around by the
+ // command buffer.
+ GLint location_sx = glGetUniformLocation(program, "u_struct.x");
+ GLint location_array_0 = glGetUniformLocation(program, "u_array[0]");
+
+ glUniform1i(location_sx, 3);
+ glUniform1f(location_array_0, 123);
+
+ GLint int_value = 0;
+ GLfloat float_value = 0;
+
+ glGetUniformiv(program, location_sx, &int_value);
+ EXPECT_EQ(3, int_value);
+ glGetUniformfv(program, location_array_0, &float_value);
+ EXPECT_EQ(123.0f, float_value);
+
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+TEST_F(GLProgramTest, NewShaderInCurrentProgram) {
+ static const char* v_shader_str = SHADER(
+ attribute vec4 a_position;
+ void main()
+ {
+ gl_Position = a_position;
+ }
+ );
+ static const char* f_red_shader_str = SHADER(
+ void main()
+ {
+ gl_FragColor = vec4(1, 0, 0, 1);
+ }
+ );
+ static const char* f_blue_shader_str = SHADER(
+ void main()
+ {
+ gl_FragColor = vec4(0, 0, 1, 1);
+ }
+ );
+
+ // Load the program.
+ GLuint vs = GLTestHelper::LoadShader(GL_VERTEX_SHADER, v_shader_str);
+ GLuint fs = GLTestHelper::LoadShader(GL_FRAGMENT_SHADER, f_red_shader_str);
+ GLuint program = GLTestHelper::SetupProgram(vs, fs);
+ glUseProgram(program);
+ glShaderSource(fs, 1, &f_blue_shader_str, NULL);
+ glCompileShader(fs);
+ glLinkProgram(program);
+ // We specifically don't call UseProgram again.
+ GLuint position_loc = glGetAttribLocation(program, "a_position");
+ GLTestHelper::SetupUnitQuad(position_loc);
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ uint8 expected_color[] = { 0, 0, 255, 255, };
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected_color));
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+TEST_F(GLProgramTest, UniformsInCurrentProgram) {
+ static const char* v_shader_str = SHADER(
+ attribute vec4 a_position;
+ void main()
+ {
+ gl_Position = a_position;
+ }
+ );
+ static const char* f_shader_str = SHADER(
+ precision mediump float;
+ uniform vec4 u_color;
+ void main()
+ {
+ gl_FragColor = u_color;
+ }
+ );
+
+ // Load the program.
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+ glUseProgram(program);
+
+ // Relink.
+ glLinkProgram(program);
+
+ // This test will fail on NVidia Linux if not worked around.
+ GLint color_location = glGetUniformLocation(program, "u_color");
+ glUniform4f(color_location, 0.0f, 0.0f, 1.0f, 1.0f);
+
+ // We specifically don't call UseProgram again.
+ GLuint position_loc = glGetAttribLocation(program, "a_position");
+ GLTestHelper::SetupUnitQuad(position_loc);
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ uint8 expected_color[] = { 0, 0, 255, 255, };
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected_color));
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/tests/gl_query_unittest.cc b/gpu/command_buffer/tests/gl_query_unittest.cc
new file mode 100644
index 0000000..87235e5
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_query_unittest.cc
@@ -0,0 +1,195 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "base/threading/platform_thread.h"
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class QueryTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gl_.Initialize(GLManager::Options());
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ GLManager gl_;
+};
+
+TEST_F(QueryTest, MultipleQueries) {
+ EXPECT_TRUE(GLTestHelper::HasExtension("GL_CHROMIUM_get_error_query"));
+ EXPECT_TRUE(GLTestHelper::HasExtension(
+ "GL_CHROMIUM_command_buffer_latency_query"));
+
+ GLuint error_query = 0;
+ GLuint commands_issue_query = 0;
+ glGenQueriesEXT(1, &error_query);
+ glGenQueriesEXT(1, &commands_issue_query);
+
+ GLuint available;
+ GLuint result;
+
+ base::TimeTicks before = base::TimeTicks::HighResNow();
+
+ // Begin two queries of different types
+ glBeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, commands_issue_query);
+ glBeginQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM, error_query);
+
+ glEnable(GL_TEXTURE_2D); // Generates an INVALID_ENUM error
+
+ // End the two queries
+ glEndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
+ glEndQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM);
+
+ glFinish();
+
+ base::TimeTicks after = base::TimeTicks::HighResNow();
+
+ // Check that we got result on both queries.
+
+ available = 0;
+ result = 0;
+ glGetQueryObjectuivEXT(commands_issue_query,
+ GL_QUERY_RESULT_AVAILABLE_EXT,
+ &available);
+ EXPECT_TRUE(available);
+ glGetQueryObjectuivEXT(commands_issue_query, GL_QUERY_RESULT_EXT, &result);
+ // Sanity check - the resulting delta is shorter than the time it took to
+ // run this test.
+ EXPECT_LT(result, base::TimeDelta(after - before).InMicroseconds());
+
+ result = 0;
+ available = 0;
+ glGetQueryObjectuivEXT(error_query,
+ GL_QUERY_RESULT_AVAILABLE_EXT,
+ &available);
+ EXPECT_TRUE(available);
+ glGetQueryObjectuivEXT(error_query, GL_QUERY_RESULT_EXT, &result);
+ EXPECT_EQ(static_cast<uint32>(GL_INVALID_ENUM), result);
+}
+
+TEST_F(QueryTest, GetErrorBasic) {
+ EXPECT_TRUE(GLTestHelper::HasExtension("GL_CHROMIUM_get_error_query"));
+
+ GLuint query = 0;
+ glGenQueriesEXT(1, &query);
+
+ GLuint query_status = 0;
+ GLuint result = 0;
+
+ glBeginQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM, query);
+ glEnable(GL_TEXTURE_2D); // Generates an INVALID_ENUM error
+ glEndQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM);
+
+ glFinish();
+
+ query_status = 0;
+ result = 0;
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_AVAILABLE_EXT, &result);
+ EXPECT_TRUE(result);
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_EXT, &query_status);
+ EXPECT_EQ(static_cast<uint32>(GL_INVALID_ENUM), query_status);
+}
+
+TEST_F(QueryTest, DISABLED_LatencyQueryBasic) {
+ EXPECT_TRUE(GLTestHelper::HasExtension(
+ "GL_CHROMIUM_command_buffer_latency_query"));
+
+ GLuint query = 0;
+ glGenQueriesEXT(1, &query);
+
+ GLuint query_result = 0;
+ GLuint available = 0;
+
+ // First test a query with a ~2 ms "latency".
+ const unsigned int kExpectedLatencyMicroseconds = 2000;
+ const unsigned int kTimePrecisionMicroseconds = 1000;
+
+ glBeginQueryEXT(GL_LATENCY_QUERY_CHROMIUM, query);
+ // Usually, we want to measure gpu-side latency, but we fake it by
+ // adding client side latency for our test because it's easier.
+ base::PlatformThread::Sleep(
+ base::TimeDelta::FromMicroseconds(kExpectedLatencyMicroseconds));
+ glEndQueryEXT(GL_LATENCY_QUERY_CHROMIUM);
+
+ glFinish();
+
+ query_result = 0;
+ available = 0;
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(available);
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_EXT, &query_result);
+ EXPECT_GE(query_result, kExpectedLatencyMicroseconds
+ - kTimePrecisionMicroseconds);
+ EXPECT_LE(query_result, kExpectedLatencyMicroseconds
+ + kTimePrecisionMicroseconds);
+
+ // Then test a query with the lowest latency possible.
+ glBeginQueryEXT(GL_LATENCY_QUERY_CHROMIUM, query);
+ glEndQueryEXT(GL_LATENCY_QUERY_CHROMIUM);
+
+ glFinish();
+
+ query_result = 0;
+ available = 0;
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(available);
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_EXT, &query_result);
+
+ EXPECT_LE(query_result, kTimePrecisionMicroseconds);
+}
+
+TEST_F(QueryTest, CommandsCompleted) {
+ if (!GLTestHelper::HasExtension("GL_CHROMIUM_sync_query")) {
+ LOG(INFO) << "GL_CHROMIUM_sync_query not supported. Skipping test...";
+ return;
+ }
+
+ GLuint query;
+ glGenQueriesEXT(1, &query);
+ glBeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, query);
+ glClearColor(0.0, 0.0, 1.0, 1.0);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glEndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
+ glFlush();
+ GLuint result = 0;
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_EXT, &result);
+ EXPECT_EQ(0u, result);
+ glDeleteQueriesEXT(1, &query);
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+TEST_F(QueryTest, CommandsCompletedWithFinish) {
+ if (!GLTestHelper::HasExtension("GL_CHROMIUM_sync_query")) {
+ LOG(INFO) << "GL_CHROMIUM_sync_query not supported. Skipping test...";
+ return;
+ }
+
+ GLuint query;
+ glGenQueriesEXT(1, &query);
+ glBeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, query);
+ glClearColor(0.0, 0.0, 1.0, 1.0);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glEndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
+ glFinish();
+ GLuint result = 0;
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_AVAILABLE_EXT, &result);
+ EXPECT_EQ(1u, result);
+ glDeleteQueriesEXT(1, &query);
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/tests/gl_readback_unittest.cc b/gpu/command_buffer/tests/gl_readback_unittest.cc
new file mode 100644
index 0000000..e67cbdc
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_readback_unittest.cc
@@ -0,0 +1,322 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include <cmath>
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class GLReadbackTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gl_.Initialize(GLManager::Options());
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
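+  // Polls the query's availability flag, re-posting itself on the message
+  // loop every few milliseconds until the result is ready, then runs |cb|.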
+ static void WaitForQueryCallback(int q, base::Closure cb) {
+ unsigned int done = 0;
+ glGetQueryObjectuivEXT(q, GL_QUERY_RESULT_AVAILABLE_EXT, &done);
+ if (done) {
+ cb.Run();
+ } else {
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&WaitForQueryCallback, q, cb),
+ base::TimeDelta::FromMilliseconds(3));
+ }
+ }
+
+ void WaitForQuery(int q) {
+ base::RunLoop run_loop;
+ WaitForQueryCallback(q, run_loop.QuitClosure());
+ run_loop.Run();
+ }
+
+ GLManager gl_;
+};
+
+
+TEST_F(GLReadbackTest, ReadPixelsWithPBOAndQuery) {
+ const GLint kBytesPerPixel = 4;
+ const GLint kWidth = 2;
+ const GLint kHeight = 2;
+
+ GLuint b, q;
+ glClearColor(0.0, 0.0, 1.0, 1.0);
+ glClear(GL_COLOR_BUFFER_BIT);
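+  // Asynchronous readback: allocate a pixel-pack transfer buffer, issue
+  // ReadPixels into it, and bracket the read with an async-pixel-pack query
+  // so completion can be polled before mapping the buffer.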
+ glGenBuffers(1, &b);
+ glGenQueriesEXT(1, &q);
+ glBindBuffer(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, b);
+ glBufferData(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM,
+ kWidth * kHeight * kBytesPerPixel,
+ NULL,
+ GL_STREAM_READ);
+ glBeginQueryEXT(GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM, q);
+ glReadPixels(0, 0, kWidth, kHeight, GL_RGBA, GL_UNSIGNED_BYTE, 0);
+ glEndQueryEXT(GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM);
+ glFlush();
+ WaitForQuery(q);
+
+ // TODO(hubbe): Check that glMapBufferCHROMIUM does not block here.
+ unsigned char *data = static_cast<unsigned char *>(
+ glMapBufferCHROMIUM(
+ GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM,
+ GL_READ_ONLY));
+ EXPECT_TRUE(data);
+ EXPECT_EQ(data[0], 0); // red
+ EXPECT_EQ(data[1], 0); // green
+ EXPECT_EQ(data[2], 255); // blue
+ glUnmapBufferCHROMIUM(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM);
+ glBindBuffer(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, 0);
+ glDeleteBuffers(1, &b);
+ glDeleteQueriesEXT(1, &q);
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
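+// Converts an IEEE 754 half-precision value (1 sign bit, 5 exponent bits,
+// 10 mantissa bits) to a 32-bit float: zeros and denormals are handled
+// explicitly, exponent 31 maps to Inf/NaN, and normal values are rebiased
+// (15 -> 127) with the mantissa widened from 10 to 23 bits.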
+static float HalfToFloat32(uint16 value) {
+ int32 s = (value >> 15) & 0x00000001;
+ int32 e = (value >> 10) & 0x0000001f;
+ int32 m = value & 0x000003ff;
+
+ if (e == 0) {
+ if (m == 0) {
+ uint32 result = s << 31;
+ return bit_cast<float>(result);
+ } else {
+ while (!(m & 0x00000400)) {
+ m <<= 1;
+ e -= 1;
+ }
+
+ e += 1;
+ m &= ~0x00000400;
+ }
+ } else if (e == 31) {
+ if (m == 0) {
+ uint32 result = (s << 31) | 0x7f800000;
+ return bit_cast<float>(result);
+ } else {
+ uint32 result = (s << 31) | 0x7f800000 | (m << 13);
+ return bit_cast<float>(result);
+ }
+ }
+
+ e = e + (127 - 15);
+ m = m << 13;
+
+ uint32 result = (s << 31) | (e << 23) | m;
+ return bit_cast<float>(result);
+}
+
+static GLuint CompileShader(GLenum type, const char *data) {
+ const char *shaderStrings[1] = { data };
+
+ GLuint shader = glCreateShader(type);
+ glShaderSource(shader, 1, shaderStrings, NULL);
+ glCompileShader(shader);
+
+ GLint compile_status = 0;
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_status);
+ if (compile_status != GL_TRUE) {
+ glDeleteShader(shader);
+ shader = 0;
+ }
+
+ return shader;
+}
+
+TEST_F(GLReadbackTest, ReadPixelsFloat) {
+ const GLsizei kTextureSize = 4;
+ const GLfloat kDrawColor[4] = { -10.9f, 0.5f, 10.5f, 100.12f };
+ const GLfloat kEpsilon = 0.01f;
+
+ struct TestFormat {
+ GLint format;
+ GLint type;
+ uint32 comp_count;
+ };
+ TestFormat test_formats[4];
+ size_t test_count = 0;
+ const char *extensions = reinterpret_cast<const char*>(
+ glGetString(GL_EXTENSIONS));
+ if (strstr(extensions, "GL_OES_texture_half_float") != NULL) {
+ TestFormat rgb16f = { GL_RGB, GL_HALF_FLOAT_OES, 3 };
+ test_formats[test_count++] = rgb16f;
+
+ TestFormat rgba16f = { GL_RGBA, GL_HALF_FLOAT_OES, 4 };
+ test_formats[test_count++] = rgba16f;
+ }
+ if (strstr(extensions, "GL_OES_texture_float") != NULL) {
+ TestFormat rgb32f = { GL_RGB, GL_FLOAT, 3 };
+ test_formats[test_count++] = rgb32f;
+
+ TestFormat rgba32f = { GL_RGBA, GL_FLOAT, 4 };
+ test_formats[test_count++] = rgba32f;
+ }
+
+ const char *vs_source =
+ "precision mediump float;\n"
+ "attribute vec4 a_position;\n"
+ "void main() {\n"
+ " gl_Position = a_position;\n"
+ "}\n";
+
+ GLuint vertex_shader = CompileShader(GL_VERTEX_SHADER, vs_source);
+ ASSERT_NE(vertex_shader, GLuint(0));
+
+ const char *fs_source =
+ "precision mediump float;\n"
+ "uniform vec4 u_color;\n"
+ "void main() {\n"
+ " gl_FragColor = u_color;\n"
+ "}\n";
+
+ GLuint fragment_shader = CompileShader(GL_FRAGMENT_SHADER, fs_source);
+ ASSERT_NE(fragment_shader, GLuint(0));
+
+ GLuint program = glCreateProgram();
+ glAttachShader(program, vertex_shader);
+ glDeleteShader(vertex_shader);
+ glAttachShader(program, fragment_shader);
+ glDeleteShader(fragment_shader);
+ glLinkProgram(program);
+
+ GLint link_status = 0;
+ glGetProgramiv(program, GL_LINK_STATUS, &link_status);
+ if (link_status != GL_TRUE) {
+ glDeleteProgram(program);
+ program = 0;
+ }
+ ASSERT_NE(program, GLuint(0));
+
+ EXPECT_EQ(glGetError(), GLenum(GL_NO_ERROR));
+
+ float quad_vertices[] = {
+ -1.0, -1.0,
+ 1.0, -1.0,
+ 1.0, 1.0,
+ -1.0, 1.0
+ };
+
+ GLuint vertex_buffer;
+ glGenBuffers(1, &vertex_buffer);
+ glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
+ glBufferData(
+ GL_ARRAY_BUFFER, sizeof(quad_vertices),
+ reinterpret_cast<void*>(quad_vertices), GL_STATIC_DRAW);
+
+ GLint position_location = glGetAttribLocation(program, "a_position");
+ glVertexAttribPointer(
+ position_location, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), NULL);
+ glEnableVertexAttribArray(position_location);
+
+ glUseProgram(program);
+ glUniform4fv(glGetUniformLocation(program, "u_color"), 1, kDrawColor);
+
+ EXPECT_EQ(glGetError(), GLenum(GL_NO_ERROR));
+
+ for (size_t ii = 0; ii < test_count; ++ii) {
+ GLuint texture_id = 0;
+ glGenTextures(1, &texture_id);
+ glBindTexture(GL_TEXTURE_2D, texture_id);
+ glTexImage2D(
+ GL_TEXTURE_2D, 0, test_formats[ii].format, kTextureSize, kTextureSize,
+ 0, test_formats[ii].format, test_formats[ii].type, NULL);
+
+ GLuint framebuffer = 0;
+ glGenFramebuffers(1, &framebuffer);
+ glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
+ glFramebufferTexture2D(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture_id, 0);
+
+ EXPECT_EQ(glGetError(), GLenum(GL_NO_ERROR));
+
+ // Make sure this floating point framebuffer is supported
+ if (glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE) {
+ // Check if this implementation supports reading floats back from this
+ // framebuffer
+ GLint read_format = 0;
+ glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &read_format);
+ GLint read_type = 0;
+ glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &read_type);
+
+ EXPECT_EQ(glGetError(), GLenum(GL_NO_ERROR));
+
+ if ((read_format == GL_RGB || read_format == GL_RGBA) &&
+ read_type == test_formats[ii].type) {
+ glClear(GL_COLOR_BUFFER_BIT);
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+
+ uint32 read_comp_count = 0;
+ switch (read_format) {
+ case GL_RGB:
+ read_comp_count = 3;
+ break;
+ case GL_RGBA:
+ read_comp_count = 4;
+ break;
+ }
+
+ switch (read_type) {
+ case GL_HALF_FLOAT_OES: {
+ scoped_ptr<GLushort[]> buf(
+ new GLushort[kTextureSize * kTextureSize * read_comp_count]);
+ glReadPixels(
+ 0, 0, kTextureSize, kTextureSize, read_format, read_type,
+ buf.get());
+ EXPECT_EQ(glGetError(), GLenum(GL_NO_ERROR));
+ for (uint32 jj = 0; jj < kTextureSize * kTextureSize; ++jj) {
+ for (uint32 kk = 0; kk < test_formats[ii].comp_count; ++kk) {
+ EXPECT_LE(
+ std::abs(HalfToFloat32(buf[jj * read_comp_count + kk]) -
+ kDrawColor[kk]),
+ std::abs(kDrawColor[kk] * kEpsilon));
+ }
+ }
+ break;
+ }
+ case GL_FLOAT: {
+ scoped_ptr<GLfloat[]> buf(
+ new GLfloat[kTextureSize * kTextureSize * read_comp_count]);
+ glReadPixels(
+ 0, 0, kTextureSize, kTextureSize, read_format, read_type,
+ buf.get());
+ EXPECT_EQ(glGetError(), GLenum(GL_NO_ERROR));
+ for (uint32 jj = 0; jj < kTextureSize * kTextureSize; ++jj) {
+ for (uint32 kk = 0; kk < test_formats[ii].comp_count; ++kk) {
+ EXPECT_LE(
+ std::abs(buf[jj * read_comp_count + kk] - kDrawColor[kk]),
+ std::abs(kDrawColor[kk] * kEpsilon));
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ glDeleteFramebuffers(1, &framebuffer);
+ glDeleteTextures(1, &texture_id);
+ }
+
+ glDeleteBuffers(1, &vertex_buffer);
+ glDeleteProgram(program);
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/tests/gl_shared_resources_unittest.cc b/gpu/command_buffer/tests/gl_shared_resources_unittest.cc
new file mode 100644
index 0000000..136b57a
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_shared_resources_unittest.cc
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "base/logging.h"
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class GLSharedResources : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ GLManager::Options options;
+ options.bind_generates_resource = true;
+ gl1_.Initialize(options);
+ options.share_group_manager = &gl1_;
+ gl2_.Initialize(options);
+ }
+
+ virtual void TearDown() {
+ gl1_.Destroy();
+ gl2_.Destroy();
+ }
+
+ GLManager gl1_;
+ GLManager gl2_;
+};
+
+// Test that GL resource creation/deletion works across contexts.
+TEST_F(GLSharedResources, CreateDelete) {
+ gl1_.MakeCurrent();
+ GLuint tex = 0;
+ glGenTextures(1, &tex);
+ gl2_.MakeCurrent();
+ glBindTexture(GL_TEXTURE_2D, tex);
+ glDeleteTextures(1, &tex);
+ gl1_.MakeCurrent();
+  glBindTexture(GL_TEXTURE_2D, tex);
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+ gl2_.MakeCurrent();
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/tests/gl_stream_draw_unittest.cc b/gpu/command_buffer/tests/gl_stream_draw_unittest.cc
new file mode 100644
index 0000000..7ccd48d
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_stream_draw_unittest.cc
@@ -0,0 +1,160 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+
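+// Stringizes its argument so GLSL source can be written inline without
+// quoting every line.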
+#define SHADER(Src) #Src
+
+namespace gpu {
+
+class GLStreamDrawTest : public testing::Test {
+ protected:
+ static const int kSize = 4;
+
+ virtual void SetUp() {
+ GLManager::Options options;
+ options.size = gfx::Size(kSize, kSize);
+ gl_.Initialize(options);
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ GLManager gl_;
+};
+
+namespace {
+
+GLuint SetupProgram() {
+ static const char* v_shader_str = SHADER(
+ attribute vec4 a_position;
+ attribute vec4 a_color;
+ varying vec4 v_color;
+ void main()
+ {
+ gl_Position = a_position;
+ v_color = a_color;
+ }
+ );
+
+ static const char* f_shader_str = SHADER(
+ precision mediump float;
+ varying vec4 v_color;
+ void main()
+ {
+ gl_FragColor = v_color;
+ }
+ );
+
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+ glUseProgram(program);
+ return program;
+}
+
+} // anonymous namespace.
+
+TEST_F(GLStreamDrawTest, Basic) {
+ static GLfloat float_red[4] = { 1.0f, 0.0f, 0.0f, 1.0f, };
+ static GLfloat float_green[4] = { 0.0f, 1.0f, 0.0f, 1.0f, };
+ static uint8 expected_red[4] = {255, 0, 0, 255, };
+ static uint8 expected_green[4] = {0, 255, 0, 255, };
+
+ GLuint program = SetupProgram();
+ GLuint position_loc = glGetAttribLocation(program, "a_position");
+ GLuint color_loc = glGetAttribLocation(program, "a_color");
+ GLTestHelper::SetupUnitQuad(position_loc);
+ GLTestHelper::SetupColorsForUnitQuad(color_loc, float_red, GL_STREAM_DRAW);
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, kSize, kSize, 0, expected_red));
+ GLTestHelper::SetupColorsForUnitQuad(color_loc, float_green, GL_STATIC_DRAW);
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, kSize, kSize, 0, expected_green));
+
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+// http://crbug.com/281565
+#if !defined(OS_ANDROID)
+TEST_F(GLStreamDrawTest, DrawElements) {
+ static GLfloat float_red[4] = { 1.0f, 0.0f, 0.0f, 1.0f, };
+ static GLfloat float_green[4] = { 0.0f, 1.0f, 0.0f, 1.0f, };
+ static uint8 expected_red[4] = {255, 0, 0, 255, };
+ static uint8 expected_green[4] = {0, 255, 0, 255, };
+
+ GLuint program = SetupProgram();
+ GLuint position_loc = glGetAttribLocation(program, "a_position");
+ GLuint color_loc = glGetAttribLocation(program, "a_color");
+ GLTestHelper::SetupUnitQuad(position_loc);
+ GLTestHelper::SetupColorsForUnitQuad(color_loc, float_red, GL_STREAM_DRAW);
+
+ GLuint index_buffer = 0;
+ glGenBuffers(1, &index_buffer);
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, index_buffer);
+ static GLubyte indices[] = { 0, 1, 2, 3, 4, 5, };
+ glBufferData(
+ GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STREAM_DRAW);
+ glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, NULL);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, kSize, kSize, 0, expected_red));
+ GLTestHelper::SetupColorsForUnitQuad(color_loc, float_green, GL_STATIC_DRAW);
+
+ glBufferData(
+ GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
+ glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, NULL);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, kSize, kSize, 0, expected_green));
+
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+#endif
+
+TEST_F(GLStreamDrawTest, VertexArrayObjects) {
+ if (!GLTestHelper::HasExtension("GL_OES_vertex_array_object")) {
+ return;
+ }
+
+ static GLfloat float_red[4] = { 1.0f, 0.0f, 0.0f, 1.0f, };
+ static GLfloat float_green[4] = { 0.0f, 1.0f, 0.0f, 1.0f, };
+ static uint8 expected_red[4] = {255, 0, 0, 255, };
+ static uint8 expected_green[4] = {0, 255, 0, 255, };
+
+ GLuint program = SetupProgram();
+ GLuint position_loc = glGetAttribLocation(program, "a_position");
+ GLuint color_loc = glGetAttribLocation(program, "a_color");
+
+ GLuint vaos[2];
+ glGenVertexArraysOES(2, vaos);
+
+ glBindVertexArrayOES(vaos[0]);
+ GLuint position_buffer = GLTestHelper::SetupUnitQuad(position_loc);
+ GLTestHelper::SetupColorsForUnitQuad(color_loc, float_red, GL_STREAM_DRAW);
+
+ glBindVertexArrayOES(vaos[1]);
+ glBindBuffer(GL_ARRAY_BUFFER, position_buffer);
+ glEnableVertexAttribArray(position_loc);
+ glVertexAttribPointer(position_loc, 2, GL_FLOAT, GL_FALSE, 0, 0);
+ GLTestHelper::SetupColorsForUnitQuad(color_loc, float_green, GL_STATIC_DRAW);
+
+ for (int ii = 0; ii < 2; ++ii) {
+ glBindVertexArrayOES(vaos[0]);
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, kSize, kSize, 0, expected_red));
+
+ glBindVertexArrayOES(vaos[1]);
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(
+ GLTestHelper::CheckPixels(0, 0, kSize, kSize, 0, expected_green));
+ }
+
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/tests/gl_test_utils.cc b/gpu/command_buffer/tests/gl_test_utils.cc
new file mode 100644
index 0000000..0ab71da
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_test_utils.cc
@@ -0,0 +1,247 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include <string>
+#include <stdio.h>
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// GCC requires these declarations, but MSVC requires they not be present.
+#ifndef COMPILER_MSVC
+const uint8 GLTestHelper::kCheckClearValue;
+#endif
+
+bool GLTestHelper::HasExtension(const char* extension) {
+ std::string extensions(
+ reinterpret_cast<const char*>(glGetString(GL_EXTENSIONS)));
+ return extensions.find(extension) != std::string::npos;
+}
+
+bool GLTestHelper::CheckGLError(const char* msg, int line) {
+ bool success = true;
+ GLenum error = GL_NO_ERROR;
+ while ((error = glGetError()) != GL_NO_ERROR) {
+ success = false;
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), error)
+ << "GL ERROR in " << msg << " at line " << line << " : " << error;
+ }
+ return success;
+}
+
+GLuint GLTestHelper::LoadShader(GLenum type, const char* shaderSrc) {
+ GLuint shader = glCreateShader(type);
+ // Load the shader source
+ glShaderSource(shader, 1, &shaderSrc, NULL);
+ // Compile the shader
+ glCompileShader(shader);
+ // Check the compile status
+ GLint value = 0;
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &value);
+ if (value == 0) {
+ char buffer[1024];
+ GLsizei length = 0;
+ glGetShaderInfoLog(shader, sizeof(buffer), &length, buffer);
+ std::string log(buffer, length);
+ EXPECT_EQ(1, value) << "Error compiling shader: " << log;
+ glDeleteShader(shader);
+ shader = 0;
+ }
+ return shader;
+}
+
+GLuint GLTestHelper::SetupProgram(
+ GLuint vertex_shader, GLuint fragment_shader) {
+ // Create the program object
+ GLuint program = glCreateProgram();
+ glAttachShader(program, vertex_shader);
+ glAttachShader(program, fragment_shader);
+ // Link the program
+ glLinkProgram(program);
+ // Check the link status
+ GLint linked = 0;
+ glGetProgramiv(program, GL_LINK_STATUS, &linked);
+ if (linked == 0) {
+ char buffer[1024];
+ GLsizei length = 0;
+ glGetProgramInfoLog(program, sizeof(buffer), &length, buffer);
+ std::string log(buffer, length);
+ EXPECT_EQ(1, linked) << "Error linking program: " << log;
+ glDeleteProgram(program);
+ program = 0;
+ }
+ return program;
+}
+
+GLuint GLTestHelper::LoadProgram(
+ const char* vertex_shader_source,
+ const char* fragment_shader_source) {
+ GLuint vertex_shader = LoadShader(
+ GL_VERTEX_SHADER, vertex_shader_source);
+ GLuint fragment_shader = LoadShader(
+ GL_FRAGMENT_SHADER, fragment_shader_source);
+ if (!vertex_shader || !fragment_shader) {
+ return 0;
+ }
+ return SetupProgram(vertex_shader, fragment_shader);
+}
+
+GLuint GLTestHelper::SetupUnitQuad(GLint position_location) {
+ GLuint vbo = 0;
+ glGenBuffers(1, &vbo);
+ glBindBuffer(GL_ARRAY_BUFFER, vbo);
+ static float vertices[] = {
+ 1.0f, 1.0f,
+ -1.0f, 1.0f,
+ -1.0f, -1.0f,
+ 1.0f, 1.0f,
+ -1.0f, -1.0f,
+ 1.0f, -1.0f,
+ };
+ glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
+ glEnableVertexAttribArray(position_location);
+ glVertexAttribPointer(position_location, 2, GL_FLOAT, GL_FALSE, 0, 0);
+
+ return vbo;
+}
+
+GLuint GLTestHelper::SetupColorsForUnitQuad(
+ GLint location, const GLfloat color[4], GLenum usage) {
+ GLuint vbo = 0;
+ glGenBuffers(1, &vbo);
+ glBindBuffer(GL_ARRAY_BUFFER, vbo);
+ GLfloat vertices[6 * 4];
+ for (int ii = 0; ii < 6; ++ii) {
+ for (int jj = 0; jj < 4; ++jj) {
+ vertices[ii * 4 + jj] = color[jj];
+ }
+ }
+ glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, usage);
+ glEnableVertexAttribArray(location);
+ glVertexAttribPointer(location, 4, GL_FLOAT, GL_FALSE, 0, 0);
+
+ return vbo;
+}
+
+bool GLTestHelper::CheckPixels(
+ GLint x, GLint y, GLsizei width, GLsizei height, GLint tolerance,
+ const uint8* color) {
+ GLsizei size = width * height * 4;
+ scoped_ptr<uint8[]> pixels(new uint8[size]);
+ memset(pixels.get(), kCheckClearValue, size);
+ glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels.get());
+ int bad_count = 0;
+ for (GLint yy = 0; yy < height; ++yy) {
+ for (GLint xx = 0; xx < width; ++xx) {
+ int offset = yy * width * 4 + xx * 4;
+ for (int jj = 0; jj < 4; ++jj) {
+ uint8 actual = pixels[offset + jj];
+ uint8 expected = color[jj];
+ int diff = actual - expected;
+ diff = diff < 0 ? -diff: diff;
+ if (diff > tolerance) {
+ EXPECT_EQ(expected, actual) << " at " << (xx + x) << ", " << (yy + y)
+ << " channel " << jj;
+ ++bad_count;
+        // Exit early so we don't spam the log, but print enough to make the
+        // issue easy to diagnose.
+ if (bad_count > 16) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ return bad_count == 0;
+}
+
+namespace {
+
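+// Serialize values in little-endian byte order, as the BMP file format
+// requires, independent of the host byte order.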
+void Set16BitValue(uint8 dest[2], uint16 value) {
+ dest[0] = value & 0xFFu;
+ dest[1] = value >> 8;
+}
+
+void Set32BitValue(uint8 dest[4], uint32 value) {
+ dest[0] = (value >> 0) & 0xFFu;
+ dest[1] = (value >> 8) & 0xFFu;
+ dest[2] = (value >> 16) & 0xFFu;
+ dest[3] = (value >> 24) & 0xFFu;
+}
+
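+// Byte-wise layouts of the Windows BITMAPFILEHEADER and BITMAPINFOHEADER
+// structures, expressed as uint8 arrays so they can be written to disk
+// without compiler-inserted padding.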
+struct BitmapHeaderFile {
+ uint8 magic[2];
+ uint8 size[4];
+ uint8 reserved[4];
+ uint8 offset[4];
+};
+
+struct BitmapInfoHeader{
+ uint8 size[4];
+ uint8 width[4];
+ uint8 height[4];
+ uint8 planes[2];
+ uint8 bit_count[2];
+ uint8 compression[4];
+ uint8 size_image[4];
+ uint8 x_pels_per_meter[4];
+ uint8 y_pels_per_meter[4];
+ uint8 clr_used[4];
+ uint8 clr_important[4];
+};
+
+}  // namespace
+
+bool GLTestHelper::SaveBackbufferAsBMP(
+ const char* filename, int width, int height) {
+ FILE* fp = fopen(filename, "wb");
+ EXPECT_TRUE(fp != NULL);
+ glPixelStorei(GL_PACK_ALIGNMENT, 1);
+ int num_pixels = width * height;
+ int size = num_pixels * 4;
+ scoped_ptr<uint8[]> data(new uint8[size]);
+ uint8* pixels = data.get();
+ glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
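+  // glReadPixels returns rows bottom-to-top, which matches the bottom-up row
+  // order of a BMP with a positive height, so no vertical flip is needed.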
+
+ // RGBA to BGRA
+ for (int ii = 0; ii < num_pixels; ++ii) {
+ int offset = ii * 4;
+ uint8 t = pixels[offset + 0];
+ pixels[offset + 0] = pixels[offset + 2];
+ pixels[offset + 2] = t;
+ }
+
+ BitmapHeaderFile bhf;
+ BitmapInfoHeader bih;
+
+ bhf.magic[0] = 'B';
+ bhf.magic[1] = 'M';
+ Set32BitValue(bhf.size, 0);
+ Set32BitValue(bhf.reserved, 0);
+ Set32BitValue(bhf.offset, sizeof(bhf) + sizeof(bih));
+
+ Set32BitValue(bih.size, sizeof(bih));
+ Set32BitValue(bih.width, width);
+ Set32BitValue(bih.height, height);
+ Set16BitValue(bih.planes, 1);
+ Set16BitValue(bih.bit_count, 32);
+ Set32BitValue(bih.compression, 0);
+ Set32BitValue(bih.x_pels_per_meter, 0);
+ Set32BitValue(bih.y_pels_per_meter, 0);
+ Set32BitValue(bih.clr_used, 0);
+ Set32BitValue(bih.clr_important, 0);
+
+ fwrite(&bhf, sizeof(bhf), 1, fp);
+ fwrite(&bih, sizeof(bih), 1, fp);
+ fwrite(pixels, size, 1, fp);
+ fclose(fp);
+ return true;
+}
+
+int GLTestHelper::RunTests(int argc, char** argv) {
+ testing::InitGoogleMock(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/gpu/command_buffer/tests/gl_test_utils.h b/gpu/command_buffer/tests/gl_test_utils.h
new file mode 100644
index 0000000..972ea0a
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_test_utils.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper functions for GL.
+
+#ifndef GPU_COMMAND_BUFFER_TESTS_GL_TEST_UTILS_H_
+#define GPU_COMMAND_BUFFER_TESTS_GL_TEST_UTILS_H_
+
+#include <GLES2/gl2.h>
+#include "base/basictypes.h"
+
+class GLTestHelper {
+ public:
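+  // Value used to pre-fill readback buffers in CheckPixels so untouched
+  // bytes are easy to spot.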
+ static const uint8 kCheckClearValue = 123u;
+
+ static bool HasExtension(const char* extension);
+ static bool CheckGLError(const char* msg, int line);
+
+ // Compiles a shader.
+  // Returns the shader, or 0 on failure.
+ static GLuint LoadShader(GLenum type, const char* shaderSrc);
+
+ // Attaches 2 shaders and links them to a program.
+  // Returns the program, or 0 on failure.
+ static GLuint SetupProgram(GLuint vertex_shader, GLuint fragment_shader);
+
+  // Compiles 2 shaders, then attaches and links them into a program.
+  // Returns the program, or 0 on failure.
+ static GLuint LoadProgram(
+ const char* vertex_shader_source,
+ const char* fragment_shader_source);
+
+ // Make a unit quad with position only.
+ // Returns the created buffer.
+ static GLuint SetupUnitQuad(GLint position_location);
+
+  // Makes a buffer of 6 per-vertex colors (one per vertex of the unit quad).
+ // Returns the created buffer.
+ static GLuint SetupColorsForUnitQuad(
+ GLint location, const GLfloat color[4], GLenum usage);
+
+ // Checks an area of pixels for a color.
+ static bool CheckPixels(
+ GLint x, GLint y, GLsizei width, GLsizei height, GLint tolerance,
+ const uint8* color);
+
+ // Uses ReadPixels to save an area of the current FBO/Backbuffer.
+ static bool SaveBackbufferAsBMP(const char* filename, int width, int height);
+
+ // Run unit tests.
+ static int RunTests(int argc, char** argv);
+};
+
+#endif // GPU_COMMAND_BUFFER_TESTS_GL_TEST_UTILS_H_
diff --git a/gpu/command_buffer/tests/gl_tests_main.cc b/gpu/command_buffer/tests/gl_tests_main.cc
new file mode 100644
index 0000000..460ecae
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_tests_main.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+#include "base/command_line.h"
+#include "base/message_loop/message_loop.h"
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+#include "gpu/command_buffer/client/gles2_lib.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "gpu/config/gpu_util.h"
+#include "ui/gl/gl_surface.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/jni_android.h"
+#include "ui/gl/android/gl_jni_registrar.h"
+#endif
+
+int main(int argc, char** argv) {
+#if defined(OS_ANDROID)
+ ui::gl::android::RegisterJni(base::android::AttachCurrentThread());
+#else
+ base::AtExitManager exit_manager;
+#endif
+ CommandLine::Init(argc, argv);
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool pool;
+#endif
+ gfx::GLSurface::InitializeOneOff();
+ ::gles2::Initialize();
+ gpu::ApplyGpuDriverBugWorkarounds(CommandLine::ForCurrentProcess());
+ base::MessageLoop main_message_loop;
+ return GLTestHelper::RunTests(argc, argv);
+}
+
+
diff --git a/gpu/command_buffer/tests/gl_texture_mailbox_unittest.cc b/gpu/command_buffer/tests/gl_texture_mailbox_unittest.cc
new file mode 100644
index 0000000..1ce8303
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_texture_mailbox_unittest.cc
@@ -0,0 +1,454 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "gpu/command_buffer/client/gles2_lib.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_share_group.h"
+
+namespace gpu {
+
+namespace {
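+// Reads back a single texel by attaching the texture to a temporary
+// framebuffer, since GLES2 has no direct way to read texture contents.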
+uint32 ReadTexel(GLuint id, GLint x, GLint y) {
+ GLint old_fbo = 0;
+ glGetIntegerv(GL_FRAMEBUFFER_BINDING, &old_fbo);
+
+ GLuint fbo;
+ glGenFramebuffers(1, &fbo);
+ glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ id,
+ 0);
+ // Some drivers (NVidia/SGX) require texture settings to be a certain way or
+ // they won't report FRAMEBUFFER_COMPLETE.
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ glCheckFramebufferStatus(GL_FRAMEBUFFER));
+
+ uint32 texel = 0;
+ glReadPixels(x, y, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, &texel);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ glBindFramebuffer(GL_FRAMEBUFFER, old_fbo);
+
+ glDeleteFramebuffers(1, &fbo);
+
+ return texel;
+}
+}
+
+class GLTextureMailboxTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gl1_.Initialize(GLManager::Options());
+ GLManager::Options options;
+ options.share_mailbox_manager = &gl1_;
+ gl2_.Initialize(options);
+ }
+
+ virtual void TearDown() {
+ gl1_.Destroy();
+ gl2_.Destroy();
+ }
+
+ GLManager gl1_;
+ GLManager gl2_;
+};
+
+TEST_F(GLTextureMailboxTest, ProduceAndConsumeTexture) {
+ gl1_.MakeCurrent();
+
+ GLbyte mailbox1[GL_MAILBOX_SIZE_CHROMIUM];
+ glGenMailboxCHROMIUM(mailbox1);
+
+ GLbyte mailbox2[GL_MAILBOX_SIZE_CHROMIUM];
+ glGenMailboxCHROMIUM(mailbox2);
+
+ GLuint tex1;
+ glGenTextures(1, &tex1);
+
+ glBindTexture(GL_TEXTURE_2D, tex1);
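+  // On a little-endian host the bytes of 0xFF0000FF are {0xFF, 0x00, 0x00,
+  // 0xFF}, i.e. opaque red when uploaded as RGBA/UNSIGNED_BYTE.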
+ uint32 source_pixel = 0xFF0000FF;
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1, 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ &source_pixel);
+
+ glProduceTextureCHROMIUM(GL_TEXTURE_2D, mailbox1);
+ glFlush();
+
+ gl2_.MakeCurrent();
+
+ GLuint tex2;
+ glGenTextures(1, &tex2);
+
+ glBindTexture(GL_TEXTURE_2D, tex2);
+ glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox1);
+ EXPECT_EQ(source_pixel, ReadTexel(tex2, 0, 0));
+ glProduceTextureCHROMIUM(GL_TEXTURE_2D, mailbox2);
+ glFlush();
+
+ gl1_.MakeCurrent();
+
+ glBindTexture(GL_TEXTURE_2D, tex1);
+ glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox2);
+ EXPECT_EQ(source_pixel, ReadTexel(tex1, 0, 0));
+}
+
+TEST_F(GLTextureMailboxTest, ProduceAndConsumeTextureRGB) {
+ gl1_.MakeCurrent();
+
+ GLbyte mailbox1[GL_MAILBOX_SIZE_CHROMIUM];
+ glGenMailboxCHROMIUM(mailbox1);
+
+ GLbyte mailbox2[GL_MAILBOX_SIZE_CHROMIUM];
+ glGenMailboxCHROMIUM(mailbox2);
+
+ GLuint tex1;
+ glGenTextures(1, &tex1);
+
+ glBindTexture(GL_TEXTURE_2D, tex1);
+ uint32 source_pixel = 0xFF000000;
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGB,
+ 1, 1,
+ 0,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ &source_pixel);
+
+ glProduceTextureCHROMIUM(GL_TEXTURE_2D, mailbox1);
+ glFlush();
+
+ gl2_.MakeCurrent();
+
+ GLuint tex2;
+ glGenTextures(1, &tex2);
+
+ glBindTexture(GL_TEXTURE_2D, tex2);
+ glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox1);
+ EXPECT_EQ(source_pixel, ReadTexel(tex2, 0, 0));
+ glProduceTextureCHROMIUM(GL_TEXTURE_2D, mailbox2);
+ glFlush();
+
+ gl1_.MakeCurrent();
+
+ glBindTexture(GL_TEXTURE_2D, tex1);
+ glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox2);
+ EXPECT_EQ(source_pixel, ReadTexel(tex1, 0, 0));
+}
+
+TEST_F(GLTextureMailboxTest, ProduceAndConsumeTextureDirect) {
+ gl1_.MakeCurrent();
+
+ GLbyte mailbox1[GL_MAILBOX_SIZE_CHROMIUM];
+ glGenMailboxCHROMIUM(mailbox1);
+
+ GLbyte mailbox2[GL_MAILBOX_SIZE_CHROMIUM];
+ glGenMailboxCHROMIUM(mailbox2);
+
+ GLuint tex1;
+ glGenTextures(1, &tex1);
+
+ glBindTexture(GL_TEXTURE_2D, tex1);
+ uint32 source_pixel = 0xFF0000FF;
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1, 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ &source_pixel);
+
+ glProduceTextureDirectCHROMIUM(tex1, GL_TEXTURE_2D, mailbox1);
+ glFlush();
+
+ gl2_.MakeCurrent();
+
+ GLuint tex2 = glCreateAndConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox1);
+ glBindTexture(GL_TEXTURE_2D, tex2);
+ EXPECT_EQ(source_pixel, ReadTexel(tex2, 0, 0));
+ glProduceTextureDirectCHROMIUM(tex2, GL_TEXTURE_2D, mailbox2);
+ glFlush();
+
+ gl1_.MakeCurrent();
+
+ GLuint tex3 = glCreateAndConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox2);
+ glBindTexture(GL_TEXTURE_2D, tex3);
+ EXPECT_EQ(source_pixel, ReadTexel(tex3, 0, 0));
+}
+
+TEST_F(GLTextureMailboxTest, ConsumeTextureValidatesKey) {
+ GLuint tex;
+ glGenTextures(1, &tex);
+
+ glBindTexture(GL_TEXTURE_2D, tex);
+ uint32 source_pixel = 0xFF0000FF;
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1, 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ &source_pixel);
+
+ GLbyte invalid_mailbox[GL_MAILBOX_SIZE_CHROMIUM];
+ glGenMailboxCHROMIUM(invalid_mailbox);
+
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glConsumeTextureCHROMIUM(GL_TEXTURE_2D, invalid_mailbox);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), glGetError());
+
+ // Ensure level 0 is still intact after glConsumeTextureCHROMIUM fails.
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ EXPECT_EQ(source_pixel, ReadTexel(tex, 0, 0));
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+}
+
+TEST_F(GLTextureMailboxTest, SharedTextures) {
+ gl1_.MakeCurrent();
+ GLuint tex1;
+ glGenTextures(1, &tex1);
+
+ glBindTexture(GL_TEXTURE_2D, tex1);
+ uint32 source_pixel = 0xFF0000FF;
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 1, 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ &source_pixel);
+ GLbyte mailbox[GL_MAILBOX_SIZE_CHROMIUM];
+ glGenMailboxCHROMIUM(mailbox);
+
+ glProduceTextureCHROMIUM(GL_TEXTURE_2D, mailbox);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glFlush();
+
+ gl2_.MakeCurrent();
+ GLuint tex2;
+ glGenTextures(1, &tex2);
+
+ glBindTexture(GL_TEXTURE_2D, tex2);
+ glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ // Change texture in context 2.
+ source_pixel = 0xFF00FF00;
+ glTexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 0, 0,
+ 1, 1,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ &source_pixel);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glFlush();
+
+ // Check it in context 1.
+ gl1_.MakeCurrent();
+ EXPECT_EQ(source_pixel, ReadTexel(tex1, 0, 0));
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ // Change parameters (note: ReadTexel will reset those).
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR_MIPMAP_NEAREST);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glFlush();
+
+ // Check in context 2.
+ gl2_.MakeCurrent();
+ GLint parameter = 0;
+  glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, &parameter);
+ EXPECT_EQ(GL_REPEAT, parameter);
+ parameter = 0;
+  glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &parameter);
+ EXPECT_EQ(GL_LINEAR, parameter);
+ parameter = 0;
+  glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, &parameter);
+ EXPECT_EQ(GL_LINEAR_MIPMAP_NEAREST, parameter);
+
+ // Delete texture in context 1.
+ gl1_.MakeCurrent();
+ glDeleteTextures(1, &tex1);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ // Check texture still exists in context 2.
+ gl2_.MakeCurrent();
+ EXPECT_EQ(source_pixel, ReadTexel(tex2, 0, 0));
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ // The mailbox should still exist too.
+ GLuint tex3;
+ glGenTextures(1, &tex3);
+ glBindTexture(GL_TEXTURE_2D, tex3);
+ glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ // Delete both textures.
+ glDeleteTextures(1, &tex2);
+ glDeleteTextures(1, &tex3);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ // Mailbox should be gone now.
+ glGenTextures(1, &tex2);
+ glBindTexture(GL_TEXTURE_2D, tex2);
+ glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), glGetError());
+ glDeleteTextures(1, &tex2);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+}
+
+TEST_F(GLTextureMailboxTest, ProduceFrontBuffer) {
+ gl1_.MakeCurrent();
+ Mailbox mailbox;
+ glGenMailboxCHROMIUM(mailbox.name);
+
+ gl2_.MakeCurrent();
+ gl2_.decoder()->ProduceFrontBuffer(mailbox);
+
+ gl1_.MakeCurrent();
+ GLuint tex1;
+ glGenTextures(1, &tex1);
+ glBindTexture(GL_TEXTURE_2D, tex1);
+ glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ gl2_.MakeCurrent();
+ glResizeCHROMIUM(10, 10, 1);
+ glClearColor(1, 0, 0, 1);
+ glClear(GL_COLOR_BUFFER_BIT);
+ ::gles2::GetGLContext()->SwapBuffers();
+
+ gl1_.MakeCurrent();
+ EXPECT_EQ(0xFF0000FFu, ReadTexel(tex1, 0, 0));
+ EXPECT_EQ(0xFF0000FFu, ReadTexel(tex1, 9, 9));
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ gl2_.MakeCurrent();
+ glClearColor(0, 1, 0, 1);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glFlush();
+
+ gl1_.MakeCurrent();
+ EXPECT_EQ(0xFF0000FFu, ReadTexel(tex1, 0, 0));
+
+ gl2_.MakeCurrent();
+ ::gles2::GetGLContext()->SwapBuffers();
+
+ gl1_.MakeCurrent();
+ EXPECT_EQ(0xFF00FF00u, ReadTexel(tex1, 0, 0));
+
+ gl2_.MakeCurrent();
+ gl2_.Destroy();
+
+ gl1_.MakeCurrent();
+ EXPECT_EQ(0xFF00FF00u, ReadTexel(tex1, 0, 0));
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glDeleteTextures(1, &tex1);
+}
+
+TEST_F(GLTextureMailboxTest, ProduceTextureDirectInvalidTarget) {
+ gl1_.MakeCurrent();
+
+ GLbyte mailbox1[GL_MAILBOX_SIZE_CHROMIUM];
+ glGenMailboxCHROMIUM(mailbox1);
+
+ GLuint tex1;
+ glGenTextures(1, &tex1);
+
+ glBindTexture(GL_TEXTURE_CUBE_MAP, tex1);
+ uint32 source_pixel = 0xFF0000FF;
+ glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X,
+ 0,
+ GL_RGBA,
+ 1, 1,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ &source_pixel);
+
+ glProduceTextureDirectCHROMIUM(tex1, GL_TEXTURE_2D, mailbox1);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), glGetError());
+}
+
+// http://crbug.com/281565
+#if !defined(OS_ANDROID)
+TEST_F(GLTextureMailboxTest, ProduceFrontBufferMultipleContexts) {
+ gl1_.MakeCurrent();
+ Mailbox mailbox[2];
+ glGenMailboxCHROMIUM(mailbox[0].name);
+ glGenMailboxCHROMIUM(mailbox[1].name);
+ GLuint tex[2];
+ glGenTextures(2, tex);
+
+ GLManager::Options options;
+ options.share_mailbox_manager = &gl1_;
+ GLManager other_gl[2];
+ for (size_t i = 0; i < 2; ++i) {
+ other_gl[i].Initialize(options);
+ other_gl[i].MakeCurrent();
+ other_gl[i].decoder()->ProduceFrontBuffer(mailbox[i]);
+ // Make sure both "other gl" are in the same share group.
+ if (!options.share_group_manager)
+      options.share_group_manager = other_gl + i;
+ }
+
+
+ gl1_.MakeCurrent();
+ for (size_t i = 0; i < 2; ++i) {
+ glBindTexture(GL_TEXTURE_2D, tex[i]);
+ glConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox[i].name);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ }
+
+ for (size_t i = 0; i < 2; ++i) {
+ other_gl[i].MakeCurrent();
+ glResizeCHROMIUM(10, 10, 1);
+    glClearColor(1 - i % 2, i % 2, 0, 1);
+ glClear(GL_COLOR_BUFFER_BIT);
+ ::gles2::GetGLContext()->SwapBuffers();
+ }
+
+ gl1_.MakeCurrent();
+ EXPECT_EQ(0xFF0000FFu, ReadTexel(tex[0], 0, 0));
+ EXPECT_EQ(0xFF00FF00u, ReadTexel(tex[1], 9, 9));
+
+ for (size_t i = 0; i < 2; ++i) {
+ other_gl[i].MakeCurrent();
+ other_gl[i].Destroy();
+ }
+
+ gl1_.MakeCurrent();
+ glDeleteTextures(2, tex);
+}
+#endif
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/tests/gl_texture_storage_unittest.cc b/gpu/command_buffer/tests/gl_texture_storage_unittest.cc
new file mode 100644
index 0000000..b28baf0
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_texture_storage_unittest.cc
@@ -0,0 +1,160 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class TextureStorageTest : public testing::Test {
+ protected:
+ static const GLsizei kResolution = 64;
+ virtual void SetUp() {
+ GLManager::Options options;
+ options.size = gfx::Size(kResolution, kResolution);
+ gl_.Initialize(options);
+ gl_.MakeCurrent();
+
+ glGenTextures(1, &tex_);
+ glBindTexture(GL_TEXTURE_2D, tex_);
+
+ glGenFramebuffers(1, &fbo_);
+ glBindFramebuffer(GL_FRAMEBUFFER, fbo_);
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ tex_,
+ 0);
+
+ const GLubyte* extensions = glGetString(GL_EXTENSIONS);
+ extension_available_ = strstr(reinterpret_cast<const char*>(
+ extensions), "GL_EXT_texture_storage");
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ GLManager gl_;
+ GLuint tex_;
+ GLuint fbo_;
+ bool extension_available_;
+};
+
+TEST_F(TextureStorageTest, CorrectPixels) {
+ if (!extension_available_)
+ return;
+
+ glTexStorage2DEXT(GL_TEXTURE_2D, 2, GL_RGBA8_OES, 2, 2);
+
+ uint8 source_pixels[16] = {
+ 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4
+ };
+ glTexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ 0, 0,
+ 2, 2,
+ GL_RGBA, GL_UNSIGNED_BYTE,
+ source_pixels);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 2, 2, 0, source_pixels));
+}
+
+TEST_F(TextureStorageTest, IsImmutable) {
+ if (!extension_available_)
+ return;
+
+ glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8_OES, 4, 4);
+
+ GLint param = 0;
+  glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_IMMUTABLE_FORMAT_EXT, &param);
+ EXPECT_TRUE(param);
+}
+
+TEST_F(TextureStorageTest, OneLevel) {
+ if (!extension_available_)
+ return;
+
+ glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8_OES, 4, 4);
+
+ uint8 source_pixels[64] = { 0 };
+
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 4, 4,
+ GL_RGBA, GL_UNSIGNED_BYTE, source_pixels);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glTexSubImage2D(GL_TEXTURE_2D, 1, 0, 0, 2, 2,
+ GL_RGBA, GL_UNSIGNED_BYTE, source_pixels);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), glGetError());
+}
+
+TEST_F(TextureStorageTest, MultipleLevels) {
+ if (!extension_available_)
+ return;
+
+ glTexStorage2DEXT(GL_TEXTURE_2D, 2, GL_RGBA8_OES, 2, 2);
+
+ uint8 source_pixels[16] = { 0 };
+
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 2, 2,
+ GL_RGBA, GL_UNSIGNED_BYTE, source_pixels);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glTexSubImage2D(GL_TEXTURE_2D, 1, 0, 0, 1, 1,
+ GL_RGBA, GL_UNSIGNED_BYTE, source_pixels);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glTexSubImage2D(GL_TEXTURE_2D, 2, 0, 0, 1, 1,
+ GL_RGBA, GL_UNSIGNED_BYTE, source_pixels);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), glGetError());
+}
+
+TEST_F(TextureStorageTest, BadTarget) {
+ if (!extension_available_)
+ return;
+
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glTexStorage2DEXT(GL_TEXTURE_CUBE_MAP, 1, GL_RGBA8_OES, 4, 4);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), glGetError());
+}
+
+TEST_F(TextureStorageTest, InvalidId) {
+ if (!extension_available_)
+ return;
+
+ glDeleteTextures(1, &tex_);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8_OES, 4, 4);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), glGetError());
+}
+
+TEST_F(TextureStorageTest, CannotRedefine) {
+ if (!extension_available_)
+ return;
+
+ glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8_OES, 4, 4);
+
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8_OES, 4, 4);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), glGetError());
+
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ 4, 4,
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), glGetError());
+}
+
+} // namespace gpu
+
+
+
diff --git a/gpu/command_buffer/tests/gl_unittest.cc b/gpu/command_buffer/tests/gl_unittest.cc
new file mode 100644
index 0000000..f5c380f
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_unittest.cc
@@ -0,0 +1,111 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class GLTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gl_.Initialize(GLManager::Options());
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ GLManager gl_;
+};
+
+// Test that GL is at least minimally working.
+TEST_F(GLTest, Basic) {
+ glClearColor(0.0f, 1.0f, 0.0f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+ uint8 expected[] = { 0, 255, 0, 255, };
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected));
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+TEST_F(GLTest, BasicFBO) {
+ GLuint tex = 0;
+ glGenTextures(1, &tex);
+ GLuint fbo = 0;
+ glGenFramebuffers(1, &fbo);
+ glBindTexture(GL_TEXTURE_2D, tex);
+ scoped_ptr<uint8[]> pixels(new uint8 [16*16*4]);
+ memset(pixels.get(), 0, 16*16*4);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 16, 16, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ pixels.get());
+ glGenerateMipmap(GL_TEXTURE_2D);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+ tex, 0);
+ EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
+ glCheckFramebufferStatus(GL_FRAMEBUFFER));
+ glClearColor(0.0f, 1.0f, 0.0f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+ uint8 expected[] = { 0, 255, 0, 255, };
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 16, 16, 0, expected));
+ glDeleteFramebuffers(1, &fbo);
+ glDeleteTextures(1, &tex);
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+TEST_F(GLTest, SimpleShader) {
+ static const char* v_shader_str =
+ "attribute vec4 a_Position;\n"
+ "void main()\n"
+ "{\n"
+ " gl_Position = a_Position;\n"
+ "}\n";
+ static const char* f_shader_str =
+ "precision mediump float;\n"
+ "void main()\n"
+ "{\n"
+ " gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);\n"
+ "}\n";
+
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+ glUseProgram(program);
+ GLuint position_loc = glGetAttribLocation(program, "a_Position");
+
+ GLTestHelper::SetupUnitQuad(position_loc);
+
+ uint8 expected_clear[] = { 127, 0, 255, 0, };
+ glClearColor(0.5f, 0.0f, 1.0f, 0.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 1, expected_clear));
+ uint8 expected_draw[] = { 0, 255, 0, 255, };
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, 1, 1, 0, expected_draw));
+}
+
+TEST_F(GLTest, GetString) {
+ EXPECT_STREQ(
+ "OpenGL ES 2.0 Chromium",
+ reinterpret_cast<const char*>(glGetString(GL_VERSION)));
+ EXPECT_STREQ(
+ "OpenGL ES GLSL ES 1.0 Chromium",
+ reinterpret_cast<const char*>(glGetString(GL_SHADING_LANGUAGE_VERSION)));
+ EXPECT_STREQ(
+ "Chromium",
+ reinterpret_cast<const char*>(glGetString(GL_RENDERER)));
+ EXPECT_STREQ(
+ "Chromium",
+ reinterpret_cast<const char*>(glGetString(GL_VENDOR)));
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/tests/gl_unittests_android.cc b/gpu/command_buffer/tests/gl_unittests_android.cc
new file mode 100644
index 0000000..27b0cb6
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_unittests_android.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include <android/native_window_jni.h>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/synchronization/waitable_event.h"
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/native_widget_types.h"
+#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/gl_surface.h"
+
+namespace gpu {
+
+class GLSurfaceTextureTest : public testing::Test {
+ protected:
+ virtual void SetUp() OVERRIDE {
+ gl_.Initialize(GLManager::Options());
+ }
+
+ virtual void TearDown() OVERRIDE {
+ gl_.Destroy();
+ }
+
+ GLManager gl_;
+};
+
+TEST_F(GLSurfaceTextureTest, SimpleTest) {
+ // TODO(sievers): Eliminate the need for this by using a client-side
+ // abstraction for the SurfaceTexture in this test.
+ GLuint texture = 0xFEEDBEEF;
+
+ scoped_refptr<gfx::SurfaceTexture> surface_texture(
+ gfx::SurfaceTexture::Create(texture));
+ gfx::AcceleratedWidget window = surface_texture->CreateSurface();
+ EXPECT_TRUE(window != NULL);
+
+ scoped_refptr<gfx::GLSurface> gl_surface =
+ gfx::GLSurface::CreateViewGLSurface(window);
+ EXPECT_TRUE(gl_surface.get() != NULL);
+
+ gl_.SetSurface(gl_surface.get());
+
+ glClearColor(0.0f, 1.0f, 0.0f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+// glSwapBuffers();
+
+ surface_texture->UpdateTexImage();
+
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+
+ ANativeWindow_release(window);
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/tests/gl_virtual_contexts_unittest.cc b/gpu/command_buffer/tests/gl_virtual_contexts_unittest.cc
new file mode 100644
index 0000000..17cfa9f
--- /dev/null
+++ b/gpu/command_buffer/tests/gl_virtual_contexts_unittest.cc
@@ -0,0 +1,304 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+
+#define SHADER(Src) #Src
+
+namespace gpu {
+
+class GLVirtualContextsTest : public testing::Test {
+ protected:
+ static const int kSize0 = 4;
+ static const int kSize1 = 8;
+ static const int kSize2 = 16;
+
+ static const GLfloat kFloatRed[4];
+ static const GLfloat kFloatGreen[4];
+ static const uint8 kExpectedRed[4];
+ static const uint8 kExpectedGreen[4];
+
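+  // gl_real_ and gl_real_shared_ own real GL contexts; gl1_ and gl2_ are
+  // virtual contexts multiplexed onto gl_real_shared_'s context via
+  // |virtual_manager|.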
+ virtual void SetUp() {
+ GLManager::Options options;
+ options.size = gfx::Size(kSize0, kSize0);
+ gl_real_.Initialize(options);
+ gl_real_shared_.Initialize(options);
+ options.virtual_manager = &gl_real_shared_;
+ options.size = gfx::Size(kSize1, kSize1);
+ gl1_.Initialize(options);
+ options.size = gfx::Size(kSize2, kSize2);
+ gl2_.Initialize(options);
+ }
+
+ virtual void TearDown() {
+ gl1_.Destroy();
+ gl2_.Destroy();
+ gl_real_shared_.Destroy();
+ gl_real_.Destroy();
+ }
+
+ GLuint SetupColoredVertexProgram() {
+ static const char* v_shader_str = SHADER(
+ attribute vec4 a_position;
+ attribute vec4 a_color;
+ varying vec4 v_color;
+ void main()
+ {
+ gl_Position = a_position;
+ v_color = a_color;
+ }
+ );
+
+ static const char* f_shader_str = SHADER(
+ precision mediump float;
+ varying vec4 v_color;
+ void main()
+ {
+ gl_FragColor = v_color;
+ }
+ );
+
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+ glUseProgram(program);
+ return program;
+ }
+
+ void SetUpColoredUnitQuad(const GLfloat* color) {
+ GLuint program1 = SetupColoredVertexProgram();
+ GLuint position_loc1 = glGetAttribLocation(program1, "a_position");
+ GLuint color_loc1 = glGetAttribLocation(program1, "a_color");
+ GLTestHelper::SetupUnitQuad(position_loc1);
+ GLTestHelper::SetupColorsForUnitQuad(color_loc1, color, GL_STATIC_DRAW);
+ }
+
+ GLManager gl_real_;
+ GLManager gl_real_shared_;
+ GLManager gl1_;
+ GLManager gl2_;
+};
+
+const GLfloat GLVirtualContextsTest::kFloatRed[4] = {
+ 1.0f, 0.0f, 0.0f, 1.0f,
+};
+const GLfloat GLVirtualContextsTest::kFloatGreen[4] = {
+ 0.0f, 1.0f, 0.0f, 1.0f,
+};
+const uint8 GLVirtualContextsTest::kExpectedRed[4] = {
+ 255, 0, 0, 255,
+};
+const uint8 GLVirtualContextsTest::kExpectedGreen[4] = {
+ 0, 255, 0, 255,
+};
+
+namespace {
+
+void SetupSimpleShader(const uint8* color) {
+ static const char* v_shader_str = SHADER(
+ attribute vec4 a_Position;
+ void main()
+ {
+ gl_Position = a_Position;
+ }
+ );
+
+ static const char* f_shader_str = SHADER(
+ precision mediump float;
+ uniform vec4 u_color;
+ void main()
+ {
+ gl_FragColor = u_color;
+ }
+ );
+
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+ glUseProgram(program);
+
+ GLuint position_loc = glGetAttribLocation(program, "a_Position");
+
+ GLTestHelper::SetupUnitQuad(position_loc);
+
+ GLuint color_loc = glGetUniformLocation(program, "u_color");
+ glUniform4f(
+ color_loc,
+ color[0] / 255.0f,
+ color[1] / 255.0f,
+ color[2] / 255.0f,
+ color[3] / 255.0f);
+}
+
+void TestDraw(int size) {
+ uint8 expected_clear[] = { 127, 0, 255, 0, };
+ glClearColor(0.5f, 0.0f, 1.0f, 0.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, size, size, 1, expected_clear));
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+}
+
+} // anonymous namespace
+
+// http://crbug.com/281565
+TEST_F(GLVirtualContextsTest, Basic) {
+ struct TestInfo {
+ int size;
+ uint8 color[4];
+ GLManager* manager;
+ };
+ const int kNumTests = 3;
+ TestInfo tests[] = {
+ { kSize0, { 255, 0, 0, 0, }, &gl_real_, },
+ { kSize1, { 0, 255, 0, 0, }, &gl1_, },
+ { kSize2, { 0, 0, 255, 0, }, &gl2_, },
+ };
+
+ for (int ii = 0; ii < kNumTests; ++ii) {
+ const TestInfo& test = tests[ii];
+ GLManager* gl_manager = test.manager;
+ gl_manager->MakeCurrent();
+ SetupSimpleShader(test.color);
+ }
+
+ for (int ii = 0; ii < kNumTests; ++ii) {
+ const TestInfo& test = tests[ii];
+ GLManager* gl_manager = test.manager;
+ gl_manager->MakeCurrent();
+ TestDraw(test.size);
+ }
+
+ for (int ii = 0; ii < kNumTests; ++ii) {
+ const TestInfo& test = tests[ii];
+ GLManager* gl_manager = test.manager;
+ gl_manager->MakeCurrent();
+ EXPECT_TRUE(GLTestHelper::CheckPixels(
+ 0, 0, test.size, test.size, 0, test.color));
+ }
+
+ for (int ii = 0; ii < kNumTests; ++ii) {
+ const TestInfo& test = tests[ii];
+ GLManager* gl_manager = test.manager;
+ gl_manager->MakeCurrent();
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+ }
+}
+
+// http://crbug.com/363407
+TEST_F(GLVirtualContextsTest, VertexArrayObjectRestore) {
+ GLuint vao1 = 0, vao2 = 0;
+
+ gl1_.MakeCurrent();
+ // Set up red quad in vao1.
+ glGenVertexArraysOES(1, &vao1);
+ glBindVertexArrayOES(vao1);
+ SetUpColoredUnitQuad(kFloatRed);
+ glFinish();
+
+ gl2_.MakeCurrent();
+ // Set up green quad in vao2.
+ glGenVertexArraysOES(1, &vao2);
+ glBindVertexArrayOES(vao2);
+ SetUpColoredUnitQuad(kFloatGreen);
+ glFinish();
+
+ gl1_.MakeCurrent();
+ // Test to ensure that vao1 is still the active VAO for this context.
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, kSize1, kSize1, 0, kExpectedRed));
+ glFinish();
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+
+ gl2_.MakeCurrent();
+ // Test to ensure that vao2 is still the active VAO for this context.
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(
+ GLTestHelper::CheckPixels(0, 0, kSize2, kSize2, 0, kExpectedGreen));
+ glFinish();
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+// http://crbug.com/363407
+TEST_F(GLVirtualContextsTest, VertexArrayObjectRestoreRebind) {
+ GLuint vao1 = 0, vao2 = 0;
+
+ gl1_.MakeCurrent();
+ // Set up red quad in vao1.
+ glGenVertexArraysOES(1, &vao1);
+ glBindVertexArrayOES(vao1);
+ SetUpColoredUnitQuad(kFloatRed);
+ glFinish();
+
+ gl2_.MakeCurrent();
+ // Set up green quad in new vao2.
+ glGenVertexArraysOES(1, &vao2);
+ glBindVertexArrayOES(vao2);
+ SetUpColoredUnitQuad(kFloatGreen);
+ glFinish();
+
+ gl1_.MakeCurrent();
+ // Test to ensure that vao1 hasn't been corrupted after rebinding.
+ // Bind 0 is required so that bind vao1 is not optimized away in the service.
+ glBindVertexArrayOES(0);
+ glBindVertexArrayOES(vao1);
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, kSize1, kSize1, 0, kExpectedRed));
+ glFinish();
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+
+ gl2_.MakeCurrent();
+  // Test to ensure that vao2 hasn't been corrupted after rebinding.
+ // Bind 0 is required so that bind vao2 is not optimized away in the service.
+ glBindVertexArrayOES(0);
+ glBindVertexArrayOES(vao2);
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(
+ GLTestHelper::CheckPixels(0, 0, kSize2, kSize2, 0, kExpectedGreen));
+ glFinish();
+
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+// http://crbug.com/363407
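+// Verifies that the default VAO (object 0) is restored correctly on a context
+// switch, even when a non-default VAO is bound at the time of the switch.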
+TEST_F(GLVirtualContextsTest, VertexArrayObjectRestoreDefault) {
+ gl1_.MakeCurrent();
+ // Set up red quad in default VAO.
+ SetUpColoredUnitQuad(kFloatRed);
+ glFinish();
+
+ gl2_.MakeCurrent();
+ // Set up green quad in default VAO.
+ SetUpColoredUnitQuad(kFloatGreen);
+ glFinish();
+
+ // Gen & bind a non-default VAO.
+ GLuint vao;
+ glGenVertexArraysOES(1, &vao);
+ glBindVertexArrayOES(vao);
+ glFinish();
+
+ gl1_.MakeCurrent();
+ // Test to ensure that default VAO on gl1_ is still valid.
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(GLTestHelper::CheckPixels(0, 0, kSize1, kSize1, 0, kExpectedRed));
+ glFinish();
+
+ gl2_.MakeCurrent();
+ // Test to ensure that default VAO on gl2_ is still valid.
+ // This tests that a default VAO is restored even when it's not currently
+ // bound during the context switch.
+ glBindVertexArrayOES(0);
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+ EXPECT_TRUE(
+ GLTestHelper::CheckPixels(0, 0, kSize2, kSize2, 0, kExpectedGreen));
+ glFinish();
+
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+} // namespace gpu
+
diff --git a/gpu/command_buffer/tests/occlusion_query_unittest.cc b/gpu/command_buffer/tests/occlusion_query_unittest.cc
new file mode 100644
index 0000000..7cbb1a8
--- /dev/null
+++ b/gpu/command_buffer/tests/occlusion_query_unittest.cc
@@ -0,0 +1,151 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include "gpu/command_buffer/tests/gl_manager.h"
+#include "gpu/command_buffer/tests/gl_test_utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class OcclusionQueryTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ GLManager::Options options;
+ options.size = gfx::Size(512, 512);
+ gl_.Initialize(options);
+ }
+
+ virtual void TearDown() {
+ gl_.Destroy();
+ }
+
+ void DrawRect(float x, float z, float scale, float* color);
+
+ GLManager gl_;
+
+ GLint position_loc_;
+ GLint matrix_loc_;
+ GLint color_loc_;
+};
+
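+// Fills |matrix| with a column-major 4x4 matrix that scales uniformly by
+// |scale| and translates by (x, 0, z), as expected by glUniformMatrix4fv
+// with transpose == GL_FALSE.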
+static void SetMatrix(float x, float z, float scale, float* matrix) {
+ matrix[0] = scale;
+ matrix[1] = 0.0f;
+ matrix[2] = 0.0f;
+ matrix[3] = 0.0f;
+
+ matrix[4] = 0.0f;
+ matrix[5] = scale;
+ matrix[6] = 0.0f;
+ matrix[7] = 0.0f;
+
+ matrix[8] = 0.0f;
+ matrix[9] = 0.0f;
+ matrix[10] = scale;
+ matrix[11] = 0.0f;
+
+ matrix[12] = x;
+ matrix[13] = 0.0f;
+ matrix[14] = z;
+ matrix[15] = 1.0f;
+}
+
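+// Draws the unit quad with the given translation, scale and color by updating
+// the worldMatrix and color uniforms before the draw call.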
+void OcclusionQueryTest::DrawRect(float x, float z, float scale, float* color) {
+ GLfloat matrix[16];
+
+ SetMatrix(x, z, scale, matrix);
+
+ // Set up the model matrix
+ glUniformMatrix4fv(matrix_loc_, 1, GL_FALSE, matrix);
+ glUniform4fv(color_loc_, 1, color);
+
+ glDrawArrays(GL_TRIANGLES, 0, 6);
+}
+
+TEST_F(OcclusionQueryTest, Occlusion) {
+#if defined(OS_MACOSX)
+ EXPECT_TRUE(GLTestHelper::HasExtension("GL_EXT_occlusion_query_boolean"))
+ << "GL_EXT_occlusion_query_boolean is required on OSX";
+#endif
+
+ if (!GLTestHelper::HasExtension("GL_EXT_occlusion_query_boolean")) {
+ return;
+ }
+
+ static const char* v_shader_str =
+ "uniform mat4 worldMatrix;\n"
+ "attribute vec3 g_Position;\n"
+ "void main()\n"
+ "{\n"
+ " gl_Position = worldMatrix *\n"
+ " vec4(g_Position.x, g_Position.y, g_Position.z, 1.0);\n"
+ "}\n";
+ static const char* f_shader_str =
+ "precision mediump float;"
+ "uniform vec4 color;\n"
+ "void main()\n"
+ "{\n"
+ " gl_FragColor = color;\n"
+ "}\n";
+
+ GLuint program = GLTestHelper::LoadProgram(v_shader_str, f_shader_str);
+
+ position_loc_ = glGetAttribLocation(program, "g_Position");
+ matrix_loc_ = glGetUniformLocation(program, "worldMatrix");
+ color_loc_ = glGetUniformLocation(program, "color");
+
+ GLTestHelper::SetupUnitQuad(position_loc_);
+
+ GLuint query = 0;
+ glGenQueriesEXT(1, &query);
+
+ glEnable(GL_DEPTH_TEST);
+ glClearColor(0.0f, 0.1f, 0.2f, 1.0f);
+
+ // Use the program object
+ glUseProgram(program);
+
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
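+  // Draw a large red quad in front (z = 0), then a smaller blue quad behind
+  // it (z = 0.1) inside the query. With depth testing enabled the blue quad
+  // is fully occluded, so the query should report that no samples passed.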
+ static float red[] = { 1.0f, 0.0f, 0.0f, 1.0f };
+ DrawRect(0, 0.0f, 0.50f, red);
+
+ glBeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, query);
+ static float blue[] = { 0.0f, 0.0f, 1.0f, 1.0f };
+ DrawRect(-0.125f, 0.1f, 0.25f, blue);
+ glEndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
+
+ glFinish();
+
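+  // After glFinish() the query result should be available immediately.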
+ GLuint query_status = 0;
+ GLuint result = 0;
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_AVAILABLE_EXT, &result);
+ EXPECT_TRUE(result);
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_EXT, &query_status);
+ EXPECT_FALSE(query_status);
+
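+  // Redraw with the red quad moved aside (x = 1) so the blue quad is no
+  // longer occluded; this time the query should report that samples passed.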
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ DrawRect(1, 0.0f, 0.50f, red);
+
+ glBeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, query);
+ DrawRect(-0.125f, 0.1f, 0.25f, blue);
+ glEndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
+
+ glFinish();
+
+ query_status = 0;
+ result = 0;
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_AVAILABLE_EXT, &result);
+ EXPECT_TRUE(result);
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_EXT, &query_status);
+ EXPECT_TRUE(query_status);
+ GLTestHelper::CheckGLError("no errors", __LINE__);
+}
+
+} // namespace gpu
+
+