Update from chromium https://crrev.com/302282
Updates based on chromium https://crrev.com/302282 /
30228db41b946899ce33f284da0adf2b35188d552de47. Contains updates for
https://crrev.com/301916 and https://crrev.com/301795.
TBR=ben@chromium.org
Review URL: https://codereview.chromium.org/683113005
diff --git a/cc/BUILD.gn b/cc/BUILD.gn
index 8cb8e8a..292d476 100644
--- a/cc/BUILD.gn
+++ b/cc/BUILD.gn
@@ -559,6 +559,8 @@
"test/layer_tree_host_common_test.h",
"test/layer_tree_json_parser.cc",
"test/layer_tree_json_parser.h",
+ "test/layer_tree_pixel_resource_test.cc",
+ "test/layer_tree_pixel_resource_test.h",
"test/layer_tree_pixel_test.cc",
"test/layer_tree_pixel_test.h",
"test/layer_tree_test.cc",
@@ -598,6 +600,8 @@
"test/test_gles2_interface.h",
"test/test_gpu_memory_buffer_manager.cc",
"test/test_gpu_memory_buffer_manager.h",
+ "test/test_image_factory.cc",
+ "test/test_image_factory.h",
"test/test_now_source.cc",
"test/test_now_source.h",
"test/test_occlusion_tracker.h",
@@ -740,6 +744,7 @@
"trees/layer_tree_host_pixeltest_masks.cc",
"trees/layer_tree_host_pixeltest_on_demand_raster.cc",
"trees/layer_tree_host_pixeltest_readback.cc",
+ "trees/layer_tree_host_pixeltest_synchronous.cc",
"trees/layer_tree_host_unittest.cc",
"trees/layer_tree_host_unittest_animation.cc",
"trees/layer_tree_host_unittest_context.cc",
diff --git a/cc/base/delayed_unique_notifier.cc b/cc/base/delayed_unique_notifier.cc
index c297267..1824226 100644
--- a/cc/base/delayed_unique_notifier.cc
+++ b/cc/base/delayed_unique_notifier.cc
@@ -43,6 +43,15 @@
next_notification_time_ = base::TimeTicks();
}
+void DelayedUniqueNotifier::Shutdown() {
+ // This function must destroy any weak ptrs since after being cancelled, this
+ // class may be destroyed on another thread during compositor shutdown.
+ weak_ptr_factory_.InvalidateWeakPtrs();
+ // Deliberately leaves notification_pending_ = true forever so new tasks with
+ // weak ptrs cannot be created.
+ notification_pending_ = true;
+}
+
bool DelayedUniqueNotifier::HasPendingNotification() const {
return notification_pending_ && !next_notification_time_.is_null();
}
diff --git a/cc/base/delayed_unique_notifier.h b/cc/base/delayed_unique_notifier.h
index 59d906b..02051be 100644
--- a/cc/base/delayed_unique_notifier.h
+++ b/cc/base/delayed_unique_notifier.h
@@ -37,6 +37,11 @@
// Cancel any previously scheduled runs.
void Cancel();
+ // Cancel previously scheduled runs and prevent any new runs from starting.
+ // After calling this the DelayedUniqueNotifier will have no outstanding
+ // WeakPtrs.
+ void Shutdown();
+
// Returns true if a notification is currently scheduled to run.
bool HasPendingNotification() const;
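
For orientation, here is a minimal usage sketch of the new Shutdown() call (illustration only, not part of this patch): a hypothetical OwnerOnCompositorThread that coalesces work through a DelayedUniqueNotifier and tears it down safely. The constructor arguments mirror the ones exercised by the unit tests below; the owner type and its members are invented for the example.

#include "base/bind.h"
#include "base/sequenced_task_runner.h"
#include "base/time/time.h"
#include "cc/base/delayed_unique_notifier.h"

class OwnerOnCompositorThread {
 public:
  OwnerOnCompositorThread(base::SequencedTaskRunner* task_runner,
                          base::TimeDelta delay)
      : notifier_(task_runner,
                  base::Bind(&OwnerOnCompositorThread::DoWork,
                             base::Unretained(this)),
                  delay) {}

  // Repeated requests coalesce into one delayed notification.
  void RequestWork() { notifier_.Schedule(); }

  // Unlike Cancel(), Shutdown() also invalidates the notifier's WeakPtrs and
  // turns any later Schedule() into a no-op, so no pending task can reach this
  // object once the compositor starts tearing it down on another thread.
  void TearDown() { notifier_.Shutdown(); }

 private:
  void DoWork() { /* coalesced work */ }

  cc::DelayedUniqueNotifier notifier_;
};
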
diff --git a/cc/base/delayed_unique_notifier_unittest.cc b/cc/base/delayed_unique_notifier_unittest.cc
index 41cde83..3e41d32 100644
--- a/cc/base/delayed_unique_notifier_unittest.cc
+++ b/cc/base/delayed_unique_notifier_unittest.cc
@@ -262,5 +262,74 @@
EXPECT_FALSE(notifier.HasPendingNotification());
}
+TEST_F(DelayedUniqueNotifierTest, ShutdownWithScheduledTask) {
+ base::TimeDelta delay = base::TimeDelta::FromInternalValue(20);
+ TestNotifier notifier(
+ task_runner_.get(),
+ base::Bind(&DelayedUniqueNotifierTest::Notify, base::Unretained(this)),
+ delay);
+
+ EXPECT_EQ(0, NotificationCount());
+
+ // Schedule a notification to run |delay| from now.
+ base::TimeTicks schedule_time =
+ notifier.Now() + base::TimeDelta::FromInternalValue(10);
+ notifier.SetNow(schedule_time);
+ notifier.Schedule();
+ EXPECT_TRUE(notifier.HasPendingNotification());
+
+ // Shutdown the notifier.
+ notifier.Shutdown();
+
+ // The task is still there, but...
+ std::deque<base::TestPendingTask> tasks = TakePendingTasks();
+ ASSERT_EQ(1u, tasks.size());
+
+ // Running the task after shutdown does nothing since it's cancelled.
+ tasks[0].task.Run();
+ EXPECT_EQ(0, NotificationCount());
+
+ tasks = TakePendingTasks();
+ EXPECT_EQ(0u, tasks.size());
+
+ // We are no longer able to schedule tasks.
+ notifier.Schedule();
+ tasks = TakePendingTasks();
+ ASSERT_EQ(0u, tasks.size());
+
+ // Verify that there is still no task after the scheduled time passes.
+ notifier.SetNow(notifier.Now() + delay);
+ tasks = TakePendingTasks();
+ ASSERT_EQ(0u, tasks.size());
+}
+
+TEST_F(DelayedUniqueNotifierTest, ShutdownPreventsSchedule) {
+ base::TimeDelta delay = base::TimeDelta::FromInternalValue(20);
+ TestNotifier notifier(
+ task_runner_.get(),
+ base::Bind(&DelayedUniqueNotifierTest::Notify, base::Unretained(this)),
+ delay);
+
+ EXPECT_EQ(0, NotificationCount());
+
+ // Advance the clock; nothing has been scheduled yet.
+ base::TimeTicks schedule_time =
+ notifier.Now() + base::TimeDelta::FromInternalValue(10);
+ notifier.SetNow(schedule_time);
+
+ // Shutdown the notifier.
+ notifier.Shutdown();
+
+ // Scheduling a task no longer does anything.
+ notifier.Schedule();
+ std::deque<base::TestPendingTask> tasks = TakePendingTasks();
+ ASSERT_EQ(0u, tasks.size());
+
+ // Verify that there is still no task after the scheduled time passes.
+ notifier.SetNow(notifier.Now() + delay);
+ tasks = TakePendingTasks();
+ ASSERT_EQ(0u, tasks.size());
+}
+
} // namespace
} // namespace cc
diff --git a/cc/cc_tests.gyp b/cc/cc_tests.gyp
index 2724ec0..1300d32 100644
--- a/cc/cc_tests.gyp
+++ b/cc/cc_tests.gyp
@@ -107,6 +107,7 @@
'trees/layer_tree_host_pixeltest_masks.cc',
'trees/layer_tree_host_pixeltest_on_demand_raster.cc',
'trees/layer_tree_host_pixeltest_readback.cc',
+ 'trees/layer_tree_host_pixeltest_synchronous.cc',
'trees/layer_tree_host_unittest.cc',
'trees/layer_tree_host_unittest_animation.cc',
'trees/layer_tree_host_unittest_context.cc',
@@ -198,6 +199,8 @@
'test/layer_tree_host_common_test.h',
'test/layer_tree_json_parser.cc',
'test/layer_tree_json_parser.h',
+ 'test/layer_tree_pixel_resource_test.cc',
+ 'test/layer_tree_pixel_resource_test.h',
'test/layer_tree_pixel_test.cc',
'test/layer_tree_pixel_test.h',
'test/layer_tree_test.cc',
@@ -237,6 +240,8 @@
'test/test_gles2_interface.h',
'test/test_gpu_memory_buffer_manager.cc',
'test/test_gpu_memory_buffer_manager.h',
+ 'test/test_image_factory.cc',
+ 'test/test_image_factory.h',
'test/test_now_source.cc',
'test/test_now_source.h',
'test/test_occlusion_tracker.h',
diff --git a/cc/layers/delegated_renderer_layer_impl.cc b/cc/layers/delegated_renderer_layer_impl.cc
index c6692d1..9eca7a1 100644
--- a/cc/layers/delegated_renderer_layer_impl.cc
+++ b/cc/layers/delegated_renderer_layer_impl.cc
@@ -125,10 +125,9 @@
&invalid_frame,
resource_map,
&resources_in_frame);
- for (size_t i = 0; i < render_pass_list.size(); ++i) {
- RenderPass* pass = render_pass_list[i];
- for (auto& quad : pass->quad_list)
- quad.IterateResources(remap_resources_to_parent_callback);
+ for (const auto& pass : render_pass_list) {
+ for (const auto& quad : pass->quad_list)
+ quad->IterateResources(remap_resources_to_parent_callback);
}
if (invalid_frame) {
@@ -396,8 +395,8 @@
bool is_root_delegated_render_pass =
delegated_render_pass == render_passes_in_draw_order_.back();
- if (delegated_quad.shared_quad_state != delegated_shared_quad_state) {
- delegated_shared_quad_state = delegated_quad.shared_quad_state;
+ if (delegated_quad->shared_quad_state != delegated_shared_quad_state) {
+ delegated_shared_quad_state = delegated_quad->shared_quad_state;
output_shared_quad_state = render_pass->CreateAndAppendSharedQuadState();
output_shared_quad_state->CopyFrom(delegated_shared_quad_state);
@@ -447,18 +446,18 @@
gfx::Rect quad_visible_rect =
occlusion_in_quad_space.GetUnoccludedContentRect(
- delegated_quad.visible_rect);
+ delegated_quad->visible_rect);
if (quad_visible_rect.IsEmpty())
continue;
- if (delegated_quad.material != DrawQuad::RENDER_PASS) {
+ if (delegated_quad->material != DrawQuad::RENDER_PASS) {
DrawQuad* output_quad = render_pass->CopyFromAndAppendDrawQuad(
- &delegated_quad, output_shared_quad_state);
+ delegated_quad, output_shared_quad_state);
output_quad->visible_rect = quad_visible_rect;
} else {
RenderPassId delegated_contributing_render_pass_id =
- RenderPassDrawQuad::MaterialCast(&delegated_quad)->render_pass_id;
+ RenderPassDrawQuad::MaterialCast(delegated_quad)->render_pass_id;
RenderPassId output_contributing_render_pass_id(-1, -1);
bool present =
@@ -473,7 +472,7 @@
RenderPassDrawQuad* output_quad =
render_pass->CopyFromAndAppendRenderPassDrawQuad(
- RenderPassDrawQuad::MaterialCast(&delegated_quad),
+ RenderPassDrawQuad::MaterialCast(delegated_quad),
output_shared_quad_state,
output_contributing_render_pass_id);
output_quad->visible_rect = quad_visible_rect;
diff --git a/cc/layers/nine_patch_layer_impl_unittest.cc b/cc/layers/nine_patch_layer_impl_unittest.cc
index 730b3e5..695d827 100644
--- a/cc/layers/nine_patch_layer_impl_unittest.cc
+++ b/cc/layers/nine_patch_layer_impl_unittest.cc
@@ -89,7 +89,7 @@
gfx::Rect bitmap_rect(bitmap_size);
Region tex_remaining(bitmap_rect);
for (const auto& quad : quads) {
- const TextureDrawQuad* tex_quad = TextureDrawQuad::MaterialCast(&quad);
+ const TextureDrawQuad* tex_quad = TextureDrawQuad::MaterialCast(quad);
gfx::RectF tex_rect =
gfx::BoundingRect(tex_quad->uv_top_left, tex_quad->uv_bottom_right);
tex_rect.Scale(bitmap_size.width(), bitmap_size.height());
diff --git a/cc/layers/picture_layer.cc b/cc/layers/picture_layer.cc
index 6e5ba79..f608ba7 100644
--- a/cc/layers/picture_layer.cc
+++ b/cc/layers/picture_layer.cc
@@ -51,7 +51,7 @@
// See PictureLayerImpl::PushPropertiesTo for more details.
layer_impl->invalidation_.Clear();
layer_impl->invalidation_.Swap(&pile_invalidation_);
- layer_impl->pile_ = PicturePileImpl::CreateFromOther(&pile_);
+ layer_impl->UpdatePile(PicturePileImpl::CreateFromOther(&pile_));
}
void PictureLayer::SetLayerTreeHost(LayerTreeHost* host) {
diff --git a/cc/layers/picture_layer_impl.cc b/cc/layers/picture_layer_impl.cc
index 7fd7730..c3c7ca0 100644
--- a/cc/layers/picture_layer_impl.cc
+++ b/cc/layers/picture_layer_impl.cc
@@ -122,7 +122,7 @@
twin_layer_ = layer_impl;
layer_impl->twin_layer_ = this;
- layer_impl->pile_ = pile_;
+ layer_impl->UpdatePile(pile_);
DCHECK(!pile_->is_solid_color() || !tilings_->num_tilings());
// Tilings would be expensive to push, so we swap.
@@ -158,6 +158,16 @@
needs_push_properties_ = true;
}
+void PictureLayerImpl::UpdatePile(scoped_refptr<PicturePileImpl> pile) {
+ bool could_have_tilings = CanHaveTilings();
+ pile_.swap(pile);
+
+ // Need to call UpdateTiles again if CanHaveTilings changed.
+ if (could_have_tilings != CanHaveTilings()) {
+ layer_tree_impl()->set_needs_update_draw_properties();
+ }
+}
+
void PictureLayerImpl::AppendQuads(RenderPass* render_pass,
const Occlusion& occlusion_in_content_space,
AppendQuadsData* append_quads_data) {
diff --git a/cc/layers/picture_layer_impl.h b/cc/layers/picture_layer_impl.h
index c5aab85..024e315 100644
--- a/cc/layers/picture_layer_impl.h
+++ b/cc/layers/picture_layer_impl.h
@@ -169,6 +169,7 @@
void ResetRasterScale();
gfx::Rect GetViewportForTilePriorityInContentSpace() const;
PictureLayerImpl* GetRecycledTwinLayer() const;
+ void UpdatePile(scoped_refptr<PicturePileImpl> pile);
void DoPostCommitInitializationIfNeeded() {
if (needs_post_commit_initialization_)
diff --git a/cc/layers/picture_layer_impl_unittest.cc b/cc/layers/picture_layer_impl_unittest.cc
index 06f75f0..6b2d4c9 100644
--- a/cc/layers/picture_layer_impl_unittest.cc
+++ b/cc/layers/picture_layer_impl_unittest.cc
@@ -1467,15 +1467,47 @@
active_layer_->DidDraw(nullptr);
Region remaining = visible_rect;
- for (auto& quad : render_pass->quad_list) {
- EXPECT_TRUE(visible_rect.Contains(quad.rect));
- EXPECT_TRUE(remaining.Contains(quad.rect));
- remaining.Subtract(quad.rect);
+ for (const auto& quad : render_pass->quad_list) {
+ EXPECT_TRUE(visible_rect.Contains(quad->rect));
+ EXPECT_TRUE(remaining.Contains(quad->rect));
+ remaining.Subtract(quad->rect);
}
EXPECT_TRUE(remaining.IsEmpty());
}
+TEST_F(PictureLayerImplTest, TileScalesWithSolidColorPile) {
+ gfx::Size layer_bounds(200, 200);
+ gfx::Size tile_size(host_impl_.settings().default_tile_size);
+ scoped_refptr<FakePicturePileImpl> pending_pile =
+ FakePicturePileImpl::CreateEmptyPileThatThinksItHasRecordings(
+ tile_size, layer_bounds);
+ scoped_refptr<FakePicturePileImpl> active_pile =
+ FakePicturePileImpl::CreateEmptyPileThatThinksItHasRecordings(
+ tile_size, layer_bounds);
+
+ pending_pile->set_is_solid_color(false);
+ active_pile->set_is_solid_color(true);
+ SetupTrees(pending_pile, active_pile);
+ // Solid color layer should not have tilings.
+ ASSERT_FALSE(active_layer_->CanHaveTilings());
+
+ // Updating properties with a solid color pile should not allow tilings at
+ // any scale.
+ host_impl_.active_tree()->UpdateDrawProperties();
+ EXPECT_FALSE(active_layer_->CanHaveTilings());
+ EXPECT_EQ(0.f, active_layer_->ideal_contents_scale());
+
+ // Pushing the non-solid-color pile lets the active layer have tilings.
+ active_layer_->UpdatePile(pending_pile);
+ ASSERT_TRUE(active_layer_->CanHaveTilings());
+
+ // Updating properties with a non-solid-color pile should allow tilings.
+ host_impl_.active_tree()->UpdateDrawProperties();
+ EXPECT_TRUE(active_layer_->CanHaveTilings());
+ EXPECT_GT(active_layer_->ideal_contents_scale(), 0.f);
+}
+
TEST_F(PictureLayerImplTest, MarkRequiredOffscreenTiles) {
gfx::Size tile_size(100, 100);
gfx::Size layer_bounds(200, 200);
diff --git a/cc/layers/tiled_layer_impl_unittest.cc b/cc/layers/tiled_layer_impl_unittest.cc
index 1becc6e..689416a 100644
--- a/cc/layers/tiled_layer_impl_unittest.cc
+++ b/cc/layers/tiled_layer_impl_unittest.cc
@@ -163,7 +163,7 @@
EXPECT_EQ(0u, data.num_missing_tiles);
for (const auto& quad : render_pass->quad_list)
- EXPECT_EQ(quad.material, DrawQuad::TILED_CONTENT);
+ EXPECT_EQ(quad->material, DrawQuad::TILED_CONTENT);
}
for (int i = 0; i < num_tiles_x; ++i)
@@ -179,7 +179,7 @@
EXPECT_LT(0u, data.num_missing_tiles);
EXPECT_EQ(render_pass->quad_list.size(), 4u);
for (const auto& quad : render_pass->quad_list)
- EXPECT_NE(quad.material, DrawQuad::TILED_CONTENT);
+ EXPECT_NE(quad->material, DrawQuad::TILED_CONTENT);
}
}
@@ -258,7 +258,7 @@
for (auto iter = render_pass->quad_list.cbegin();
iter != render_pass->quad_list.cend();
++iter) {
- const TileDrawQuad* quad = TileDrawQuad::MaterialCast(&*iter);
+ const TileDrawQuad* quad = TileDrawQuad::MaterialCast(*iter);
EXPECT_NE(0u, quad->resource_id) << LayerTestCommon::quad_string
<< iter.index();
diff --git a/cc/output/delegating_renderer.cc b/cc/output/delegating_renderer.cc
index c4dc9fe..dc951af 100644
--- a/cc/output/delegating_renderer.cc
+++ b/cc/output/delegating_renderer.cc
@@ -90,10 +90,9 @@
ResourceProvider::ResourceIdArray resources;
DrawQuad::ResourceIteratorCallback append_to_array =
base::Bind(&AppendToArray, &resources);
- for (size_t i = 0; i < out_data.render_pass_list.size(); ++i) {
- RenderPass* render_pass = out_data.render_pass_list.at(i);
- for (auto& quad : render_pass->quad_list)
- quad.IterateResources(append_to_array);
+ for (const auto& render_pass : out_data.render_pass_list) {
+ for (const auto& quad : render_pass->quad_list)
+ quad->IterateResources(append_to_array);
}
resource_provider_->PrepareSendToParent(resources, &out_data.resource_list);
}
diff --git a/cc/output/direct_renderer.cc b/cc/output/direct_renderer.cc
index ead2e21..50d82d0 100644
--- a/cc/output/direct_renderer.cc
+++ b/cc/output/direct_renderer.cc
@@ -369,10 +369,9 @@
}
const QuadList& quad_list = render_pass->quad_list;
- for (QuadList::ConstBackToFrontIterator it = quad_list.BackToFrontBegin();
- it != quad_list.BackToFrontEnd();
+ for (auto it = quad_list.BackToFrontBegin(); it != quad_list.BackToFrontEnd();
++it) {
- const DrawQuad& quad = *it;
+ const DrawQuad& quad = **it;
bool should_skip_quad = false;
if (using_scissor_as_optimization) {
diff --git a/cc/output/gl_renderer.cc b/cc/output/gl_renderer.cc
index b93ddcf..a73c4af 100644
--- a/cc/output/gl_renderer.cc
+++ b/cc/output/gl_renderer.cc
@@ -49,35 +49,6 @@
namespace cc {
namespace {
-class FallbackFence : public ResourceProvider::Fence {
- public:
- explicit FallbackFence(gpu::gles2::GLES2Interface* gl)
- : gl_(gl), has_passed_(true) {}
-
- // Overridden from ResourceProvider::Fence:
- void Set() override { has_passed_ = false; }
- bool HasPassed() override {
- if (!has_passed_) {
- has_passed_ = true;
- Synchronize();
- }
- return true;
- }
-
- private:
- ~FallbackFence() override {}
-
- void Synchronize() {
- TRACE_EVENT0("cc", "FallbackFence::Synchronize");
- gl_->Finish();
- }
-
- gpu::gles2::GLES2Interface* gl_;
- bool has_passed_;
-
- DISALLOW_COPY_AND_ASSIGN(FallbackFence);
-};
-
bool NeedsIOSurfaceReadbackWorkaround() {
#if defined(OS_MACOSX)
// This isn't strictly required in DumpRenderTree-mode when Mesa is used,
@@ -293,6 +264,10 @@
query_->Set();
}
bool HasPassed() override { return !query_ || !query_->IsPending(); }
+ void Wait() override {
+ if (query_)
+ query_->Wait();
+ }
private:
~Fence() override {}
@@ -465,9 +440,6 @@
}
void GLRenderer::BeginDrawingFrame(DrawingFrame* frame) {
- if (frame->device_viewport_rect.IsEmpty())
- return;
-
TRACE_EVENT0("cc", "GLRenderer::BeginDrawingFrame");
scoped_refptr<ResourceProvider::Fence> read_lock_fence;
@@ -494,7 +466,8 @@
read_lock_fence = current_sync_query_->Begin();
} else {
- read_lock_fence = make_scoped_refptr(new FallbackFence(gl_));
+ read_lock_fence =
+ make_scoped_refptr(new ResourceProvider::SynchronousFence(gl_));
}
resource_provider_->SetReadLockFence(read_lock_fence.get());
@@ -503,10 +476,9 @@
DrawQuad::ResourceIteratorCallback wait_on_resource_syncpoints_callback =
base::Bind(&WaitOnResourceSyncPoints, resource_provider_);
- for (size_t i = 0; i < frame->render_passes_in_draw_order->size(); ++i) {
- RenderPass* pass = frame->render_passes_in_draw_order->at(i);
- for (auto& quad : pass->quad_list)
- quad.IterateResources(wait_on_resource_syncpoints_callback);
+ for (const auto& pass : *frame->render_passes_in_draw_order) {
+ for (const auto& quad : pass->quad_list)
+ quad->IterateResources(wait_on_resource_syncpoints_callback);
}
// TODO(enne): Do we need to reinitialize all of this state per frame?
@@ -1183,8 +1155,8 @@
: BlendModeNormal;
if (use_aa && mask_texture_id && !use_color_matrix) {
- const RenderPassMaskProgramAA* program =
- GetRenderPassMaskProgramAA(tex_coord_precision, shader_blend_mode);
+ const RenderPassMaskProgramAA* program = GetRenderPassMaskProgramAA(
+ tex_coord_precision, mask_sampler, shader_blend_mode);
SetUseProgram(program->program());
GLC(gl_, gl_->Uniform1i(program->fragment_shader().sampler_location(), 0));
@@ -1205,8 +1177,8 @@
shader_backdrop_rect_location =
program->fragment_shader().backdrop_rect_location();
} else if (!use_aa && mask_texture_id && !use_color_matrix) {
- const RenderPassMaskProgram* program =
- GetRenderPassMaskProgram(tex_coord_precision, shader_blend_mode);
+ const RenderPassMaskProgram* program = GetRenderPassMaskProgram(
+ tex_coord_precision, mask_sampler, shader_blend_mode);
SetUseProgram(program->program());
GLC(gl_, gl_->Uniform1i(program->fragment_shader().sampler_location(), 0));
@@ -1241,8 +1213,8 @@
program->fragment_shader().backdrop_rect_location();
} else if (use_aa && mask_texture_id && use_color_matrix) {
const RenderPassMaskColorMatrixProgramAA* program =
- GetRenderPassMaskColorMatrixProgramAA(tex_coord_precision,
- shader_blend_mode);
+ GetRenderPassMaskColorMatrixProgramAA(
+ tex_coord_precision, mask_sampler, shader_blend_mode);
SetUseProgram(program->program());
GLC(gl_, gl_->Uniform1i(program->fragment_shader().sampler_location(), 0));
@@ -1289,8 +1261,8 @@
program->fragment_shader().backdrop_rect_location();
} else if (!use_aa && mask_texture_id && use_color_matrix) {
const RenderPassMaskColorMatrixProgram* program =
- GetRenderPassMaskColorMatrixProgram(tex_coord_precision,
- shader_blend_mode);
+ GetRenderPassMaskColorMatrixProgram(
+ tex_coord_precision, mask_sampler, shader_blend_mode);
SetUseProgram(program->program());
GLC(gl_, gl_->Uniform1i(program->fragment_shader().sampler_location(), 0));
@@ -1361,14 +1333,16 @@
-tex_scale_y));
GLint last_texture_unit = 0;
- scoped_ptr<ResourceProvider::ScopedSamplerGL> shader_mask_sampler_lock;
if (shader_mask_sampler_location != -1) {
DCHECK_NE(shader_mask_tex_coord_scale_location, 1);
DCHECK_NE(shader_mask_tex_coord_offset_location, 1);
- DCHECK_EQ(SamplerType2D, mask_sampler);
GLC(gl_, gl_->Uniform1i(shader_mask_sampler_location, 1));
gfx::RectF mask_uv_rect = quad->MaskUVRect();
+ if (mask_sampler != SamplerType2D) {
+ mask_uv_rect.Scale(quad->mask_texture_size.width(),
+ quad->mask_texture_size.height());
+ }
// Mask textures are oriented vertically flipped relative to the framebuffer
// and the RenderPass contents texture, so we flip the tex coords from the
@@ -2913,38 +2887,40 @@
const GLRenderer::RenderPassMaskProgram* GLRenderer::GetRenderPassMaskProgram(
TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode) {
DCHECK_GE(precision, 0);
DCHECK_LT(precision, NumTexCoordPrecisions);
+ DCHECK_GE(sampler, 0);
+ DCHECK_LT(sampler, NumSamplerTypes);
DCHECK_GE(blend_mode, 0);
DCHECK_LT(blend_mode, NumBlendModes);
RenderPassMaskProgram* program =
- &render_pass_mask_program_[precision][blend_mode];
+ &render_pass_mask_program_[precision][sampler][blend_mode];
if (!program->initialized()) {
TRACE_EVENT0("cc", "GLRenderer::renderPassMaskProgram::initialize");
- program->Initialize(output_surface_->context_provider(),
- precision,
- SamplerType2D,
- blend_mode);
+ program->Initialize(
+ output_surface_->context_provider(), precision, sampler, blend_mode);
}
return program;
}
const GLRenderer::RenderPassMaskProgramAA*
GLRenderer::GetRenderPassMaskProgramAA(TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode) {
DCHECK_GE(precision, 0);
DCHECK_LT(precision, NumTexCoordPrecisions);
+ DCHECK_GE(sampler, 0);
+ DCHECK_LT(sampler, NumSamplerTypes);
DCHECK_GE(blend_mode, 0);
DCHECK_LT(blend_mode, NumBlendModes);
RenderPassMaskProgramAA* program =
- &render_pass_mask_program_aa_[precision][blend_mode];
+ &render_pass_mask_program_aa_[precision][sampler][blend_mode];
if (!program->initialized()) {
TRACE_EVENT0("cc", "GLRenderer::renderPassMaskProgramAA::initialize");
- program->Initialize(output_surface_->context_provider(),
- precision,
- SamplerType2D,
- blend_mode);
+ program->Initialize(
+ output_surface_->context_provider(), precision, sampler, blend_mode);
}
return program;
}
@@ -2990,40 +2966,43 @@
const GLRenderer::RenderPassMaskColorMatrixProgram*
GLRenderer::GetRenderPassMaskColorMatrixProgram(TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode) {
DCHECK_GE(precision, 0);
DCHECK_LT(precision, NumTexCoordPrecisions);
+ DCHECK_GE(sampler, 0);
+ DCHECK_LT(sampler, NumSamplerTypes);
DCHECK_GE(blend_mode, 0);
DCHECK_LT(blend_mode, NumBlendModes);
RenderPassMaskColorMatrixProgram* program =
- &render_pass_mask_color_matrix_program_[precision][blend_mode];
+ &render_pass_mask_color_matrix_program_[precision][sampler][blend_mode];
if (!program->initialized()) {
TRACE_EVENT0("cc",
"GLRenderer::renderPassMaskColorMatrixProgram::initialize");
- program->Initialize(output_surface_->context_provider(),
- precision,
- SamplerType2D,
- blend_mode);
+ program->Initialize(
+ output_surface_->context_provider(), precision, sampler, blend_mode);
}
return program;
}
const GLRenderer::RenderPassMaskColorMatrixProgramAA*
GLRenderer::GetRenderPassMaskColorMatrixProgramAA(TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode) {
DCHECK_GE(precision, 0);
DCHECK_LT(precision, NumTexCoordPrecisions);
+ DCHECK_GE(sampler, 0);
+ DCHECK_LT(sampler, NumSamplerTypes);
DCHECK_GE(blend_mode, 0);
DCHECK_LT(blend_mode, NumBlendModes);
RenderPassMaskColorMatrixProgramAA* program =
- &render_pass_mask_color_matrix_program_aa_[precision][blend_mode];
+ &render_pass_mask_color_matrix_program_aa_[precision][sampler]
+ [blend_mode];
if (!program->initialized()) {
TRACE_EVENT0("cc",
"GLRenderer::renderPassMaskColorMatrixProgramAA::initialize");
- program->Initialize(output_surface_->context_provider(),
- precision,
- SamplerType2D,
- blend_mode);
+ program->Initialize(
+ output_surface_->context_provider(), precision, sampler, blend_mode);
}
return program;
}
@@ -3248,16 +3227,19 @@
tile_program_swizzle_opaque_[i][j].Cleanup(gl_);
tile_program_aa_[i][j].Cleanup(gl_);
tile_program_swizzle_aa_[i][j].Cleanup(gl_);
+
+ for (int k = 0; k < NumBlendModes; k++) {
+ render_pass_mask_program_[i][j][k].Cleanup(gl_);
+ render_pass_mask_program_aa_[i][j][k].Cleanup(gl_);
+ render_pass_mask_color_matrix_program_aa_[i][j][k].Cleanup(gl_);
+ render_pass_mask_color_matrix_program_[i][j][k].Cleanup(gl_);
+ }
}
for (int j = 0; j < NumBlendModes; j++) {
- render_pass_mask_program_[i][j].Cleanup(gl_);
render_pass_program_[i][j].Cleanup(gl_);
- render_pass_mask_program_aa_[i][j].Cleanup(gl_);
render_pass_program_aa_[i][j].Cleanup(gl_);
render_pass_color_matrix_program_[i][j].Cleanup(gl_);
- render_pass_mask_color_matrix_program_aa_[i][j].Cleanup(gl_);
render_pass_color_matrix_program_aa_[i][j].Cleanup(gl_);
- render_pass_mask_color_matrix_program_[i][j].Cleanup(gl_);
}
texture_program_[i].Cleanup(gl_);
diff --git a/cc/output/gl_renderer.h b/cc/output/gl_renderer.h
index ad74396..dc3160a 100644
--- a/cc/output/gl_renderer.h
+++ b/cc/output/gl_renderer.h
@@ -336,9 +336,11 @@
BlendMode blend_mode);
const RenderPassMaskProgram* GetRenderPassMaskProgram(
TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode);
const RenderPassMaskProgramAA* GetRenderPassMaskProgramAA(
TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode);
const RenderPassColorMatrixProgram* GetRenderPassColorMatrixProgram(
TexCoordPrecision precision,
@@ -348,9 +350,11 @@
BlendMode blend_mode);
const RenderPassMaskColorMatrixProgram* GetRenderPassMaskColorMatrixProgram(
TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode);
const RenderPassMaskColorMatrixProgramAA*
GetRenderPassMaskColorMatrixProgramAA(TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode);
const TextureProgram* GetTextureProgram(
@@ -399,18 +403,18 @@
RenderPassProgram render_pass_program_[NumTexCoordPrecisions][NumBlendModes];
RenderPassProgramAA
render_pass_program_aa_[NumTexCoordPrecisions][NumBlendModes];
- RenderPassMaskProgram
- render_pass_mask_program_[NumTexCoordPrecisions][NumBlendModes];
- RenderPassMaskProgramAA
- render_pass_mask_program_aa_[NumTexCoordPrecisions][NumBlendModes];
+ RenderPassMaskProgram render_pass_mask_program_
+ [NumTexCoordPrecisions][NumSamplerTypes][NumBlendModes];
+ RenderPassMaskProgramAA render_pass_mask_program_aa_
+ [NumTexCoordPrecisions][NumSamplerTypes][NumBlendModes];
RenderPassColorMatrixProgram
render_pass_color_matrix_program_[NumTexCoordPrecisions][NumBlendModes];
RenderPassColorMatrixProgramAA render_pass_color_matrix_program_aa_
[NumTexCoordPrecisions][NumBlendModes];
RenderPassMaskColorMatrixProgram render_pass_mask_color_matrix_program_
- [NumTexCoordPrecisions][NumBlendModes];
+ [NumTexCoordPrecisions][NumSamplerTypes][NumBlendModes];
RenderPassMaskColorMatrixProgramAA render_pass_mask_color_matrix_program_aa_
- [NumTexCoordPrecisions][NumBlendModes];
+ [NumTexCoordPrecisions][NumSamplerTypes][NumBlendModes];
VideoYUVProgram video_yuv_program_[NumTexCoordPrecisions];
VideoYUVAProgram video_yuva_program_[NumTexCoordPrecisions];
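
Since the mask program caches above gain a SamplerType dimension (and the .cc hunks scale mask_uv_rect by mask_texture_size for non-2D samplers), here is a generic, self-contained sketch of the lookup-and-lazy-initialize pattern these arrays use. Plain C++ for illustration only; the enum values and Program type are stand-ins, not cc types.

// Stand-in enums and program type (illustration only).
enum TexCoordPrecision { kPrecisionMedium, kPrecisionHigh, kNumPrecisions };
enum SamplerType { kSampler2D, kSampler2DRect, kSamplerExternalOES, kNumSamplers };
enum BlendMode { kBlendNormal, kBlendMultiply, kNumBlendModes };

struct Program {
  bool initialized = false;
  void Initialize(TexCoordPrecision, SamplerType, BlendMode) {
    initialized = true;  // a real renderer would compile and link here
  }
};

class ProgramCache {
 public:
  // Programs are keyed by all three enums and compiled lazily on first use, so
  // only the precision/sampler/blend-mode combinations a frame actually needs
  // ever get initialized.
  Program* Get(TexCoordPrecision p, SamplerType s, BlendMode b) {
    Program* program = &programs_[p][s][b];
    if (!program->initialized)
      program->Initialize(p, s, b);
    return program;
  }

 private:
  Program programs_[kNumPrecisions][kNumSamplers][kNumBlendModes];
};
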
diff --git a/cc/output/gl_renderer_unittest.cc b/cc/output/gl_renderer_unittest.cc
index 8de90c4..dc68675 100644
--- a/cc/output/gl_renderer_unittest.cc
+++ b/cc/output/gl_renderer_unittest.cc
@@ -114,18 +114,6 @@
renderer()->GetRenderPassProgram(precision, blend_mode));
EXPECT_PROGRAM_VALID(
renderer()->GetRenderPassProgramAA(precision, blend_mode));
- EXPECT_PROGRAM_VALID(
- renderer()->GetRenderPassMaskProgram(precision, blend_mode));
- EXPECT_PROGRAM_VALID(
- renderer()->GetRenderPassMaskProgramAA(precision, blend_mode));
- EXPECT_PROGRAM_VALID(
- renderer()->GetRenderPassColorMatrixProgram(precision, blend_mode));
- EXPECT_PROGRAM_VALID(renderer()->GetRenderPassMaskColorMatrixProgramAA(
- precision, blend_mode));
- EXPECT_PROGRAM_VALID(
- renderer()->GetRenderPassColorMatrixProgramAA(precision, blend_mode));
- EXPECT_PROGRAM_VALID(renderer()->GetRenderPassMaskColorMatrixProgram(
- precision, blend_mode));
}
EXPECT_PROGRAM_VALID(renderer()->GetTextureProgram(precision));
EXPECT_PROGRAM_VALID(
@@ -158,6 +146,17 @@
renderer()->GetTileProgramSwizzleOpaque(precision, sampler));
EXPECT_PROGRAM_VALID(
renderer()->GetTileProgramSwizzleAA(precision, sampler));
+ for (int i = 0; i < NumBlendModes; ++i) {
+ BlendMode blend_mode = static_cast<BlendMode>(i);
+ EXPECT_PROGRAM_VALID(
+ renderer()->GetRenderPassMaskProgram(precision, sampler, blend_mode));
+ EXPECT_PROGRAM_VALID(renderer()->GetRenderPassMaskProgramAA(
+ precision, sampler, blend_mode));
+ EXPECT_PROGRAM_VALID(renderer()->GetRenderPassMaskColorMatrixProgramAA(
+ precision, sampler, blend_mode));
+ EXPECT_PROGRAM_VALID(renderer()->GetRenderPassMaskColorMatrixProgram(
+ precision, sampler, blend_mode));
+ }
}
};
@@ -266,23 +265,24 @@
}
void TestRenderPassMaskProgram(TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode) {
EXPECT_PROGRAM_VALID(
- &renderer_->render_pass_mask_program_[precision][blend_mode]);
+ &renderer_->render_pass_mask_program_[precision][sampler][blend_mode]);
EXPECT_EQ(
- renderer_->render_pass_mask_program_[precision][blend_mode].program(),
+ renderer_->render_pass_mask_program_[precision][sampler][blend_mode]
+ .program(),
renderer_->program_shadow_);
}
void TestRenderPassMaskColorMatrixProgram(TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode) {
- EXPECT_PROGRAM_VALID(
- &renderer_
- ->render_pass_mask_color_matrix_program_[precision][blend_mode]);
- EXPECT_EQ(
- renderer_->render_pass_mask_color_matrix_program_[precision][blend_mode]
- .program(),
- renderer_->program_shadow_);
+ EXPECT_PROGRAM_VALID(&renderer_->render_pass_mask_color_matrix_program_
+ [precision][sampler][blend_mode]);
+ EXPECT_EQ(renderer_->render_pass_mask_color_matrix_program_
+ [precision][sampler][blend_mode].program(),
+ renderer_->program_shadow_);
}
void TestRenderPassProgramAA(TexCoordPrecision precision,
@@ -306,23 +306,25 @@
}
void TestRenderPassMaskProgramAA(TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode) {
EXPECT_PROGRAM_VALID(
- &renderer_->render_pass_mask_program_aa_[precision][blend_mode]);
- EXPECT_EQ(renderer_->render_pass_mask_program_aa_[precision][blend_mode]
- .program(),
- renderer_->program_shadow_);
+ &renderer_
+ ->render_pass_mask_program_aa_[precision][sampler][blend_mode]);
+ EXPECT_EQ(
+ renderer_->render_pass_mask_program_aa_[precision][sampler][blend_mode]
+ .program(),
+ renderer_->program_shadow_);
}
void TestRenderPassMaskColorMatrixProgramAA(TexCoordPrecision precision,
+ SamplerType sampler,
BlendMode blend_mode) {
EXPECT_PROGRAM_VALID(&renderer_->render_pass_mask_color_matrix_program_aa_
- [precision][blend_mode]);
- EXPECT_EQ(
- renderer_
- ->render_pass_mask_color_matrix_program_aa_[precision][blend_mode]
- .program(),
- renderer_->program_shadow_);
+ [precision][sampler][blend_mode]);
+ EXPECT_EQ(renderer_->render_pass_mask_color_matrix_program_aa_
+ [precision][sampler][blend_mode].program(),
+ renderer_->program_shadow_);
}
void TestSolidColorProgramAA() {
@@ -1499,7 +1501,8 @@
viewport_rect,
viewport_rect,
false);
- TestRenderPassMaskProgram(TexCoordPrecisionMedium, blend_mode);
+ TestRenderPassMaskProgram(
+ TexCoordPrecisionMedium, SamplerType2D, blend_mode);
// RenderPassMaskColorMatrixProgram
render_passes_in_draw_order_.clear();
@@ -1524,7 +1527,8 @@
viewport_rect,
viewport_rect,
false);
- TestRenderPassMaskColorMatrixProgram(TexCoordPrecisionMedium, blend_mode);
+ TestRenderPassMaskColorMatrixProgram(
+ TexCoordPrecisionMedium, SamplerType2D, blend_mode);
// RenderPassProgramAA
render_passes_in_draw_order_.clear();
@@ -1607,7 +1611,8 @@
viewport_rect,
viewport_rect,
false);
- TestRenderPassMaskProgramAA(TexCoordPrecisionMedium, blend_mode);
+ TestRenderPassMaskProgramAA(
+ TexCoordPrecisionMedium, SamplerType2D, blend_mode);
// RenderPassMaskColorMatrixProgramAA
render_passes_in_draw_order_.clear();
@@ -1632,7 +1637,8 @@
viewport_rect,
viewport_rect,
false);
- TestRenderPassMaskColorMatrixProgramAA(TexCoordPrecisionMedium, blend_mode);
+ TestRenderPassMaskColorMatrixProgramAA(
+ TexCoordPrecisionMedium, SamplerType2D, blend_mode);
}
}
diff --git a/cc/output/overlay_strategy_single_on_top.cc b/cc/output/overlay_strategy_single_on_top.cc
index 94d35c4..98d77ad 100644
--- a/cc/output/overlay_strategy_single_on_top.cc
+++ b/cc/output/overlay_strategy_single_on_top.cc
@@ -30,7 +30,7 @@
QuadList& quad_list = root_render_pass->quad_list;
auto candidate_iterator = quad_list.end();
for (auto it = quad_list.begin(); it != quad_list.end(); ++it) {
- const DrawQuad* draw_quad = &*it;
+ const DrawQuad* draw_quad = *it;
if (draw_quad->material == DrawQuad::TEXTURE_CONTENT) {
const TextureDrawQuad& quad = *TextureDrawQuad::MaterialCast(draw_quad);
if (!resource_provider_->AllowOverlay(quad.resource_id)) {
@@ -58,7 +58,7 @@
if (candidate_iterator == quad_list.end())
return false;
const TextureDrawQuad& quad =
- *TextureDrawQuad::MaterialCast(&*candidate_iterator);
+ *TextureDrawQuad::MaterialCast(*candidate_iterator);
// Simple quads only.
gfx::OverlayTransform overlay_transform =
diff --git a/cc/output/shader.cc b/cc/output/shader.cc
index 354f65f..eb5041a 100644
--- a/cc/output/shader.cc
+++ b/cc/output/shader.cc
@@ -1434,13 +1434,13 @@
// clang-format on
precision mediump float;
varying TexCoordPrecision vec2 v_texCoord;
- uniform SamplerType s_texture;
+ uniform sampler2D s_texture;
uniform SamplerType s_mask;
uniform TexCoordPrecision vec2 maskTexCoordScale;
uniform TexCoordPrecision vec2 maskTexCoordOffset;
uniform float alpha;
void main() {
- vec4 texColor = TextureLookup(s_texture, v_texCoord);
+ vec4 texColor = texture2D(s_texture, v_texCoord);
TexCoordPrecision vec2 maskTexCoord =
vec2(maskTexCoordOffset.x + v_texCoord.x * maskTexCoordScale.x,
maskTexCoordOffset.y + v_texCoord.y * maskTexCoordScale.y);
@@ -1494,7 +1494,7 @@
return FRAGMENT_SHADER(
// clang-format on
precision mediump float;
- uniform SamplerType s_texture;
+ uniform sampler2D s_texture;
uniform SamplerType s_mask;
uniform TexCoordPrecision vec2 maskTexCoordScale;
uniform TexCoordPrecision vec2 maskTexCoordOffset;
@@ -1503,7 +1503,7 @@
varying TexCoordPrecision vec4 edge_dist[2]; // 8 edge distances.
void main() {
- vec4 texColor = TextureLookup(s_texture, v_texCoord);
+ vec4 texColor = texture2D(s_texture, v_texCoord);
TexCoordPrecision vec2 maskTexCoord =
vec2(maskTexCoordOffset.x + v_texCoord.x * maskTexCoordScale.x,
maskTexCoordOffset.y + v_texCoord.y * maskTexCoordScale.y);
@@ -1567,7 +1567,7 @@
return FRAGMENT_SHADER(
// clang-format on
precision mediump float;
- uniform SamplerType s_texture;
+ uniform sampler2D s_texture;
uniform SamplerType s_mask;
uniform vec2 maskTexCoordScale;
uniform vec2 maskTexCoordOffset;
@@ -1578,7 +1578,7 @@
varying TexCoordPrecision vec4 edge_dist[2]; // 8 edge distances.
void main() {
- vec4 texColor = TextureLookup(s_texture, v_texCoord);
+ vec4 texColor = texture2D(s_texture, v_texCoord);
float nonZeroAlpha = max(texColor.a, 0.00001);
texColor = vec4(texColor.rgb / nonZeroAlpha, nonZeroAlpha);
texColor = colorMatrix * texColor + colorOffset;
@@ -1705,7 +1705,7 @@
// clang-format on
precision mediump float;
varying TexCoordPrecision vec2 v_texCoord;
- uniform SamplerType s_texture;
+ uniform sampler2D s_texture;
uniform SamplerType s_mask;
uniform vec2 maskTexCoordScale;
uniform vec2 maskTexCoordOffset;
@@ -1713,7 +1713,7 @@
uniform vec4 colorOffset;
uniform float alpha;
void main() {
- vec4 texColor = TextureLookup(s_texture, v_texCoord);
+ vec4 texColor = texture2D(s_texture, v_texCoord);
float nonZeroAlpha = max(texColor.a, 0.00001);
texColor = vec4(texColor.rgb / nonZeroAlpha, nonZeroAlpha);
texColor = colorMatrix * texColor + colorOffset;
diff --git a/cc/quads/list_container.cc b/cc/quads/list_container.cc
index cc09a22..04e1c97 100644
--- a/cc/quads/list_container.cc
+++ b/cc/quads/list_container.cc
@@ -272,7 +272,7 @@
template <typename BaseElementType>
void ListContainer<BaseElementType>::EraseAndInvalidateAllPointers(
typename ListContainer<BaseElementType>::Iterator position) {
- BaseElementType* item = &*position;
+ BaseElementType* item = *position;
item->~BaseElementType();
data_->Erase(position);
}
@@ -376,25 +376,25 @@
template <typename BaseElementType>
BaseElementType* ListContainer<BaseElementType>::front() {
Iterator iter = begin();
- return &*iter;
+ return *iter;
}
template <typename BaseElementType>
BaseElementType* ListContainer<BaseElementType>::back() {
ReverseIterator iter = rbegin();
- return &*iter;
+ return *iter;
}
template <typename BaseElementType>
const BaseElementType* ListContainer<BaseElementType>::front() const {
ConstIterator iter = begin();
- return &*iter;
+ return *iter;
}
template <typename BaseElementType>
const BaseElementType* ListContainer<BaseElementType>::back() const {
ConstReverseIterator iter = rbegin();
- return &*iter;
+ return *iter;
}
template <typename BaseElementType>
@@ -409,10 +409,10 @@
break;
index -= current_size;
}
- return &*ConstIterator(data_.get(),
- list_index,
- data_->InnerListById(list_index)->ElementAt(index),
- original_index);
+ return *ConstIterator(data_.get(),
+ list_index,
+ data_->InnerListById(list_index)->ElementAt(index),
+ original_index);
}
template <typename BaseElementType>
@@ -426,10 +426,10 @@
break;
index -= current_size;
}
- return &*Iterator(data_.get(),
- list_index,
- data_->InnerListById(list_index)->ElementAt(index),
- original_index);
+ return *Iterator(data_.get(),
+ list_index,
+ data_->InnerListById(list_index)->ElementAt(index),
+ original_index);
}
template <typename BaseElementType>
@@ -486,8 +486,8 @@
}
template <typename BaseElementType>
-BaseElementType& ListContainer<BaseElementType>::Iterator::operator*() const {
- return *(reinterpret_cast<BaseElementType*>(this->item_iterator));
+BaseElementType* ListContainer<BaseElementType>::Iterator::operator*() const {
+ return reinterpret_cast<BaseElementType*>(this->item_iterator);
}
template <typename BaseElementType>
@@ -500,9 +500,9 @@
}
template <typename BaseElementType>
-typename ListContainer<BaseElementType>::Iterator
-ListContainer<BaseElementType>::Iterator::
-operator++() {
+typename ListContainer<BaseElementType>::Iterator&
+ ListContainer<BaseElementType>::Iterator::
+ operator++() {
this->Increment();
++index_;
return *this;
@@ -542,9 +542,9 @@
}
template <typename BaseElementType>
-const BaseElementType& ListContainer<BaseElementType>::ConstIterator::
+const BaseElementType* ListContainer<BaseElementType>::ConstIterator::
operator*() const {
- return *(reinterpret_cast<const BaseElementType*>(this->item_iterator));
+ return reinterpret_cast<const BaseElementType*>(this->item_iterator);
}
template <typename BaseElementType>
@@ -557,9 +557,9 @@
}
template <typename BaseElementType>
-typename ListContainer<BaseElementType>::ConstIterator
-ListContainer<BaseElementType>::ConstIterator::
-operator++() {
+typename ListContainer<BaseElementType>::ConstIterator&
+ ListContainer<BaseElementType>::ConstIterator::
+ operator++() {
this->Increment();
++index_;
return *this;
@@ -593,9 +593,9 @@
}
template <typename BaseElementType>
-BaseElementType& ListContainer<BaseElementType>::ReverseIterator::operator*()
+BaseElementType* ListContainer<BaseElementType>::ReverseIterator::operator*()
const {
- return *(reinterpret_cast<BaseElementType*>(this->item_iterator));
+ return reinterpret_cast<BaseElementType*>(this->item_iterator);
}
template <typename BaseElementType>
@@ -608,9 +608,9 @@
}
template <typename BaseElementType>
-typename ListContainer<BaseElementType>::ReverseIterator
-ListContainer<BaseElementType>::ReverseIterator::
-operator++() {
+typename ListContainer<BaseElementType>::ReverseIterator&
+ ListContainer<BaseElementType>::ReverseIterator::
+ operator++() {
this->ReverseIncrement();
++index_;
return *this;
@@ -650,9 +650,9 @@
}
template <typename BaseElementType>
-const BaseElementType& ListContainer<BaseElementType>::ConstReverseIterator::
+const BaseElementType* ListContainer<BaseElementType>::ConstReverseIterator::
operator*() const {
- return *(reinterpret_cast<const BaseElementType*>(this->item_iterator));
+ return reinterpret_cast<const BaseElementType*>(this->item_iterator);
}
template <typename BaseElementType>
@@ -665,9 +665,9 @@
}
template <typename BaseElementType>
-typename ListContainer<BaseElementType>::ConstReverseIterator
-ListContainer<BaseElementType>::ConstReverseIterator::
-operator++() {
+typename ListContainer<BaseElementType>::ConstReverseIterator&
+ ListContainer<BaseElementType>::ConstReverseIterator::
+ operator++() {
this->ReverseIncrement();
++index_;
return *this;
diff --git a/cc/quads/list_container.h b/cc/quads/list_container.h
index 24c24ac..a61a6e0 100644
--- a/cc/quads/list_container.h
+++ b/cc/quads/list_container.h
@@ -78,9 +78,9 @@
size_t index);
~Iterator();
BaseElementType* operator->() const;
- BaseElementType& operator*() const;
+ BaseElementType* operator*() const;
Iterator operator++(int unused_post_increment);
- Iterator operator++();
+ Iterator& operator++();
size_t index() const;
@@ -103,9 +103,9 @@
ConstIterator(const Iterator& other); // NOLINT
~ConstIterator();
const BaseElementType* operator->() const;
- const BaseElementType& operator*() const;
+ const BaseElementType* operator*() const;
ConstIterator operator++(int unused_post_increment);
- ConstIterator operator++();
+ ConstIterator& operator++();
size_t index() const;
@@ -128,9 +128,9 @@
size_t index);
~ReverseIterator();
BaseElementType* operator->() const;
- BaseElementType& operator*() const;
+ BaseElementType* operator*() const;
ReverseIterator operator++(int unused_post_increment);
- ReverseIterator operator++();
+ ReverseIterator& operator++();
size_t index() const;
@@ -154,9 +154,9 @@
ConstReverseIterator(const ReverseIterator& other); // NOLINT
~ConstReverseIterator();
const BaseElementType* operator->() const;
- const BaseElementType& operator*() const;
+ const BaseElementType* operator*() const;
ConstReverseIterator operator++(int unused_post_increment);
- ConstReverseIterator operator++();
+ ConstReverseIterator& operator++();
size_t index() const;
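
The iterator change above is the one that ripples through most of this patch: operator*() now yields a pointer to the element (matching operator->()), and pre-increment returns the iterator by reference. A before/after sketch of a typical quad-list loop, using only names that appear in the surrounding hunks (illustration only, not new code in this patch):

// Before this patch: operator*() returned a reference, so call sites took the
// element by reference and needed an explicit address-of to get a pointer:
//   for (auto& quad : pass->quad_list)
//     quad.IterateResources(callback);
//   const DrawQuad* front = &*quad_list.begin();
//
// After this patch: operator*() returns a pointer and operator++() returns
// *this by reference, so the same loop becomes:
for (const auto& quad : pass->quad_list)
  quad->IterateResources(callback);
const DrawQuad* front = *quad_list.begin();  // dereference already yields DrawQuad*
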
diff --git a/cc/quads/list_container_unittest.cc b/cc/quads/list_container_unittest.cc
index 31a479b..d68ab59 100644
--- a/cc/quads/list_container_unittest.cc
+++ b/cc/quads/list_container_unittest.cc
@@ -230,7 +230,7 @@
sqs_list.begin();
sqs_iter != sqs_list.end();
++sqs_iter) {
- EXPECT_EQ(*sqs_iter, &*iter);
+ EXPECT_EQ(*sqs_iter, *iter);
++iter;
}
}
@@ -315,7 +315,7 @@
sqs_list.begin();
sqs_iter != sqs_list.end();
++sqs_iter) {
- EXPECT_EQ(*sqs_iter, &*iter);
+ EXPECT_EQ(*sqs_iter, *iter);
++iter;
}
}
@@ -335,7 +335,7 @@
for (ListContainer<SharedQuadState>::Iterator iter = list.begin();
iter != list.end();
++iter) {
- EXPECT_EQ(*sqs_iter, &*iter);
+ EXPECT_EQ(*sqs_iter, *iter);
++num_iters_in_list;
++sqs_iter;
}
@@ -348,7 +348,7 @@
sqs_list.begin();
sqs_iter != sqs_list.end();
++sqs_iter) {
- EXPECT_EQ(*sqs_iter, &*iter);
+ EXPECT_EQ(*sqs_iter, *iter);
++num_iters_in_vector;
++iter;
}
@@ -372,8 +372,8 @@
for (ListContainer<SharedQuadState>::ConstIterator iter = list.begin();
iter != list.end();
++iter) {
- EXPECT_TRUE(isConstSharedQuadStatePointer(&*iter));
- EXPECT_EQ(*sqs_iter, &*iter);
+ EXPECT_TRUE(isConstSharedQuadStatePointer(*iter));
+ EXPECT_EQ(*sqs_iter, *iter);
++sqs_iter;
}
}
@@ -384,8 +384,8 @@
for (ListContainer<SharedQuadState>::Iterator iter = list.begin();
iter != list.end();
++iter) {
- EXPECT_FALSE(isConstSharedQuadStatePointer(&*iter));
- EXPECT_EQ(*sqs_iter, &*iter);
+ EXPECT_FALSE(isConstSharedQuadStatePointer(*iter));
+ EXPECT_EQ(*sqs_iter, *iter);
++sqs_iter;
}
}
@@ -396,7 +396,7 @@
sqs_list.begin();
sqs_iter != sqs_list.end();
++sqs_iter) {
- EXPECT_EQ(*sqs_iter, &*iter);
+ EXPECT_EQ(*sqs_iter, *iter);
++iter;
}
}
@@ -417,7 +417,7 @@
for (ListContainer<SharedQuadState>::ReverseIterator iter = list.rbegin();
iter != list.rend();
++iter) {
- EXPECT_EQ(*sqs_iter, &(*iter));
+ EXPECT_EQ(*sqs_iter, *iter);
++sqs_iter;
}
}
@@ -428,7 +428,7 @@
sqs_list.rbegin();
sqs_iter != sqs_list.rend();
++sqs_iter) {
- EXPECT_EQ(*sqs_iter, &(*iter));
+ EXPECT_EQ(*sqs_iter, *iter);
++iter;
}
}
@@ -451,7 +451,7 @@
for (ListContainer<DrawQuad>::Iterator iter = list.begin();
iter != list.end();
++iter) {
- EXPECT_EQ(i, static_cast<SimpleDrawQuad*>(&*iter)->get_value());
+ EXPECT_EQ(i, static_cast<SimpleDrawQuad*>(*iter)->get_value());
++i;
}
}
@@ -468,7 +468,7 @@
ListContainer<DrawQuad>::Iterator iter = list.begin();
for (int i = 0; i < 10; ++i) {
- static_cast<SimpleDrawQuad*>(&*iter)->set_value(i);
+ static_cast<SimpleDrawQuad*>(*iter)->set_value(i);
++iter;
}
diff --git a/cc/quads/render_pass.cc b/cc/quads/render_pass.cc
index 2cb72de..e311aec 100644
--- a/cc/quads/render_pass.cc
+++ b/cc/quads/render_pass.cc
@@ -115,29 +115,29 @@
for (const auto& shared_quad_state : source->shared_quad_state_list) {
SharedQuadState* copy_shared_quad_state =
copy_pass->CreateAndAppendSharedQuadState();
- copy_shared_quad_state->CopyFrom(&shared_quad_state);
+ copy_shared_quad_state->CopyFrom(shared_quad_state);
}
SharedQuadStateList::Iterator sqs_iter =
source->shared_quad_state_list.begin();
SharedQuadStateList::Iterator copy_sqs_iter =
copy_pass->shared_quad_state_list.begin();
for (const auto& quad : source->quad_list) {
- while (quad.shared_quad_state != &*sqs_iter) {
+ while (quad->shared_quad_state != *sqs_iter) {
++sqs_iter;
++copy_sqs_iter;
DCHECK(sqs_iter != source->shared_quad_state_list.end());
}
- DCHECK(quad.shared_quad_state == &*sqs_iter);
+ DCHECK(quad->shared_quad_state == *sqs_iter);
- SharedQuadState* copy_shared_quad_state = &*copy_sqs_iter;
+ SharedQuadState* copy_shared_quad_state = *copy_sqs_iter;
- if (quad.material == DrawQuad::RENDER_PASS) {
+ if (quad->material == DrawQuad::RENDER_PASS) {
const RenderPassDrawQuad* pass_quad =
- RenderPassDrawQuad::MaterialCast(&quad);
+ RenderPassDrawQuad::MaterialCast(quad);
copy_pass->CopyFromAndAppendRenderPassDrawQuad(
pass_quad, copy_shared_quad_state, pass_quad->render_pass_id);
} else {
- copy_pass->CopyFromAndAppendDrawQuad(&quad, copy_shared_quad_state);
+ copy_pass->CopyFromAndAppendDrawQuad(quad, copy_shared_quad_state);
}
}
out->push_back(copy_pass.Pass());
@@ -196,7 +196,7 @@
value->BeginArray("shared_quad_state_list");
for (const auto& shared_quad_state : shared_quad_state_list) {
value->BeginDictionary();
- shared_quad_state.AsValueInto(value);
+ shared_quad_state->AsValueInto(value);
value->EndDictionary();
}
value->EndArray();
@@ -204,7 +204,7 @@
value->BeginArray("quad_list");
for (const auto& quad : quad_list) {
value->BeginDictionary();
- quad.AsValueInto(value);
+ quad->AsValueInto(value);
value->EndDictionary();
}
value->EndArray();
diff --git a/cc/resources/one_copy_raster_worker_pool.cc b/cc/resources/one_copy_raster_worker_pool.cc
index 831bb76..c803262 100644
--- a/cc/resources/one_copy_raster_worker_pool.cc
+++ b/cc/resources/one_copy_raster_worker_pool.cc
@@ -47,7 +47,8 @@
// Return raster resource to pool so it can be used by another RasterBuffer
// instance.
- resource_pool_->ReleaseResource(raster_resource_.Pass());
+ if (raster_resource_)
+ resource_pool_->ReleaseResource(raster_resource_.Pass());
}
// Overridden from RasterBuffer:
@@ -55,22 +56,14 @@
const gfx::Rect& rect,
float scale,
RenderingStatsInstrumentation* stats) override {
- gfx::GpuMemoryBuffer* gpu_memory_buffer = lock_->GetGpuMemoryBuffer();
- if (!gpu_memory_buffer)
- return;
-
- RasterWorkerPool::PlaybackToMemory(gpu_memory_buffer->Map(),
- raster_resource_->format(),
- raster_resource_->size(),
- gpu_memory_buffer->GetStride(),
- raster_source,
- rect,
- scale,
- stats);
- gpu_memory_buffer->Unmap();
-
- sequence_ = worker_pool_->ScheduleCopyOnWorkerThread(
- lock_.Pass(), raster_resource_.get(), resource_);
+ sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
+ lock_.Pass(),
+ raster_resource_.Pass(),
+ resource_,
+ raster_source,
+ rect,
+ scale,
+ stats);
}
private:
@@ -88,13 +81,23 @@
// Flush interval when performing copy operations.
const int kCopyFlushPeriod = 4;
+// Number of in-flight copy operations to allow.
+const int kMaxCopyOperations = 16;
+
+// Delay between checks for copy operations to complete.
+const int kCheckForCompletedCopyOperationsTickRateMs = 1;
+
+// Number of failed attempts to allow before we perform a check that will
+// wait for copy operations to complete if needed.
+const int kFailedAttemptsBeforeWaitIfNeeded = 256;
+
} // namespace
OneCopyRasterWorkerPool::CopyOperation::CopyOperation(
scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
- ResourceProvider::ResourceId src,
- ResourceProvider::ResourceId dst)
- : write_lock(write_lock.Pass()), src(src), dst(dst) {
+ scoped_ptr<ScopedResource> src,
+ const Resource* dst)
+ : write_lock(write_lock.Pass()), src(src.Pass()), dst(dst) {
}
OneCopyRasterWorkerPool::CopyOperation::~CopyOperation() {
@@ -129,13 +132,20 @@
resource_pool_(resource_pool),
last_issued_copy_operation_(0),
last_flushed_copy_operation_(0),
+ lock_(),
+ copy_operation_count_cv_(&lock_),
+ scheduled_copy_operation_count_(0),
+ issued_copy_operation_count_(0),
next_copy_operation_sequence_(1),
+ check_for_completed_copy_operations_pending_(false),
+ shutdown_(false),
weak_ptr_factory_(this),
raster_finished_weak_ptr_factory_(this) {
DCHECK(context_provider_);
}
OneCopyRasterWorkerPool::~OneCopyRasterWorkerPool() {
+ DCHECK_EQ(scheduled_copy_operation_count_, 0u);
}
Rasterizer* OneCopyRasterWorkerPool::AsRasterizer() {
@@ -149,6 +159,13 @@
void OneCopyRasterWorkerPool::Shutdown() {
TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::Shutdown");
+ {
+ base::AutoLock lock(lock_);
+
+ shutdown_ = true;
+ copy_operation_count_cv_.Signal();
+ }
+
TaskGraph empty;
task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
@@ -182,7 +199,7 @@
task_set));
}
- resource_pool_->CheckBusyResources();
+ resource_pool_->CheckBusyResources(false);
for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
it != queue->items.end();
@@ -256,19 +273,74 @@
// Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
-CopySequenceNumber OneCopyRasterWorkerPool::ScheduleCopyOnWorkerThread(
+CopySequenceNumber
+OneCopyRasterWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
- const Resource* src,
- const Resource* dst) {
+ scoped_ptr<ScopedResource> src,
+ const Resource* dst,
+ const RasterSource* raster_source,
+ const gfx::Rect& rect,
+ float scale,
+ RenderingStatsInstrumentation* stats) {
CopySequenceNumber sequence;
{
base::AutoLock lock(lock_);
+ int failed_attempts = 0;
+ while ((scheduled_copy_operation_count_ + issued_copy_operation_count_) >=
+ kMaxCopyOperations) {
+ // Ignore limit when shutdown is set.
+ if (shutdown_)
+ break;
+
+ ++failed_attempts;
+
+ // Schedule a check that will also wait for operations to complete
+ // after too many failed attempts.
+ bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;
+
+ // Schedule a check for completed copy operations if too many operations
+ // are currently in-flight.
+ ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);
+
+ {
+ TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");
+
+ // Wait for in-flight copy operations to drop below limit.
+ copy_operation_count_cv_.Wait();
+ }
+ }
+
+ // Increment |scheduled_copy_operation_count_| before releasing |lock_|.
+ ++scheduled_copy_operation_count_;
+
+ // There may be more work available, so wake up another worker thread.
+ copy_operation_count_cv_.Signal();
+
+ {
+ base::AutoUnlock unlock(lock_);
+
+ gfx::GpuMemoryBuffer* gpu_memory_buffer =
+ write_lock->GetGpuMemoryBuffer();
+ if (gpu_memory_buffer) {
+ RasterWorkerPool::PlaybackToMemory(gpu_memory_buffer->Map(),
+ src->format(),
+ src->size(),
+ gpu_memory_buffer->GetStride(),
+ raster_source,
+ rect,
+ scale,
+ stats);
+ gpu_memory_buffer->Unmap();
+ }
+ }
+
+ // Acquire a sequence number for this copy operation.
sequence = next_copy_operation_sequence_++;
- pending_copy_operations_.push_back(make_scoped_ptr(
- new CopyOperation(write_lock.Pass(), src->id(), dst->id())));
+ pending_copy_operations_.push_back(
+ make_scoped_ptr(new CopyOperation(write_lock.Pass(), src.Pass(), dst)));
}
// Post task that will advance last flushed copy operation to |sequence|
@@ -333,6 +405,13 @@
DCHECK(!pending_copy_operations_.empty());
copy_operations.push_back(pending_copy_operations_.take_front());
}
+
+ // Decrement |scheduled_copy_operation_count_| and increment
+ // |issued_copy_operation_count_| to reflect the transition of copy
+ // operations from "pending" to "issued" state.
+ DCHECK_GE(scheduled_copy_operation_count_, copy_operations.size());
+ scheduled_copy_operation_count_ -= copy_operations.size();
+ issued_copy_operation_count_ += copy_operations.size();
}
while (!copy_operations.empty()) {
@@ -342,7 +421,68 @@
copy_operation->write_lock.reset();
// Copy contents of source resource to destination resource.
- resource_provider_->CopyResource(copy_operation->src, copy_operation->dst);
+ resource_provider_->CopyResource(copy_operation->src->id(),
+ copy_operation->dst->id());
+
+ // Return source resource to pool where it can be reused once copy
+ // operation has completed and resource is no longer busy.
+ resource_pool_->ReleaseResource(copy_operation->src.Pass());
+ }
+}
+
+void OneCopyRasterWorkerPool::
+ ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
+ bool wait_if_needed) {
+ lock_.AssertAcquired();
+
+ if (check_for_completed_copy_operations_pending_)
+ return;
+
+ base::TimeTicks now = base::TimeTicks::Now();
+
+ // Schedule a check for completed copy operations as soon as possible but
+ // don't allow two consecutive checks to be scheduled to run less than the
+ // tick rate apart.
+ base::TimeTicks next_check_for_completed_copy_operations_time =
+ std::max(last_check_for_completed_copy_operations_time_ +
+ base::TimeDelta::FromMilliseconds(
+ kCheckForCompletedCopyOperationsTickRateMs),
+ now);
+
+ task_runner_->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&OneCopyRasterWorkerPool::CheckForCompletedCopyOperations,
+ weak_ptr_factory_.GetWeakPtr(),
+ wait_if_needed),
+ next_check_for_completed_copy_operations_time - now);
+
+ last_check_for_completed_copy_operations_time_ =
+ next_check_for_completed_copy_operations_time;
+ check_for_completed_copy_operations_pending_ = true;
+}
+
+void OneCopyRasterWorkerPool::CheckForCompletedCopyOperations(
+ bool wait_if_needed) {
+ TRACE_EVENT1("cc",
+ "OneCopyRasterWorkerPool::CheckForCompletedCopyOperations",
+ "wait_if_needed",
+ wait_if_needed);
+
+ resource_pool_->CheckBusyResources(wait_if_needed);
+
+ {
+ base::AutoLock lock(lock_);
+
+ DCHECK(check_for_completed_copy_operations_pending_);
+ check_for_completed_copy_operations_pending_ = false;
+
+ // The number of busy resources in the pool reflects the number of issued
+ // copy operations that have not yet completed.
+ issued_copy_operation_count_ = resource_pool_->busy_resource_count();
+
+ // There may be work blocked on too many in-flight copy operations, so wake
+ // up a worker thread.
+ copy_operation_count_cv_.Signal();
}
}
@@ -361,6 +501,7 @@
return state;
}
+
void OneCopyRasterWorkerPool::StagingStateAsValueInto(
base::debug::TracedValue* staging_state) const {
staging_state->SetInteger("staging_resource_count",
diff --git a/cc/resources/one_copy_raster_worker_pool.h b/cc/resources/one_copy_raster_worker_pool.h
index 7372fb7..1dc102f 100644
--- a/cc/resources/one_copy_raster_worker_pool.h
+++ b/cc/resources/one_copy_raster_worker_pool.h
@@ -23,7 +23,7 @@
namespace cc {
class ResourcePool;
-class ResourceProvider;
+class ScopedResource;
typedef int64 CopySequenceNumber;
@@ -54,12 +54,16 @@
const Resource* resource) override;
void ReleaseBufferForRaster(scoped_ptr<RasterBuffer> buffer) override;
- // Schedule copy of |src| resource to |dst| resource. Returns a non-zero
- // sequence number for this copy operation.
- CopySequenceNumber ScheduleCopyOnWorkerThread(
+ // Playback raster source and schedule copy of |src| resource to |dst|
+ // resource. Returns a non-zero sequence number for this copy operation.
+ CopySequenceNumber PlaybackAndScheduleCopyOnWorkerThread(
scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
- const Resource* src,
- const Resource* dst);
+ scoped_ptr<ScopedResource> src,
+ const Resource* dst,
+ const RasterSource* raster_source,
+ const gfx::Rect& rect,
+ float scale,
+ RenderingStatsInstrumentation* stats);
// Issues copy operations until |sequence| has been processed. This will
// return immediately if |sequence| has already been processed.
@@ -78,18 +82,21 @@
CopyOperation(
scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
- ResourceProvider::ResourceId src,
- ResourceProvider::ResourceId dst);
+ scoped_ptr<ScopedResource> src,
+ const Resource* dst);
~CopyOperation();
scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock;
- ResourceProvider::ResourceId src;
- ResourceProvider::ResourceId dst;
+ scoped_ptr<ScopedResource> src;
+ const Resource* dst;
};
void OnRasterFinished(TaskSet task_set);
void AdvanceLastFlushedCopyTo(CopySequenceNumber sequence);
void IssueCopyOperations(int64 count);
+ void ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
+ bool wait_if_needed);
+ void CheckForCompletedCopyOperations(bool wait_if_needed);
scoped_refptr<base::debug::ConvertableToTraceFormat> StateAsValue() const;
void StagingStateAsValueInto(base::debug::TracedValue* staging_state) const;
@@ -112,8 +119,14 @@
base::Lock lock_;
// |lock_| must be acquired when accessing the following members.
+ base::ConditionVariable copy_operation_count_cv_;
+ size_t scheduled_copy_operation_count_;
+ size_t issued_copy_operation_count_;
CopyOperation::Deque pending_copy_operations_;
CopySequenceNumber next_copy_operation_sequence_;
+ bool check_for_completed_copy_operations_pending_;
+ base::TimeTicks last_check_for_completed_copy_operations_time_;
+ bool shutdown_;
base::WeakPtrFactory<OneCopyRasterWorkerPool> weak_ptr_factory_;
// "raster finished" tasks need their own factory as they need to be
diff --git a/cc/resources/picture_pile.cc b/cc/resources/picture_pile.cc
index 2f62056..e00ed54 100644
--- a/cc/resources/picture_pile.cc
+++ b/cc/resources/picture_pile.cc
@@ -570,7 +570,8 @@
}
skia::AnalysisCanvas canvas(recorded_viewport_.width(),
recorded_viewport_.height());
- picture->Raster(&canvas, NULL, Region(), 1.0f);
+ canvas.translate(-recorded_viewport_.x(), -recorded_viewport_.y());
+ picture->Raster(&canvas, nullptr, Region(), 1.0f);
is_solid_color_ = canvas.GetColorIfSolid(&solid_color_);
}
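
The added translate matters because the recorded viewport generally does not start at the origin; rastering into an analysis canvas sized to the viewport only samples the right pixels once the picture is shifted back to (0, 0). A short sketch of the analysis step, using only calls that appear in the hunk above (the local variables are illustrative):

// Analyze whether an offset recorded viewport is a single solid color.
gfx::Rect viewport = recorded_viewport_;  // e.g. (1000, 1000) + 400x400
skia::AnalysisCanvas canvas(viewport.width(), viewport.height());
canvas.translate(-viewport.x(), -viewport.y());  // Map viewport to the origin.
picture->Raster(&canvas, nullptr, Region(), 1.0f);
SkColor solid_color;
bool is_solid = canvas.GetColorIfSolid(&solid_color);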
diff --git a/cc/resources/picture_pile_unittest.cc b/cc/resources/picture_pile_unittest.cc
index d6d75fa..98415ca 100644
--- a/cc/resources/picture_pile_unittest.cc
+++ b/cc/resources/picture_pile_unittest.cc
@@ -1432,5 +1432,29 @@
EXPECT_FALSE(pile_.is_solid_color());
}
+TEST_F(PicturePileTest, NonSolidRectangleOnOffsettedLayerIsNonSolid) {
+ gfx::Rect visible_rect(tiling_rect());
+ visible_rect.Offset(gfx::Vector2d(1000, 1000));
+ // The picture pile requires that the tiling completely encompass the viewport
+ // to make this test work correctly since the recorded viewport is an
+ // intersection of the tile size and viewport rect. This is possibly a flaw
+ // in |PicturePile|.
+ gfx::Size tiling_size(visible_rect.right(), visible_rect.bottom());
+ // |Setup()| will create pictures here that mess with the test, so clear it.
+ pile_.Clear();
+
+ SkPaint paint;
+ paint.setColor(SK_ColorCYAN);
+
+ // Add a rect that doesn't cover the viewport completely, so the solid
+ // state will be false.
+ gfx::Rect small_rect = visible_rect;
+ small_rect.Inset(10, 10, 10, 10);
+ client_.add_draw_rect(small_rect, paint);
+ Region invalidation(visible_rect);
+ UpdateAndExpandInvalidation(&invalidation, tiling_size, visible_rect);
+ EXPECT_FALSE(pile_.is_solid_color());
+}
+
} // namespace
} // namespace cc
diff --git a/cc/resources/pixel_buffer_raster_worker_pool.cc b/cc/resources/pixel_buffer_raster_worker_pool.cc
index 3409655..26ab728 100644
--- a/cc/resources/pixel_buffer_raster_worker_pool.cc
+++ b/cc/resources/pixel_buffer_raster_worker_pool.cc
@@ -169,7 +169,7 @@
CheckForCompletedRasterizerTasks();
CheckForCompletedUploads();
- check_for_completed_raster_task_notifier_.Cancel();
+ check_for_completed_raster_task_notifier_.Shutdown();
for (RasterTaskState::Vector::iterator it = raster_task_states_.begin();
it != raster_task_states_.end();
diff --git a/cc/resources/resource_pool.cc b/cc/resources/resource_pool.cc
index 31b9b27..45f36e2 100644
--- a/cc/resources/resource_pool.cc
+++ b/cc/resources/resource_pool.cc
@@ -105,12 +105,15 @@
return false;
}
-void ResourcePool::CheckBusyResources() {
+void ResourcePool::CheckBusyResources(bool wait_if_needed) {
ResourceList::iterator it = busy_resources_.begin();
while (it != busy_resources_.end()) {
ScopedResource* resource = *it;
+ if (wait_if_needed)
+ resource_provider_->WaitReadLockIfNeeded(resource->id());
+
if (resource_provider_->CanLockForWrite(resource->id())) {
DidFinishUsingResource(resource);
it = busy_resources_.erase(it);
diff --git a/cc/resources/resource_pool.h b/cc/resources/resource_pool.h
index 5f481e9..e1ee35a 100644
--- a/cc/resources/resource_pool.h
+++ b/cc/resources/resource_pool.h
@@ -34,7 +34,10 @@
size_t max_resource_count);
void ReduceResourceUsage();
- void CheckBusyResources();
+ // This might block if |wait_if_needed| is true and one of the currently
+ // busy resources has a read lock fence that needs to be waited upon before
+ // it can be locked for write again.
+ void CheckBusyResources(bool wait_if_needed);
size_t total_memory_usage_bytes() const { return memory_usage_bytes_; }
size_t acquired_memory_usage_bytes() const {
@@ -44,6 +47,7 @@
size_t acquired_resource_count() const {
return resource_count_ - unused_resources_.size();
}
+ size_t busy_resource_count() const { return busy_resources_.size(); }
ResourceFormat resource_format() const { return format_; }
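
The two call sites touched by this patch use the new argument differently; a small sketch of both patterns, with names taken from the surrounding code:

// Periodic bookkeeping (e.g. TileManager): never block, only reclaim
// resources whose read lock fences have already passed.
resource_pool_->CheckBusyResources(false);

// Backpressure path (OneCopyRasterWorkerPool): block on outstanding read
// lock fences so busy resources become reusable, then inspect the count.
resource_pool_->CheckBusyResources(true /* wait_if_needed */);
size_t in_flight_copies = resource_pool_->busy_resource_count();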
diff --git a/cc/resources/resource_provider.cc b/cc/resources/resource_provider.cc
index 14f4880..5802383 100644
--- a/cc/resources/resource_provider.cc
+++ b/cc/resources/resource_provider.cc
@@ -213,6 +213,10 @@
query_id_, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
return !!available;
}
+ void Wait() override {
+ unsigned result = 0;
+ gl_->GetQueryObjectuivEXT(query_id_, GL_QUERY_RESULT_EXT, &result);
+ }
private:
~QueryFence() override {}
@@ -724,7 +728,7 @@
resource->pixels = NULL;
}
if (resource->gpu_memory_buffer) {
- DCHECK(resource->origin != Resource::External);
+ DCHECK(resource->origin == Resource::Internal);
delete resource->gpu_memory_buffer;
resource->gpu_memory_buffer = NULL;
}
@@ -1026,6 +1030,7 @@
}
ResourceProvider::ScopedWriteLockSoftware::~ScopedWriteLockSoftware() {
+ DCHECK(thread_checker_.CalledOnValidThread());
resource_provider_->UnlockForWrite(resource_);
}
@@ -1044,6 +1049,7 @@
ResourceProvider::ScopedWriteLockGpuMemoryBuffer::
~ScopedWriteLockGpuMemoryBuffer() {
+ DCHECK(thread_checker_.CalledOnValidThread());
resource_provider_->UnlockForWrite(resource_);
if (!gpu_memory_buffer_)
return;
@@ -1089,11 +1095,13 @@
}
ResourceProvider::ScopedWriteLockGr::~ScopedWriteLockGr() {
+ DCHECK(thread_checker_.CalledOnValidThread());
resource_provider_->UnlockForWrite(resource_);
}
SkSurface* ResourceProvider::ScopedWriteLockGr::GetSkSurface(
bool use_distance_field_text) {
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(resource_->locked_for_write);
// If the surface doesn't exist, or doesn't have the correct dff setting,
@@ -1126,6 +1134,35 @@
return resource_->sk_surface.get();
}
+ResourceProvider::SynchronousFence::SynchronousFence(
+ gpu::gles2::GLES2Interface* gl)
+ : gl_(gl), has_synchronized_(true) {
+}
+
+ResourceProvider::SynchronousFence::~SynchronousFence() {
+}
+
+void ResourceProvider::SynchronousFence::Set() {
+ has_synchronized_ = false;
+}
+
+bool ResourceProvider::SynchronousFence::HasPassed() {
+ if (!has_synchronized_) {
+ has_synchronized_ = true;
+ Synchronize();
+ }
+ return true;
+}
+
+void ResourceProvider::SynchronousFence::Wait() {
+ HasPassed();
+}
+
+void ResourceProvider::SynchronousFence::Synchronize() {
+ TRACE_EVENT0("cc", "ResourceProvider::SynchronousFence::Synchronize");
+ gl_->Finish();
+}
+
ResourceProvider::ResourceProvider(
OutputSurface* output_surface,
SharedBitmapManager* shared_bitmap_manager,
@@ -2019,11 +2056,12 @@
gl->BindTexture(source_resource->target, source_resource->gl_id);
BindImageForSampling(source_resource);
}
- DCHECK(use_sync_query_) << "CHROMIUM_sync_query extension missing";
- if (!source_resource->gl_read_lock_query_id)
- gl->GenQueriesEXT(1, &source_resource->gl_read_lock_query_id);
- gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
- source_resource->gl_read_lock_query_id);
+ if (use_sync_query_) {
+ if (!source_resource->gl_read_lock_query_id)
+ gl->GenQueriesEXT(1, &source_resource->gl_read_lock_query_id);
+ gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
+ source_resource->gl_read_lock_query_id);
+ }
DCHECK(!dest_resource->image_id);
dest_resource->allocated = true;
gl->CopyTextureCHROMIUM(dest_resource->target,
@@ -2032,11 +2070,22 @@
0,
GLInternalFormat(dest_resource->format),
GLDataType(dest_resource->format));
- // End query and create a read lock fence that will prevent access to
- // source resource until CopyTextureCHROMIUM command has completed.
- gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
- source_resource->read_lock_fence = make_scoped_refptr(
- new QueryFence(gl, source_resource->gl_read_lock_query_id));
+ if (source_resource->gl_read_lock_query_id) {
+ // End query and create a read lock fence that will prevent access to
+ // source resource until CopyTextureCHROMIUM command has completed.
+ gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
+ source_resource->read_lock_fence = make_scoped_refptr(
+ new QueryFence(gl, source_resource->gl_read_lock_query_id));
+ } else {
+ // Create a SynchronousFence when the CHROMIUM_sync_query extension is
+ // missing. Try to use one synchronous fence for as many CopyResource
+ // operations as possible, as that reduces the number of times we have to
+ // synchronize with the GL.
+ if (!synchronous_fence_.get() || synchronous_fence_->has_synchronized())
+ synchronous_fence_ = make_scoped_refptr(new SynchronousFence(gl));
+ source_resource->read_lock_fence = synchronous_fence_;
+ source_resource->read_lock_fence->Set();
+ }
}
void ResourceProvider::WaitSyncPointIfNeeded(ResourceId id) {
@@ -2054,6 +2103,15 @@
resource->mailbox.set_sync_point(0);
}
+void ResourceProvider::WaitReadLockIfNeeded(ResourceId id) {
+ Resource* resource = GetResource(id);
+ DCHECK_EQ(resource->exported_count, 0);
+ if (!resource->read_lock_fence.get())
+ return;
+
+ resource->read_lock_fence->Wait();
+}
+
GLint ResourceProvider::GetActiveTextureUnit(GLES2Interface* gl) {
GLint active_unit = 0;
gl->GetIntegerv(GL_ACTIVE_TEXTURE, &active_unit);
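
The fallback fence above synchronizes lazily: it only calls glFinish the first time someone waits on it after it was set, and a single instance is shared across consecutive CopyResource calls. A sketch of its lifecycle, assuming |gl| is a valid GLES2Interface:

// Lifecycle of the CHROMIUM_sync_query fallback fence (sketch).
scoped_refptr<ResourceProvider::SynchronousFence> fence(
    new ResourceProvider::SynchronousFence(gl));
fence->Set();    // Pending: has_synchronized() is now false, so later
                 // CopyResource calls can keep sharing this fence.
// ... several CopyResource calls attach the same fence ...
fence->Wait();   // First wait performs exactly one gl->Finish().
fence->HasPassed();         // Now returns true without blocking again.
fence->has_synchronized();  // True, so the next copy allocates a new fence.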
diff --git a/cc/resources/resource_provider.h b/cc/resources/resource_provider.h
index 5c69b0c..40103f6 100644
--- a/cc/resources/resource_provider.h
+++ b/cc/resources/resource_provider.h
@@ -304,6 +304,7 @@
ResourceProvider::Resource* resource_;
SkBitmap sk_bitmap_;
scoped_ptr<SkCanvas> sk_canvas_;
+ base::ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(ScopedWriteLockSoftware);
};
@@ -323,6 +324,7 @@
gfx::GpuMemoryBuffer* gpu_memory_buffer_;
gfx::Size size_;
ResourceFormat format_;
+ base::ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(ScopedWriteLockGpuMemoryBuffer);
};
@@ -338,6 +340,7 @@
private:
ResourceProvider* resource_provider_;
ResourceProvider::Resource* resource_;
+ base::ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(ScopedWriteLockGr);
};
@@ -348,6 +351,7 @@
virtual void Set() = 0;
virtual bool HasPassed() = 0;
+ virtual void Wait() = 0;
protected:
friend class base::RefCounted<Fence>;
@@ -357,6 +361,29 @@
DISALLOW_COPY_AND_ASSIGN(Fence);
};
+ class SynchronousFence : public ResourceProvider::Fence {
+ public:
+ explicit SynchronousFence(gpu::gles2::GLES2Interface* gl);
+
+ // Overridden from Fence:
+ void Set() override;
+ bool HasPassed() override;
+ void Wait() override;
+
+ // Returns true if the fence has already been synchronized (or never set).
+ bool has_synchronized() const { return has_synchronized_; }
+
+ private:
+ ~SynchronousFence() override;
+
+ void Synchronize();
+
+ gpu::gles2::GLES2Interface* gl_;
+ bool has_synchronized_;
+
+ DISALLOW_COPY_AND_ASSIGN(SynchronousFence);
+ };
+
// Acquire pixel buffer for resource. The pixel buffer can be used to
// set resource pixels without performing unnecessary copying.
void AcquirePixelBuffer(ResourceId resource);
@@ -391,6 +418,8 @@
void WaitSyncPointIfNeeded(ResourceId id);
+ void WaitReadLockIfNeeded(ResourceId id);
+
static GLint GetActiveTextureUnit(gpu::gles2::GLES2Interface* gl);
private:
@@ -560,6 +589,8 @@
scoped_ptr<IdAllocator> buffer_id_allocator_;
bool use_sync_query_;
+ // Fence used for CopyResource if CHROMIUM_sync_query is not supported.
+ scoped_refptr<SynchronousFence> synchronous_fence_;
DISALLOW_COPY_AND_ASSIGN(ResourceProvider);
};
diff --git a/cc/resources/tile_manager.cc b/cc/resources/tile_manager.cc
index 86874e7..c785cf6 100644
--- a/cc/resources/tile_manager.cc
+++ b/cc/resources/tile_manager.cc
@@ -541,7 +541,7 @@
// If this operation becomes expensive too, only do this after some
// resource(s) was returned. Note that in that case, one also need to
// invalidate when releasing some resource from the pool.
- resource_pool_->CheckBusyResources();
+ resource_pool_->CheckBusyResources(false);
// Now give memory out to the tiles until we're out, and build
// the needs-to-be-rasterized queue.
diff --git a/cc/surfaces/display.cc b/cc/surfaces/display.cc
index f50e87c..129a116 100644
--- a/cc/surfaces/display.cc
+++ b/cc/surfaces/display.cc
@@ -29,6 +29,7 @@
manager_(manager),
bitmap_manager_(bitmap_manager),
gpu_memory_buffer_manager_(gpu_memory_buffer_manager),
+ device_scale_factor_(1.f),
blocking_main_thread_task_runner_(
BlockingTaskRunner::Create(base::MessageLoopProxy::current())),
texture_mailbox_deleter_(
@@ -45,9 +46,12 @@
return output_surface_->BindToClient(this);
}
-void Display::Resize(SurfaceId id, const gfx::Size& size) {
+void Display::Resize(SurfaceId id,
+ const gfx::Size& size,
+ float device_scale_factor) {
current_surface_id_ = id;
current_surface_size_ = size;
+ device_scale_factor_ = device_scale_factor;
client_->DisplayDamaged();
}
@@ -115,18 +119,13 @@
benchmark_instrumentation::IssueDisplayRenderingStatsEvent();
DelegatedFrameData* frame_data = frame->delegated_frame_data.get();
- // Only reshape when we know we are going to draw. Otherwise, the reshape
- // can leave the window at the wrong size if we never draw and the proper
- // viewport size is never set.
- output_surface_->Reshape(current_surface_size_, 1.f);
- float device_scale_factor = 1.0f;
gfx::Rect device_viewport_rect = gfx::Rect(current_surface_size_);
gfx::Rect device_clip_rect = device_viewport_rect;
bool disable_picture_quad_image_filtering = false;
renderer_->DecideRenderPassAllocationsForFrame(frame_data->render_pass_list);
renderer_->DrawFrame(&frame_data->render_pass_list,
- device_scale_factor,
+ device_scale_factor_,
device_viewport_rect,
device_clip_rect,
disable_picture_quad_image_filtering);
diff --git a/cc/surfaces/display.h b/cc/surfaces/display.h
index 2346a40..5f80d64 100644
--- a/cc/surfaces/display.h
+++ b/cc/surfaces/display.h
@@ -46,7 +46,12 @@
~Display() override;
bool Initialize(scoped_ptr<OutputSurface> output_surface);
- void Resize(SurfaceId id, const gfx::Size& new_size);
+
+ // |device_scale_factor| is used to communicate to the external window
+ // system the scale at which this surface was rendered.
+ void Resize(SurfaceId id,
+ const gfx::Size& new_size,
+ float device_scale_factor);
bool Draw();
SurfaceId CurrentSurfaceId();
@@ -88,6 +93,7 @@
gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager_;
SurfaceId current_surface_id_;
gfx::Size current_surface_size_;
+ float device_scale_factor_;
LayerTreeSettings settings_;
scoped_ptr<OutputSurface> output_surface_;
scoped_ptr<ResourceProvider> resource_provider_;
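
With the new parameter, the embedder tells Display the scale it rendered at instead of Display assuming 1.f at draw time. A one-line usage sketch with illustrative values:

// The surface id, size and scale here are illustrative.
display->Resize(surface_id, gfx::Size(1920, 1080),
                2.f /* device_scale_factor */);
// Display::DrawFrame() later forwards device_scale_factor_ to the renderer.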
diff --git a/cc/surfaces/surface.cc b/cc/surfaces/surface.cc
index 07c5ae6..32e61c5 100644
--- a/cc/surfaces/surface.cc
+++ b/cc/surfaces/surface.cc
@@ -4,6 +4,8 @@
#include "cc/surfaces/surface.h"
+#include <algorithm>
+
#include "cc/output/compositor_frame.h"
#include "cc/output/copy_output_request.h"
#include "cc/surfaces/surface_factory.h"
@@ -71,7 +73,7 @@
std::multimap<RenderPassId, CopyOutputRequest*>* copy_requests) {
DCHECK(copy_requests->empty());
if (current_frame_) {
- for (auto* render_pass :
+ for (const auto& render_pass :
current_frame_->delegated_frame_data->render_pass_list) {
while (!render_pass->copy_requests.empty()) {
scoped_ptr<CopyOutputRequest> request =
@@ -111,9 +113,9 @@
void Surface::ClearCopyRequests() {
if (current_frame_) {
- for (auto* render_pass :
+ for (const auto& render_pass :
current_frame_->delegated_frame_data->render_pass_list) {
- for (auto* copy_request : render_pass->copy_requests)
+ for (const auto& copy_request : render_pass->copy_requests)
copy_request->SendEmptyResult();
}
}
diff --git a/cc/surfaces/surface_aggregator.cc b/cc/surfaces/surface_aggregator.cc
index ae0fce0..44afaff 100644
--- a/cc/surfaces/surface_aggregator.cc
+++ b/cc/surfaces/surface_aggregator.cc
@@ -4,6 +4,8 @@
#include "cc/surfaces/surface_aggregator.h"
+#include <map>
+
#include "base/bind.h"
#include "base/containers/hash_tables.h"
#include "base/debug/trace_event.h"
@@ -139,9 +141,9 @@
&invalid_frame,
provider_->GetChildToParentMap(child_id),
&referenced_resources);
- for (auto* render_pass : *render_pass_list) {
- for (auto& quad : render_pass->quad_list)
- quad.IterateResources(remap);
+ for (const auto& render_pass : *render_pass_list) {
+ for (const auto& quad : render_pass->quad_list)
+ quad->IterateResources(remap);
}
if (!invalid_frame)
@@ -305,25 +307,24 @@
SharedQuadStateList::ConstIterator sqs_iter =
source_shared_quad_state_list.begin();
for (const auto& quad : source_quad_list) {
- while (quad.shared_quad_state != &*sqs_iter) {
+ while (quad->shared_quad_state != *sqs_iter) {
++sqs_iter;
DCHECK(sqs_iter != source_shared_quad_state_list.end());
}
- DCHECK_EQ(quad.shared_quad_state, &*sqs_iter);
+ DCHECK_EQ(quad->shared_quad_state, *sqs_iter);
- if (quad.material == DrawQuad::SURFACE_CONTENT) {
- const SurfaceDrawQuad* surface_quad =
- SurfaceDrawQuad::MaterialCast(&quad);
+ if (quad->material == DrawQuad::SURFACE_CONTENT) {
+ const SurfaceDrawQuad* surface_quad = SurfaceDrawQuad::MaterialCast(quad);
HandleSurfaceQuad(surface_quad, dest_pass);
} else {
- if (quad.shared_quad_state != last_copied_source_shared_quad_state) {
+ if (quad->shared_quad_state != last_copied_source_shared_quad_state) {
CopySharedQuadState(
- quad.shared_quad_state, content_to_target_transform, dest_pass);
- last_copied_source_shared_quad_state = quad.shared_quad_state;
+ quad->shared_quad_state, content_to_target_transform, dest_pass);
+ last_copied_source_shared_quad_state = quad->shared_quad_state;
}
- if (quad.material == DrawQuad::RENDER_PASS) {
+ if (quad->material == DrawQuad::RENDER_PASS) {
const RenderPassDrawQuad* pass_quad =
- RenderPassDrawQuad::MaterialCast(&quad);
+ RenderPassDrawQuad::MaterialCast(quad);
RenderPassId original_pass_id = pass_quad->render_pass_id;
RenderPassId remapped_pass_id =
RemapPassId(original_pass_id, surface_id);
@@ -334,7 +335,7 @@
remapped_pass_id);
} else {
dest_pass->CopyFromAndAppendDrawQuad(
- &quad, dest_pass->shared_quad_state_list.back());
+ quad, dest_pass->shared_quad_state_list.back());
}
}
}
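
The mechanical change in this file (and in the test helpers below) is that iterating a QuadList or RenderPassList now yields pointers rather than references, so members are reached with -> and MaterialCast takes the pointer directly. A sketch of the new idiom:

// Iteration idiom after this change (render_pass is illustrative).
for (const auto& quad : render_pass->quad_list) {
  if (quad->material == DrawQuad::SURFACE_CONTENT) {
    const SurfaceDrawQuad* surface_quad = SurfaceDrawQuad::MaterialCast(quad);
    // ... handle the embedded surface ...
  }
}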
diff --git a/cc/surfaces/surface_aggregator_test_helpers.cc b/cc/surfaces/surface_aggregator_test_helpers.cc
index a40b2c2..6b527c0 100644
--- a/cc/surfaces/surface_aggregator_test_helpers.cc
+++ b/cc/surfaces/surface_aggregator_test_helpers.cc
@@ -138,7 +138,7 @@
for (auto iter = pass->quad_list.cbegin(); iter != pass->quad_list.cend();
++iter) {
SCOPED_TRACE(base::StringPrintf("Quad number %" PRIuS, iter.index()));
- TestQuadMatchesExpectations(expected_pass.quads[iter.index()], &*iter);
+ TestQuadMatchesExpectations(expected_pass.quads[iter.index()], *iter);
}
}
diff --git a/cc/test/DEPS b/cc/test/DEPS
index 5146b46..87f889e 100644
--- a/cc/test/DEPS
+++ b/cc/test/DEPS
@@ -4,5 +4,6 @@
"+gpu/command_buffer/client/gles2_interface_stub.h",
"+gpu/command_buffer/client/gles2_lib.h",
"+gpu/command_buffer/common/gles2_cmd_utils.h",
+ "+gpu/command_buffer/service/image_factory.h",
"+gpu/skia_bindings/gl_bindings_skia_cmd_buffer.h",
]
diff --git a/cc/test/fake_delegated_renderer_layer_impl.cc b/cc/test/fake_delegated_renderer_layer_impl.cc
index 8548843..f38d5f6 100644
--- a/cc/test/fake_delegated_renderer_layer_impl.cc
+++ b/cc/test/fake_delegated_renderer_layer_impl.cc
@@ -62,9 +62,9 @@
DrawQuad::ResourceIteratorCallback add_resource_to_frame_callback =
base::Bind(&AddResourceToFrame, resource_provider, delegated_frame.get());
- for (auto* pass : delegated_frame->render_pass_list) {
- for (auto& quad : pass->quad_list)
- quad.IterateResources(add_resource_to_frame_callback);
+ for (const auto& pass : delegated_frame->render_pass_list) {
+ for (const auto& quad : pass->quad_list)
+ quad->IterateResources(add_resource_to_frame_callback);
}
CreateChildIdIfNeeded(base::Bind(&NoopReturnCallback));
diff --git a/cc/test/fake_picture_layer_impl.h b/cc/test/fake_picture_layer_impl.h
index a3c69c5..4223811 100644
--- a/cc/test/fake_picture_layer_impl.h
+++ b/cc/test/fake_picture_layer_impl.h
@@ -58,6 +58,7 @@
using PictureLayerImpl::GetViewportForTilePriorityInContentSpace;
using PictureLayerImpl::SanityCheckTilingState;
using PictureLayerImpl::GetRecycledTwinLayer;
+ using PictureLayerImpl::UpdatePile;
using PictureLayerImpl::UpdateIdealScales;
using PictureLayerImpl::MaximumTilingContentsScale;
diff --git a/cc/test/layer_test_common.cc b/cc/test/layer_test_common.cc
index a6dfa9c..762e82d 100644
--- a/cc/test/layer_test_common.cc
+++ b/cc/test/layer_test_common.cc
@@ -69,17 +69,17 @@
// No quad should exist if it's fully occluded.
for (const auto& quad : quads) {
gfx::Rect target_visible_rect = MathUtil::MapEnclosingClippedRect(
- quad.quadTransform(), quad.visible_rect);
+ quad->quadTransform(), quad->visible_rect);
EXPECT_FALSE(occluded.Contains(target_visible_rect));
}
// Quads that are fully occluded on one axis only should be shrunken.
for (const auto& quad : quads) {
- DCHECK(quad.quadTransform().IsIdentityOrIntegerTranslation());
+ DCHECK(quad->quadTransform().IsIdentityOrIntegerTranslation());
gfx::Rect target_rect =
- MathUtil::MapEnclosingClippedRect(quad.quadTransform(), quad.rect);
+ MathUtil::MapEnclosingClippedRect(quad->quadTransform(), quad->rect);
gfx::Rect target_visible_rect = MathUtil::MapEnclosingClippedRect(
- quad.quadTransform(), quad.visible_rect);
+ quad->quadTransform(), quad->visible_rect);
bool fully_occluded_horizontal = target_rect.x() >= occluded.x() &&
target_rect.right() <= occluded.right();
@@ -89,10 +89,10 @@
target_rect.Intersects(occluded) &&
(fully_occluded_vertical || fully_occluded_horizontal);
if (!should_be_occluded) {
- EXPECT_EQ(quad.rect.ToString(), quad.visible_rect.ToString());
+ EXPECT_EQ(quad->rect.ToString(), quad->visible_rect.ToString());
} else {
- EXPECT_NE(quad.rect.ToString(), quad.visible_rect.ToString());
- EXPECT_TRUE(quad.rect.Contains(quad.visible_rect));
+ EXPECT_NE(quad->rect.ToString(), quad->visible_rect.ToString());
+ EXPECT_TRUE(quad->rect.Contains(quad->visible_rect));
++(*partially_occluded_count);
}
}
diff --git a/cc/test/layer_tree_pixel_resource_test.cc b/cc/test/layer_tree_pixel_resource_test.cc
new file mode 100644
index 0000000..fa2bbb8
--- /dev/null
+++ b/cc/test/layer_tree_pixel_resource_test.cc
@@ -0,0 +1,225 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/test/layer_tree_pixel_resource_test.h"
+
+#include "cc/layers/layer.h"
+#include "cc/resources/bitmap_raster_worker_pool.h"
+#include "cc/resources/gpu_raster_worker_pool.h"
+#include "cc/resources/one_copy_raster_worker_pool.h"
+#include "cc/resources/pixel_buffer_raster_worker_pool.h"
+#include "cc/resources/raster_worker_pool.h"
+#include "cc/resources/resource_pool.h"
+#include "cc/resources/zero_copy_raster_worker_pool.h"
+#include "cc/test/fake_output_surface.h"
+#include "gpu/GLES2/gl2extchromium.h"
+
+namespace cc {
+
+namespace {
+
+bool IsTestCaseSupported(PixelResourceTestCase test_case) {
+ switch (test_case) {
+ case SOFTWARE:
+ case GL_GPU_RASTER_2D_DRAW:
+ case GL_ZERO_COPY_2D_DRAW:
+ case GL_ZERO_COPY_RECT_DRAW:
+ case GL_ASYNC_UPLOAD_2D_DRAW:
+ return true;
+ case GL_ZERO_COPY_EXTERNAL_DRAW:
+ case GL_ONE_COPY_2D_STAGING_2D_DRAW:
+ case GL_ONE_COPY_RECT_STAGING_2D_DRAW:
+ case GL_ONE_COPY_EXTERNAL_STAGING_2D_DRAW:
+ // These should all be enabled in practice.
+ // TODO(reveman): one copy not supported in unit tests yet.
+ // TODO(enne): look into getting texture external oes enabled.
+ return false;
+ }
+
+ NOTREACHED();
+ return false;
+}
+
+} // namespace
+
+LayerTreeHostPixelResourceTest::LayerTreeHostPixelResourceTest(
+ PixelResourceTestCase test_case)
+ : staging_texture_target_(GL_INVALID_VALUE),
+ draw_texture_target_(GL_INVALID_VALUE),
+ resource_pool_option_(BITMAP_RASTER_WORKER_POOL),
+ test_case_(test_case) {
+ InitializeFromTestCase(test_case);
+}
+
+void LayerTreeHostPixelResourceTest::InitializeFromTestCase(
+ PixelResourceTestCase test_case) {
+ switch (test_case) {
+ case SOFTWARE:
+ test_type_ = PIXEL_TEST_SOFTWARE;
+ staging_texture_target_ = GL_INVALID_VALUE;
+ draw_texture_target_ = GL_INVALID_VALUE;
+ resource_pool_option_ = BITMAP_RASTER_WORKER_POOL;
+ return;
+ case GL_GPU_RASTER_2D_DRAW:
+ test_type_ = PIXEL_TEST_GL;
+ staging_texture_target_ = GL_INVALID_VALUE;
+ draw_texture_target_ = GL_TEXTURE_2D;
+ resource_pool_option_ = GPU_RASTER_WORKER_POOL;
+ return;
+ case GL_ONE_COPY_2D_STAGING_2D_DRAW:
+ test_type_ = PIXEL_TEST_GL;
+ staging_texture_target_ = GL_TEXTURE_2D;
+ draw_texture_target_ = GL_TEXTURE_2D;
+ resource_pool_option_ = ONE_COPY_RASTER_WORKER_POOL;
+ return;
+ case GL_ONE_COPY_RECT_STAGING_2D_DRAW:
+ test_type_ = PIXEL_TEST_GL;
+ staging_texture_target_ = GL_TEXTURE_RECTANGLE_ARB;
+ draw_texture_target_ = GL_TEXTURE_RECTANGLE_ARB;
+ resource_pool_option_ = ONE_COPY_RASTER_WORKER_POOL;
+ return;
+ case GL_ONE_COPY_EXTERNAL_STAGING_2D_DRAW:
+ test_type_ = PIXEL_TEST_GL;
+ staging_texture_target_ = GL_TEXTURE_EXTERNAL_OES;
+ draw_texture_target_ = GL_TEXTURE_2D;
+ resource_pool_option_ = ONE_COPY_RASTER_WORKER_POOL;
+ return;
+ case GL_ZERO_COPY_2D_DRAW:
+ test_type_ = PIXEL_TEST_GL;
+ staging_texture_target_ = GL_INVALID_VALUE;
+ draw_texture_target_ = GL_TEXTURE_2D;
+ resource_pool_option_ = ZERO_COPY_RASTER_WORKER_POOL;
+ return;
+ case GL_ZERO_COPY_RECT_DRAW:
+ test_type_ = PIXEL_TEST_GL;
+ staging_texture_target_ = GL_INVALID_VALUE;
+ draw_texture_target_ = GL_TEXTURE_RECTANGLE_ARB;
+ resource_pool_option_ = ZERO_COPY_RASTER_WORKER_POOL;
+ return;
+ case GL_ZERO_COPY_EXTERNAL_DRAW:
+ test_type_ = PIXEL_TEST_GL;
+ staging_texture_target_ = GL_INVALID_VALUE;
+ draw_texture_target_ = GL_TEXTURE_EXTERNAL_OES;
+ resource_pool_option_ = ZERO_COPY_RASTER_WORKER_POOL;
+ return;
+ case GL_ASYNC_UPLOAD_2D_DRAW:
+ test_type_ = PIXEL_TEST_GL;
+ staging_texture_target_ = GL_INVALID_VALUE;
+ draw_texture_target_ = GL_TEXTURE_2D;
+ resource_pool_option_ = PIXEL_BUFFER_RASTER_WORKER_POOL;
+ return;
+ }
+ NOTREACHED();
+}
+
+void LayerTreeHostPixelResourceTest::CreateResourceAndRasterWorkerPool(
+ LayerTreeHostImpl* host_impl,
+ scoped_ptr<RasterWorkerPool>* raster_worker_pool,
+ scoped_ptr<ResourcePool>* resource_pool,
+ scoped_ptr<ResourcePool>* staging_resource_pool) {
+ base::SingleThreadTaskRunner* task_runner =
+ proxy()->HasImplThread() ? proxy()->ImplThreadTaskRunner()
+ : proxy()->MainThreadTaskRunner();
+ DCHECK(task_runner);
+
+ ContextProvider* context_provider =
+ host_impl->output_surface()->context_provider();
+ ResourceProvider* resource_provider = host_impl->resource_provider();
+ bool use_distance_field_text = false;
+ size_t max_transfer_buffer_usage_bytes = 1024u * 1024u * 60u;
+
+ switch (resource_pool_option_) {
+ case BITMAP_RASTER_WORKER_POOL:
+ EXPECT_FALSE(context_provider);
+ EXPECT_EQ(PIXEL_TEST_SOFTWARE, test_type_);
+ *resource_pool =
+ ResourcePool::Create(resource_provider,
+ draw_texture_target_,
+ resource_provider->best_texture_format());
+
+ *raster_worker_pool =
+ BitmapRasterWorkerPool::Create(task_runner,
+ RasterWorkerPool::GetTaskGraphRunner(),
+ resource_provider);
+ break;
+ case GPU_RASTER_WORKER_POOL:
+ EXPECT_TRUE(context_provider);
+ EXPECT_EQ(PIXEL_TEST_GL, test_type_);
+ *resource_pool =
+ ResourcePool::Create(resource_provider,
+ draw_texture_target_,
+ resource_provider->best_texture_format());
+
+ *raster_worker_pool =
+ GpuRasterWorkerPool::Create(task_runner,
+ context_provider,
+ resource_provider,
+ use_distance_field_text);
+ break;
+ case ZERO_COPY_RASTER_WORKER_POOL:
+ EXPECT_TRUE(context_provider);
+ EXPECT_EQ(PIXEL_TEST_GL, test_type_);
+ EXPECT_TRUE(host_impl->CanUseZeroCopyRasterizer());
+ *resource_pool =
+ ResourcePool::Create(resource_provider,
+ draw_texture_target_,
+ resource_provider->best_texture_format());
+
+ *raster_worker_pool = ZeroCopyRasterWorkerPool::Create(
+ task_runner,
+ RasterWorkerPool::GetTaskGraphRunner(),
+ resource_provider);
+ break;
+ case ONE_COPY_RASTER_WORKER_POOL:
+ EXPECT_TRUE(context_provider);
+ EXPECT_EQ(PIXEL_TEST_GL, test_type_);
+ EXPECT_TRUE(host_impl->CanUseOneCopyRasterizer());
+ // We need to create a staging resource pool when using the one-copy rasterizer.
+ *staging_resource_pool =
+ ResourcePool::Create(resource_provider,
+ staging_texture_target_,
+ resource_provider->best_texture_format());
+ *resource_pool =
+ ResourcePool::Create(resource_provider,
+ draw_texture_target_,
+ resource_provider->best_texture_format());
+
+ *raster_worker_pool = OneCopyRasterWorkerPool::Create(
+ task_runner,
+ RasterWorkerPool::GetTaskGraphRunner(),
+ context_provider,
+ resource_provider,
+ staging_resource_pool->get());
+ break;
+ case PIXEL_BUFFER_RASTER_WORKER_POOL:
+ EXPECT_TRUE(context_provider);
+ EXPECT_EQ(PIXEL_TEST_GL, test_type_);
+ *resource_pool = ResourcePool::Create(
+ resource_provider,
+ draw_texture_target_,
+ resource_provider->memory_efficient_texture_format());
+
+ *raster_worker_pool = PixelBufferRasterWorkerPool::Create(
+ task_runner,
+ RasterWorkerPool::GetTaskGraphRunner(),
+ context_provider,
+ resource_provider,
+ max_transfer_buffer_usage_bytes);
+ break;
+ }
+}
+
+void LayerTreeHostPixelResourceTest::RunPixelResourceTest(
+ scoped_refptr<Layer> content_root,
+ base::FilePath file_name) {
+ if (!IsTestCaseSupported(test_case_))
+ return;
+ RunPixelTest(test_type_, content_root, file_name);
+}
+
+ParameterizedPixelResourceTest::ParameterizedPixelResourceTest()
+ : LayerTreeHostPixelResourceTest(GetParam()) {
+}
+
+} // namespace cc
diff --git a/cc/test/layer_tree_pixel_resource_test.h b/cc/test/layer_tree_pixel_resource_test.h
new file mode 100644
index 0000000..da3da25
--- /dev/null
+++ b/cc/test/layer_tree_pixel_resource_test.h
@@ -0,0 +1,88 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_TEST_LAYER_TREE_PIXEL_RESOURCE_TEST_H_
+#define CC_TEST_LAYER_TREE_PIXEL_RESOURCE_TEST_H_
+
+#include "base/memory/ref_counted.h"
+#include "cc/test/layer_tree_pixel_test.h"
+
+namespace cc {
+
+class LayerTreeHostImpl;
+class RasterWorkerPool;
+class ResourcePool;
+
+// Enumerate the various combinations of renderer, resource pool, staging
+// texture type, and drawing texture types. Not all of the combinations
+// are possible (or worth testing independently), so this is the minimal
+// list to hit all codepaths.
+enum PixelResourceTestCase {
+ SOFTWARE,
+ GL_GPU_RASTER_2D_DRAW,
+ GL_ONE_COPY_2D_STAGING_2D_DRAW,
+ GL_ONE_COPY_RECT_STAGING_2D_DRAW,
+ GL_ONE_COPY_EXTERNAL_STAGING_2D_DRAW,
+ GL_ZERO_COPY_2D_DRAW,
+ GL_ZERO_COPY_RECT_DRAW,
+ GL_ZERO_COPY_EXTERNAL_DRAW,
+ GL_ASYNC_UPLOAD_2D_DRAW,
+};
+
+class LayerTreeHostPixelResourceTest : public LayerTreePixelTest {
+ public:
+ explicit LayerTreeHostPixelResourceTest(PixelResourceTestCase test_case);
+
+ void CreateResourceAndRasterWorkerPool(
+ LayerTreeHostImpl* host_impl,
+ scoped_ptr<RasterWorkerPool>* raster_worker_pool,
+ scoped_ptr<ResourcePool>* resource_pool,
+ scoped_ptr<ResourcePool>* staging_resource_pool) override;
+
+ void RunPixelResourceTest(scoped_refptr<Layer> content_root,
+ base::FilePath file_name);
+
+ enum RasterWorkerPoolOption {
+ BITMAP_RASTER_WORKER_POOL,
+ GPU_RASTER_WORKER_POOL,
+ ZERO_COPY_RASTER_WORKER_POOL,
+ ONE_COPY_RASTER_WORKER_POOL,
+ PIXEL_BUFFER_RASTER_WORKER_POOL,
+ };
+
+ protected:
+ unsigned staging_texture_target_;
+ unsigned draw_texture_target_;
+ RasterWorkerPoolOption resource_pool_option_;
+
+ private:
+ void InitializeFromTestCase(PixelResourceTestCase test_case);
+
+ PixelResourceTestCase test_case_;
+};
+
+#define INSTANTIATE_PIXEL_RESOURCE_TEST_CASE_P(framework_name) \
+ INSTANTIATE_TEST_CASE_P( \
+ PixelResourceTest, \
+ framework_name, \
+ ::testing::Values(SOFTWARE, \
+ GL_GPU_RASTER_2D_DRAW, \
+ GL_ONE_COPY_2D_STAGING_2D_DRAW, \
+ GL_ONE_COPY_RECT_STAGING_2D_DRAW, \
+ GL_ONE_COPY_EXTERNAL_STAGING_2D_DRAW, \
+ GL_ZERO_COPY_2D_DRAW, \
+ GL_ZERO_COPY_RECT_DRAW, \
+ GL_ZERO_COPY_EXTERNAL_DRAW, \
+ GL_ASYNC_UPLOAD_2D_DRAW))
+
+class ParameterizedPixelResourceTest
+ : public LayerTreeHostPixelResourceTest,
+ public ::testing::WithParamInterface<PixelResourceTestCase> {
+ public:
+ ParameterizedPixelResourceTest();
+};
+
+} // namespace cc
+
+#endif // CC_TEST_LAYER_TREE_PIXEL_RESOURCE_TEST_H_
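
A typical test built on this harness instantiates the parameterized fixture once and then exercises a layer tree against a reference image for every resource configuration. The sketch below is hypothetical: the fixture name, layer contents and reference file are made up, and CreateSolidColorLayer is assumed to be the helper provided by LayerTreePixelTest:

class LayerTreeHostMasksPixelTest : public ParameterizedPixelResourceTest {};

INSTANTIATE_PIXEL_RESOURCE_TEST_CASE_P(LayerTreeHostMasksPixelTest);

TEST_P(LayerTreeHostMasksPixelTest, GreenSquare) {
  scoped_refptr<SolidColorLayer> green =
      CreateSolidColorLayer(gfx::Rect(200, 200), SK_ColorGREEN);
  RunPixelResourceTest(green,
                       base::FilePath(FILE_PATH_LITERAL("green.png")));
}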
diff --git a/cc/test/layer_tree_pixel_test.cc b/cc/test/layer_tree_pixel_test.cc
index 370995e..ef0ba22 100644
--- a/cc/test/layer_tree_pixel_test.cc
+++ b/cc/test/layer_tree_pixel_test.cc
@@ -198,7 +198,20 @@
content_root_ = content_root;
readback_target_ = NULL;
ref_file_ = file_name;
- RunTest(true, false, impl_side_painting_);
+ bool threaded = true;
+ RunTest(threaded, false, impl_side_painting_);
+}
+
+void LayerTreePixelTest::RunSingleThreadedPixelTest(
+ PixelTestType test_type,
+ scoped_refptr<Layer> content_root,
+ base::FilePath file_name) {
+ test_type_ = test_type;
+ content_root_ = content_root;
+ readback_target_ = NULL;
+ ref_file_ = file_name;
+ bool threaded = false;
+ RunTest(threaded, false, impl_side_painting_);
}
void LayerTreePixelTest::RunPixelTestWithReadbackTarget(
diff --git a/cc/test/layer_tree_pixel_test.h b/cc/test/layer_tree_pixel_test.h
index e6f5cb7..c3c324a 100644
--- a/cc/test/layer_tree_pixel_test.h
+++ b/cc/test/layer_tree_pixel_test.h
@@ -30,6 +30,12 @@
class TextureMailbox;
class LayerTreePixelTest : public LayerTreeTest {
+ public:
+ enum PixelTestType {
+ PIXEL_TEST_GL,
+ PIXEL_TEST_SOFTWARE,
+ };
+
protected:
LayerTreePixelTest();
virtual ~LayerTreePixelTest();
@@ -58,15 +64,14 @@
scoped_refptr<TextureLayer> CreateTextureLayer(const gfx::Rect& rect,
const SkBitmap& bitmap);
- enum PixelTestType {
- PIXEL_TEST_GL,
- PIXEL_TEST_SOFTWARE,
- };
-
void RunPixelTest(PixelTestType type,
scoped_refptr<Layer> content_root,
base::FilePath file_name);
+ void RunSingleThreadedPixelTest(PixelTestType test_type,
+ scoped_refptr<Layer> content_root,
+ base::FilePath file_name);
+
void RunPixelTestWithReadbackTarget(PixelTestType type,
scoped_refptr<Layer> content_root,
Layer* target,
diff --git a/cc/test/layer_tree_test.cc b/cc/test/layer_tree_test.cc
index 8c0255a..d1d1962 100644
--- a/cc/test/layer_tree_test.cc
+++ b/cc/test/layer_tree_test.cc
@@ -44,6 +44,15 @@
return draw_result;
}
+void TestHooks::CreateResourceAndRasterWorkerPool(
+ LayerTreeHostImpl* host_impl,
+ scoped_ptr<RasterWorkerPool>* raster_worker_pool,
+ scoped_ptr<ResourcePool>* resource_pool,
+ scoped_ptr<ResourcePool>* staging_resource_pool) {
+ host_impl->LayerTreeHostImpl::CreateResourceAndRasterWorkerPool(
+ raster_worker_pool, resource_pool, staging_resource_pool);
+}
+
base::TimeDelta TestHooks::LowFrequencyAnimationInterval() const {
return base::TimeDelta::FromMilliseconds(16);
}
@@ -146,6 +155,14 @@
block_notify_ready_to_activate_for_testing_(false),
notify_ready_to_activate_was_blocked_(false) {}
+ void CreateResourceAndRasterWorkerPool(
+ scoped_ptr<RasterWorkerPool>* raster_worker_pool,
+ scoped_ptr<ResourcePool>* resource_pool,
+ scoped_ptr<ResourcePool>* staging_resource_pool) override {
+ test_hooks_->CreateResourceAndRasterWorkerPool(
+ this, raster_worker_pool, resource_pool, staging_resource_pool);
+ }
+
void WillBeginImplFrame(const BeginFrameArgs& args) override {
LayerTreeHostImpl::WillBeginImplFrame(args);
test_hooks_->WillBeginImplFrameOnThread(this, args);
@@ -399,7 +416,8 @@
};
LayerTreeTest::LayerTreeTest()
- : beginning_(false),
+ : output_surface_(nullptr),
+ beginning_(false),
end_when_begin_returns_(false),
timed_out_(false),
scheduled_(false),
@@ -514,6 +532,13 @@
main_thread_weak_ptr_));
}
+void LayerTreeTest::PostCompositeImmediatelyToMainThread() {
+ main_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&LayerTreeTest::DispatchCompositeImmediately,
+ main_thread_weak_ptr_));
+}
+
void LayerTreeTest::WillBeginTest() {
layer_tree_host_->SetLayerTreeHostClientReady();
}
@@ -629,6 +654,12 @@
layer_tree_host_->SetNextCommitForcesRedraw();
}
+void LayerTreeTest::DispatchCompositeImmediately() {
+ DCHECK(!proxy() || proxy()->IsMainThread());
+ if (layer_tree_host_)
+ layer_tree_host_->Composite(gfx::FrameTime::Now());
+}
+
void LayerTreeTest::RunTest(bool threaded,
bool delegating_renderer,
bool impl_side_painting) {
diff --git a/cc/test/layer_tree_test.h b/cc/test/layer_tree_test.h
index 09ef1e5..36d669c 100644
--- a/cc/test/layer_tree_test.h
+++ b/cc/test/layer_tree_test.h
@@ -34,6 +34,11 @@
void ReadSettings(const LayerTreeSettings& settings);
+ virtual void CreateResourceAndRasterWorkerPool(
+ LayerTreeHostImpl* host_impl,
+ scoped_ptr<RasterWorkerPool>* raster_worker_pool,
+ scoped_ptr<ResourcePool>* resource_pool,
+ scoped_ptr<ResourcePool>* staging_resource_pool);
virtual void WillBeginImplFrameOnThread(LayerTreeHostImpl* host_impl,
const BeginFrameArgs& args) {}
virtual void BeginMainFrameAbortedOnThread(LayerTreeHostImpl* host_impl,
@@ -134,6 +139,7 @@
void PostSetNeedsRedrawRectToMainThread(const gfx::Rect& damage_rect);
void PostSetVisibleToMainThread(bool visible);
void PostSetNextCommitForcesRedrawToMainThread();
+ void PostCompositeImmediatelyToMainThread();
void DoBeginTest();
void Timeout();
@@ -154,6 +160,7 @@
void DispatchSetVisible(bool visible);
void DispatchSetNextCommitForcesRedraw();
void DispatchDidAddAnimation();
+ void DispatchCompositeImmediately();
virtual void AfterTest() = 0;
virtual void WillBeginTest();
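
Together with the new hook above, a single-threaded test can drive a frame explicitly rather than waiting on the scheduler. A hypothetical test body:

// Sketch: trigger an immediate composite from the test (hypothetical).
void BeginTest() override {
  PostCompositeImmediatelyToMainThread();  // Ends up in
                                           // LayerTreeHost::Composite().
}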
diff --git a/cc/test/test_gpu_memory_buffer_manager.cc b/cc/test/test_gpu_memory_buffer_manager.cc
index 1f34988..caa146e 100644
--- a/cc/test/test_gpu_memory_buffer_manager.cc
+++ b/cc/test/test_gpu_memory_buffer_manager.cc
@@ -10,22 +10,39 @@
namespace cc {
namespace {
+size_t BytesPerPixel(gfx::GpuMemoryBuffer::Format format) {
+ switch (format) {
+ case gfx::GpuMemoryBuffer::RGBA_8888:
+ case gfx::GpuMemoryBuffer::RGBX_8888:
+ case gfx::GpuMemoryBuffer::BGRA_8888:
+ return 4;
+ }
+
+ NOTREACHED();
+ return 0;
+}
+
class GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
public:
- GpuMemoryBufferImpl(const gfx::Size& size, Format format)
+ GpuMemoryBufferImpl(const gfx::Size& size,
+ Format format,
+ scoped_ptr<base::SharedMemory> shared_memory)
: size_(size),
format_(format),
- pixels_(new uint8[size.GetArea() * BytesPerPixel(format)]),
+ shared_memory_(shared_memory.Pass()),
mapped_(false) {}
// Overridden from gfx::GpuMemoryBuffer:
void* Map() override {
DCHECK(!mapped_);
+ if (!shared_memory_->Map(size_.GetArea() * BytesPerPixel(format_)))
+ return NULL;
mapped_ = true;
- return pixels_.get();
+ return shared_memory_->memory();
}
void Unmap() override {
DCHECK(mapped_);
+ shared_memory_->Unmap();
mapped_ = false;
}
bool IsMapped() const override { return mapped_; }
@@ -34,29 +51,19 @@
return size_.width() * BytesPerPixel(format_);
}
gfx::GpuMemoryBufferHandle GetHandle() const override {
- NOTREACHED();
- return gfx::GpuMemoryBufferHandle();
+ gfx::GpuMemoryBufferHandle handle;
+ handle.type = gfx::SHARED_MEMORY_BUFFER;
+ handle.handle = shared_memory_->handle();
+ return handle;
}
ClientBuffer AsClientBuffer() override {
return reinterpret_cast<ClientBuffer>(this);
}
private:
- static size_t BytesPerPixel(Format format) {
- switch (format) {
- case RGBA_8888:
- case RGBX_8888:
- case BGRA_8888:
- return 4;
- }
-
- NOTREACHED();
- return 0;
- }
-
const gfx::Size size_;
gfx::GpuMemoryBuffer::Format format_;
- scoped_ptr<uint8[]> pixels_;
+ scoped_ptr<base::SharedMemory> shared_memory_;
bool mapped_;
};
@@ -73,8 +80,11 @@
const gfx::Size& size,
gfx::GpuMemoryBuffer::Format format,
gfx::GpuMemoryBuffer::Usage usage) {
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory);
+ if (!shared_memory->CreateAnonymous(size.GetArea() * BytesPerPixel(format)))
+ return nullptr;
return make_scoped_ptr<gfx::GpuMemoryBuffer>(
- new GpuMemoryBufferImpl(size, format));
+ new GpuMemoryBufferImpl(size, format, shared_memory.Pass()));
}
gfx::GpuMemoryBuffer*
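
With the shared-memory backing, a test buffer can both be mapped for writing and hand out a real handle for GLImageSharedMemory. A short usage sketch; the MAP usage value is an assumption, only the size and format arguments come from the code above:

TestGpuMemoryBufferManager manager;
scoped_ptr<gfx::GpuMemoryBuffer> buffer = manager.AllocateGpuMemoryBuffer(
    gfx::Size(64, 64), gfx::GpuMemoryBuffer::RGBA_8888,
    gfx::GpuMemoryBuffer::MAP /* assumed usage value */);
void* pixels = buffer->Map();  // Maps the anonymous shared memory region.
// ... write 64 * 64 * 4 bytes of RGBA data ...
buffer->Unmap();
DCHECK_EQ(gfx::SHARED_MEMORY_BUFFER, buffer->GetHandle().type);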
diff --git a/cc/test/test_gpu_memory_buffer_manager.h b/cc/test/test_gpu_memory_buffer_manager.h
index fd1c66a..11f80ba 100644
--- a/cc/test/test_gpu_memory_buffer_manager.h
+++ b/cc/test/test_gpu_memory_buffer_manager.h
@@ -21,6 +21,9 @@
gfx::GpuMemoryBuffer::Usage usage) override;
gfx::GpuMemoryBuffer* GpuMemoryBufferFromClientBuffer(
ClientBuffer buffer) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestGpuMemoryBufferManager);
};
} // namespace cc
diff --git a/cc/test/test_image_factory.cc b/cc/test/test_image_factory.cc
new file mode 100644
index 0000000..42e3502
--- /dev/null
+++ b/cc/test/test_image_factory.cc
@@ -0,0 +1,33 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/test/test_image_factory.h"
+
+#include "ui/gl/gl_image_shared_memory.h"
+
+namespace cc {
+
+TestImageFactory::TestImageFactory() {
+}
+
+TestImageFactory::~TestImageFactory() {
+}
+
+scoped_refptr<gfx::GLImage> TestImageFactory::CreateImageForGpuMemoryBuffer(
+ const gfx::GpuMemoryBufferHandle& handle,
+ const gfx::Size& size,
+ gfx::GpuMemoryBuffer::Format format,
+ unsigned internalformat,
+ int client_id) {
+ DCHECK_EQ(handle.type, gfx::SHARED_MEMORY_BUFFER);
+
+ scoped_refptr<gfx::GLImageSharedMemory> image(
+ new gfx::GLImageSharedMemory(size, internalformat));
+ if (!image->Initialize(handle, format))
+ return nullptr;
+
+ return image;
+}
+
+} // namespace cc
diff --git a/cc/test/test_image_factory.h b/cc/test/test_image_factory.h
new file mode 100644
index 0000000..fcef390
--- /dev/null
+++ b/cc/test/test_image_factory.h
@@ -0,0 +1,31 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_TEST_TEST_IMAGE_FACTORY_H_
+#define CC_TEST_TEST_IMAGE_FACTORY_H_
+
+#include "gpu/command_buffer/service/image_factory.h"
+
+namespace cc {
+
+class TestImageFactory : public gpu::ImageFactory {
+ public:
+ TestImageFactory();
+ ~TestImageFactory() override;
+
+ // Overridden from gpu::ImageFactory:
+ scoped_refptr<gfx::GLImage> CreateImageForGpuMemoryBuffer(
+ const gfx::GpuMemoryBufferHandle& handle,
+ const gfx::Size& size,
+ gfx::GpuMemoryBuffer::Format format,
+ unsigned internalformat,
+ int client_id) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestImageFactory);
+};
+
+} // namespace cc
+
+#endif // CC_TEST_TEST_IMAGE_FACTORY_H_
diff --git a/cc/test/test_in_process_context_provider.cc b/cc/test/test_in_process_context_provider.cc
index b4d580f..34b0493 100644
--- a/cc/test/test_in_process_context_provider.cc
+++ b/cc/test/test_in_process_context_provider.cc
@@ -20,7 +20,9 @@
namespace cc {
// static
-scoped_ptr<gpu::GLInProcessContext> CreateTestInProcessContext() {
+scoped_ptr<gpu::GLInProcessContext> CreateTestInProcessContext(
+ TestGpuMemoryBufferManager* gpu_memory_buffer_manager,
+ TestImageFactory* image_factory) {
const bool is_offscreen = true;
const bool share_resources = true;
gpu::gles2::ContextCreationAttribHelper attribs;
@@ -47,14 +49,22 @@
share_resources,
attribs,
gpu_preference,
- gpu::GLInProcessContextSharedMemoryLimits()));
+ gpu::GLInProcessContextSharedMemoryLimits(),
+ gpu_memory_buffer_manager,
+ image_factory));
DCHECK(context);
return context.Pass();
}
+scoped_ptr<gpu::GLInProcessContext> CreateTestInProcessContext() {
+ return CreateTestInProcessContext(nullptr, nullptr);
+}
+
TestInProcessContextProvider::TestInProcessContextProvider()
- : context_(CreateTestInProcessContext()) {}
+ : context_(CreateTestInProcessContext(&gpu_memory_buffer_manager_,
+ &image_factory_)) {
+}
TestInProcessContextProvider::~TestInProcessContextProvider() {
}
@@ -116,7 +126,11 @@
ContextProvider::Capabilities
TestInProcessContextProvider::ContextCapabilities() {
- return ContextProvider::Capabilities();
+ ContextProvider::Capabilities capabilities;
+ capabilities.gpu.image = true;
+ capabilities.gpu.texture_rectangle = true;
+
+ return capabilities;
}
bool TestInProcessContextProvider::IsContextLost() { return false; }
diff --git a/cc/test/test_in_process_context_provider.h b/cc/test/test_in_process_context_provider.h
index 6725ec0..0533ff4 100644
--- a/cc/test/test_in_process_context_provider.h
+++ b/cc/test/test_in_process_context_provider.h
@@ -6,6 +6,8 @@
#define CC_TEST_TEST_IN_PROCESS_CONTEXT_PROVIDER_H_
#include "cc/output/context_provider.h"
+#include "cc/test/test_gpu_memory_buffer_manager.h"
+#include "cc/test/test_image_factory.h"
#include "skia/ext/refptr.h"
class GrContext;
@@ -17,6 +19,9 @@
namespace cc {
scoped_ptr<gpu::GLInProcessContext> CreateTestInProcessContext();
+scoped_ptr<gpu::GLInProcessContext> CreateTestInProcessContext(
+ TestGpuMemoryBufferManager* gpu_memory_buffer_manager,
+ TestImageFactory* image_factory);
class TestInProcessContextProvider : public ContextProvider {
public:
@@ -42,6 +47,8 @@
~TestInProcessContextProvider() override;
private:
+ TestGpuMemoryBufferManager gpu_memory_buffer_manager_;
+ TestImageFactory image_factory_;
scoped_ptr<gpu::GLInProcessContext> context_;
skia::RefPtr<class GrContext> gr_context_;
};
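
The two new members are what let the in-process context allocate GpuMemoryBuffers and turn them into GL images in tests; the same wiring is available standalone through the new CreateTestInProcessContext overload:

// Sketch: create a test context backed by the test manager and factory.
TestGpuMemoryBufferManager gpu_memory_buffer_manager;
TestImageFactory image_factory;
scoped_ptr<gpu::GLInProcessContext> context =
    CreateTestInProcessContext(&gpu_memory_buffer_manager, &image_factory);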
diff --git a/cc/test/test_web_graphics_context_3d.h b/cc/test/test_web_graphics_context_3d.h
index eda6f0f..ff2c704 100644
--- a/cc/test/test_web_graphics_context_3d.h
+++ b/cc/test/test_web_graphics_context_3d.h
@@ -327,6 +327,9 @@
void set_support_sync_query(bool support) {
test_capabilities_.gpu.sync_query = support;
}
+ void set_support_image(bool support) {
+ test_capabilities_.gpu.image = support;
+ }
// When this context is lost, all contexts in its share group are also lost.
void add_share_group_context(TestWebGraphicsContext3D* context3d) {
diff --git a/cc/trees/layer_tree_host.cc b/cc/trees/layer_tree_host.cc
index b2c07da..3898156 100644
--- a/cc/trees/layer_tree_host.cc
+++ b/cc/trees/layer_tree_host.cc
@@ -1058,9 +1058,6 @@
}
void LayerTreeHost::ApplyScrollAndScale(ScrollAndScaleSet* info) {
- if (!root_layer_.get())
- return;
-
ScopedPtrVector<SwapPromise>::iterator it = info->swap_promises.begin();
for (; it != info->swap_promises.end(); ++it) {
scoped_ptr<SwapPromise> swap_promise(info->swap_promises.take(it));
@@ -1074,19 +1071,21 @@
gfx::Vector2d inner_viewport_scroll_delta;
gfx::Vector2d outer_viewport_scroll_delta;
- for (size_t i = 0; i < info->scrolls.size(); ++i) {
- Layer* layer = LayerTreeHostCommon::FindLayerInSubtree(
- root_layer_.get(), info->scrolls[i].layer_id);
- if (!layer)
- continue;
- if (layer == outer_viewport_scroll_layer_.get()) {
- outer_viewport_scroll_delta += info->scrolls[i].scroll_delta;
- } else if (layer == inner_viewport_scroll_layer_.get()) {
- inner_viewport_scroll_delta += info->scrolls[i].scroll_delta;
- } else {
- layer->SetScrollOffsetFromImplSide(
- gfx::ScrollOffsetWithDelta(layer->scroll_offset(),
- info->scrolls[i].scroll_delta));
+ if (root_layer_.get()) {
+ for (size_t i = 0; i < info->scrolls.size(); ++i) {
+ Layer* layer = LayerTreeHostCommon::FindLayerInSubtree(
+ root_layer_.get(), info->scrolls[i].layer_id);
+ if (!layer)
+ continue;
+ if (layer == outer_viewport_scroll_layer_.get()) {
+ outer_viewport_scroll_delta += info->scrolls[i].scroll_delta;
+ } else if (layer == inner_viewport_scroll_layer_.get()) {
+ inner_viewport_scroll_delta += info->scrolls[i].scroll_delta;
+ } else {
+ layer->SetScrollOffsetFromImplSide(
+ gfx::ScrollOffsetWithDelta(layer->scroll_offset(),
+ info->scrolls[i].scroll_delta));
+ }
}
}
@@ -1094,18 +1093,16 @@
!outer_viewport_scroll_delta.IsZero() ||
info->page_scale_delta != 1.f ||
info->top_controls_delta) {
- // SetScrollOffsetFromImplSide above could have destroyed the tree,
- // so re-get this layer before doing anything to it.
-
- DCHECK(inner_viewport_scroll_layer_.get()); // We should always have this.
-
// Preemptively apply the scroll offset and scale delta here before sending
// it to the client. If the client comes back and sets it to the same
// value, then the layer can early out without needing a full commit.
- inner_viewport_scroll_layer_->SetScrollOffsetFromImplSide(
- gfx::ScrollOffsetWithDelta(
- inner_viewport_scroll_layer_->scroll_offset(),
- inner_viewport_scroll_delta));
+ if (inner_viewport_scroll_layer_.get()) {
+ inner_viewport_scroll_layer_->SetScrollOffsetFromImplSide(
+ gfx::ScrollOffsetWithDelta(
+ inner_viewport_scroll_layer_->scroll_offset(),
+ inner_viewport_scroll_delta));
+ }
+
if (outer_viewport_scroll_layer_.get()) {
outer_viewport_scroll_layer_->SetScrollOffsetFromImplSide(
gfx::ScrollOffsetWithDelta(
@@ -1114,7 +1111,7 @@
}
ApplyPageScaleDeltaFromImplSide(info->page_scale_delta);
- if (!outer_viewport_scroll_layer_.get()) {
+ if (!settings_.use_pinch_virtual_viewport) {
client_->ApplyViewportDeltas(
inner_viewport_scroll_delta + outer_viewport_scroll_delta,
info->page_scale_delta,
diff --git a/cc/trees/layer_tree_host_impl.cc b/cc/trees/layer_tree_host_impl.cc
index be9fd58..77c484f 100644
--- a/cc/trees/layer_tree_host_impl.cc
+++ b/cc/trees/layer_tree_host_impl.cc
@@ -909,9 +909,9 @@
draw_result = DRAW_SUCCESS;
#if DCHECK_IS_ON
- for (auto* render_pass : frame->render_passes) {
- for (auto& quad : render_pass->quad_list)
- DCHECK(quad.shared_quad_state);
+ for (const auto& render_pass : frame->render_passes) {
+ for (const auto& quad : render_pass->quad_list)
+ DCHECK(quad->shared_quad_state);
DCHECK(frame->render_passes_by_id.find(render_pass->id) !=
frame->render_passes_by_id.end());
}
@@ -1029,11 +1029,10 @@
// Now follow up for all RenderPass quads and remove their RenderPasses
// recursively.
const QuadList& quad_list = removed_pass->quad_list;
- QuadList::ConstBackToFrontIterator quad_list_iterator =
- quad_list.BackToFrontBegin();
- for (; quad_list_iterator != quad_list.BackToFrontEnd();
+ for (auto quad_list_iterator = quad_list.BackToFrontBegin();
+ quad_list_iterator != quad_list.BackToFrontEnd();
++quad_list_iterator) {
- const DrawQuad* current_quad = &*quad_list_iterator;
+ const DrawQuad* current_quad = *quad_list_iterator;
if (current_quad->material != DrawQuad::RENDER_PASS)
continue;
@@ -1052,11 +1051,10 @@
// If any quad or RenderPass draws into this RenderPass, then keep it.
const QuadList& quad_list = render_pass->quad_list;
- for (QuadList::ConstBackToFrontIterator quad_list_iterator =
- quad_list.BackToFrontBegin();
+ for (auto quad_list_iterator = quad_list.BackToFrontBegin();
quad_list_iterator != quad_list.BackToFrontEnd();
++quad_list_iterator) {
- const DrawQuad* current_quad = &*quad_list_iterator;
+ const DrawQuad* current_quad = *quad_list_iterator;
if (current_quad->material != DrawQuad::RENDER_PASS)
return false;
@@ -1083,12 +1081,11 @@
it = culler.RenderPassListNext(it)) {
const RenderPass* current_pass = frame->render_passes[it];
const QuadList& quad_list = current_pass->quad_list;
- QuadList::ConstBackToFrontIterator quad_list_iterator =
- quad_list.BackToFrontBegin();
- for (; quad_list_iterator != quad_list.BackToFrontEnd();
+ for (auto quad_list_iterator = quad_list.BackToFrontBegin();
+ quad_list_iterator != quad_list.BackToFrontEnd();
++quad_list_iterator) {
- const DrawQuad* current_quad = &*quad_list_iterator;
+ const DrawQuad* current_quad = *quad_list_iterator;
if (current_quad->material != DrawQuad::RENDER_PASS)
continue;
@@ -1235,7 +1232,7 @@
100);
DCHECK(resource_pool_);
- resource_pool_->CheckBusyResources();
+ resource_pool_->CheckBusyResources(false);
// Soft limit is used for resource pool such that memory returns to soft
// limit after going over.
resource_pool_->SetResourceUsageLimits(
@@ -1245,7 +1242,7 @@
// Release all staging resources when invisible.
if (staging_resource_pool_) {
- staging_resource_pool_->CheckBusyResources();
+ staging_resource_pool_->CheckBusyResources(false);
staging_resource_pool_->SetResourceUsageLimits(
std::numeric_limits<size_t>::max(),
std::numeric_limits<size_t>::max(),
@@ -1457,7 +1454,7 @@
if (tile_manager_) {
DCHECK(resource_pool_);
- resource_pool_->CheckBusyResources();
+ resource_pool_->CheckBusyResources(false);
resource_pool_->ReduceResourceUsage();
}
// If we're not visible, we likely released resources, so we want to
@@ -1736,6 +1733,12 @@
SetFullRootLayerDamage();
}
+void LayerTreeHostImpl::SynchronouslyInitializeAllTiles() {
+ // Only valid for the single-threaded non-scheduled/synchronous case
+ // using the zero copy raster worker pool.
+ single_thread_synchronous_task_graph_runner_->RunUntilIdle();
+}
+
void LayerTreeHostImpl::DidLoseOutputSurface() {
if (resource_provider_)
resource_provider_->DidLoseOutputSurface();
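
In the single-threaded, non-scheduled mode there is no scheduler to kick raster work, so the embedder is expected to prepare tiles explicitly before drawing. A hypothetical caller-side sketch:

// Sketch (hypothetical embedder code path):
host_impl->SynchronouslyInitializeAllTiles();  // Runs the private
                                               // TaskGraphRunner until idle.
// ... then draw the now-ready frame synchronously ...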
@@ -2000,67 +2003,105 @@
DCHECK(settings_.impl_side_painting);
DCHECK(output_surface_);
DCHECK(resource_provider_);
+
+ CreateResourceAndRasterWorkerPool(
+ &raster_worker_pool_, &resource_pool_, &staging_resource_pool_);
+ DCHECK(raster_worker_pool_);
+ DCHECK(resource_pool_);
+
+ base::SingleThreadTaskRunner* task_runner =
+ proxy_->HasImplThread() ? proxy_->ImplThreadTaskRunner()
+ : proxy_->MainThreadTaskRunner();
+ DCHECK(task_runner);
+ size_t scheduled_raster_task_limit =
+ IsSynchronousSingleThreaded() ? std::numeric_limits<size_t>::max()
+ : settings_.scheduled_raster_task_limit;
+ tile_manager_ = TileManager::Create(this,
+ task_runner,
+ resource_pool_.get(),
+ raster_worker_pool_->AsRasterizer(),
+ rendering_stats_instrumentation_,
+ scheduled_raster_task_limit);
+
+ UpdateTileManagerMemoryPolicy(ActualManagedMemoryPolicy());
+ need_to_update_visible_tiles_before_draw_ = false;
+}
+
+void LayerTreeHostImpl::CreateResourceAndRasterWorkerPool(
+ scoped_ptr<RasterWorkerPool>* raster_worker_pool,
+ scoped_ptr<ResourcePool>* resource_pool,
+ scoped_ptr<ResourcePool>* staging_resource_pool) {
base::SingleThreadTaskRunner* task_runner =
proxy_->HasImplThread() ? proxy_->ImplThreadTaskRunner()
: proxy_->MainThreadTaskRunner();
DCHECK(task_runner);
ContextProvider* context_provider = output_surface_->context_provider();
+ bool should_use_zero_copy_rasterizer =
+ settings_.use_zero_copy || IsSynchronousSingleThreaded();
+
if (!context_provider) {
- resource_pool_ =
+ *resource_pool =
ResourcePool::Create(resource_provider_.get(),
GL_TEXTURE_2D,
resource_provider_->best_texture_format());
- raster_worker_pool_ =
- BitmapRasterWorkerPool::Create(proxy_->ImplThreadTaskRunner(),
+ *raster_worker_pool =
+ BitmapRasterWorkerPool::Create(task_runner,
RasterWorkerPool::GetTaskGraphRunner(),
resource_provider_.get());
} else if (use_gpu_rasterization_) {
- resource_pool_ =
+ *resource_pool =
ResourcePool::Create(resource_provider_.get(),
GL_TEXTURE_2D,
resource_provider_->best_texture_format());
- raster_worker_pool_ =
+ *raster_worker_pool =
GpuRasterWorkerPool::Create(task_runner,
context_provider,
resource_provider_.get(),
settings_.use_distance_field_text);
- } else if (UseZeroCopyRasterizer()) {
- resource_pool_ = ResourcePool::Create(
+ } else if (should_use_zero_copy_rasterizer && CanUseZeroCopyRasterizer()) {
+ *resource_pool = ResourcePool::Create(
resource_provider_.get(),
GetMapImageTextureTarget(context_provider->ContextCapabilities()),
resource_provider_->best_texture_format());
- raster_worker_pool_ =
- ZeroCopyRasterWorkerPool::Create(proxy_->ImplThreadTaskRunner(),
- RasterWorkerPool::GetTaskGraphRunner(),
- resource_provider_.get());
- } else if (UseOneCopyRasterizer()) {
+ TaskGraphRunner* task_graph_runner;
+ if (IsSynchronousSingleThreaded()) {
+ DCHECK(!single_thread_synchronous_task_graph_runner_);
+ single_thread_synchronous_task_graph_runner_.reset(new TaskGraphRunner);
+ task_graph_runner = single_thread_synchronous_task_graph_runner_.get();
+ } else {
+ task_graph_runner = RasterWorkerPool::GetTaskGraphRunner();
+ }
+
+ *raster_worker_pool = ZeroCopyRasterWorkerPool::Create(
+ task_runner, task_graph_runner, resource_provider_.get());
+ } else if (settings_.use_one_copy && CanUseOneCopyRasterizer()) {
// We need to create a staging resource pool when using copy rasterizer.
- staging_resource_pool_ = ResourcePool::Create(
+ *staging_resource_pool = ResourcePool::Create(
resource_provider_.get(),
GetMapImageTextureTarget(context_provider->ContextCapabilities()),
resource_provider_->best_texture_format());
- resource_pool_ =
+ *resource_pool =
ResourcePool::Create(resource_provider_.get(),
GL_TEXTURE_2D,
resource_provider_->best_texture_format());
- raster_worker_pool_ =
+ *raster_worker_pool =
OneCopyRasterWorkerPool::Create(task_runner,
RasterWorkerPool::GetTaskGraphRunner(),
context_provider,
resource_provider_.get(),
staging_resource_pool_.get());
} else {
- resource_pool_ = ResourcePool::Create(
+ *resource_pool = ResourcePool::Create(
resource_provider_.get(),
GL_TEXTURE_2D,
resource_provider_->memory_efficient_texture_format());
- raster_worker_pool_ = PixelBufferRasterWorkerPool::Create(
+ *raster_worker_pool = PixelBufferRasterWorkerPool::Create(
task_runner,
RasterWorkerPool::GetTaskGraphRunner(),
context_provider,
@@ -2068,16 +2109,6 @@
GetMaxTransferBufferUsageBytes(context_provider->ContextCapabilities(),
settings_.refresh_rate));
}
-
- tile_manager_ = TileManager::Create(this,
- task_runner,
- resource_pool_.get(),
- raster_worker_pool_->AsRasterizer(),
- rendering_stats_instrumentation_,
- settings().scheduled_raster_task_limit);
-
- UpdateTileManagerMemoryPolicy(ActualManagedMemoryPolicy());
- need_to_update_visible_tiles_before_draw_ = false;
}
void LayerTreeHostImpl::DestroyTileManager() {
@@ -2085,6 +2116,7 @@
resource_pool_ = nullptr;
staging_resource_pool_ = nullptr;
raster_worker_pool_ = nullptr;
+ single_thread_synchronous_task_graph_runner_ = nullptr;
}
bool LayerTreeHostImpl::UsePendingTreeForSync() const {
@@ -2093,13 +2125,17 @@
return settings_.impl_side_painting;
}
-bool LayerTreeHostImpl::UseZeroCopyRasterizer() const {
- return settings_.use_zero_copy && GetRendererCapabilities().using_image;
+bool LayerTreeHostImpl::IsSynchronousSingleThreaded() const {
+ return !proxy_->HasImplThread() && !settings_.single_thread_proxy_scheduler;
}
-bool LayerTreeHostImpl::UseOneCopyRasterizer() const {
+bool LayerTreeHostImpl::CanUseZeroCopyRasterizer() const {
+ return GetRendererCapabilities().using_image;
+}
+
+bool LayerTreeHostImpl::CanUseOneCopyRasterizer() const {
// Sync query support is required by one-copy rasterizer.
- return settings_.use_one_copy && GetRendererCapabilities().using_image &&
+ return GetRendererCapabilities().using_image &&
resource_provider_->use_sync_query();
}
@@ -2997,7 +3033,8 @@
static void CollectScrollDeltas(ScrollAndScaleSet* scroll_info,
LayerImpl* layer_impl) {
- DCHECK(layer_impl);
+ if (!layer_impl)
+ return;
gfx::Vector2d scroll_delta =
gfx::ToFlooredVector2d(layer_impl->ScrollDelta());
@@ -3016,15 +3053,12 @@
scoped_ptr<ScrollAndScaleSet> LayerTreeHostImpl::ProcessScrollDeltas() {
scoped_ptr<ScrollAndScaleSet> scroll_info(new ScrollAndScaleSet());
- if (active_tree_->root_layer()) {
- CollectScrollDeltas(scroll_info.get(), active_tree_->root_layer());
- scroll_info->page_scale_delta = active_tree_->page_scale_delta();
- active_tree_->set_sent_page_scale_delta(scroll_info->page_scale_delta);
- scroll_info->swap_promises.swap(
- swap_promises_for_main_thread_scroll_update_);
- scroll_info->top_controls_delta = active_tree()->top_controls_delta();
- active_tree_->set_sent_top_controls_delta(scroll_info->top_controls_delta);
- }
+ CollectScrollDeltas(scroll_info.get(), active_tree_->root_layer());
+ scroll_info->page_scale_delta = active_tree_->page_scale_delta();
+ active_tree_->set_sent_page_scale_delta(scroll_info->page_scale_delta);
+ scroll_info->swap_promises.swap(swap_promises_for_main_thread_scroll_update_);
+ scroll_info->top_controls_delta = active_tree()->top_controls_delta();
+ active_tree_->set_sent_top_controls_delta(scroll_info->top_controls_delta);
return scroll_info.Pass();
}
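
Note on the refactor above: CreateAndSetTileManager() now delegates pool construction to the virtual CreateResourceAndRasterWorkerPool(), and the synchronous single-threaded path gets its own TaskGraphRunner that SynchronouslyInitializeAllTiles() drains with RunUntilIdle(). The standalone sketch below condenses the selection precedence into one function so the branches are easier to follow; the enum, struct, and function names (RasterBackend, RasterConfig, SelectRasterBackend) are hypothetical and only mirror the conditions visible in the hunk, not cc's actual implementation.

#include <iostream>

// Hypothetical names; a condensed view of the precedence implemented by
// CreateResourceAndRasterWorkerPool() in the hunk above.
enum class RasterBackend {
  kBitmap,       // no context provider (software compositing)
  kGpu,          // use_gpu_rasterization_
  kZeroCopy,     // forced zero-copy, or the synchronous single-threaded path
  kOneCopy,      // needs GpuMemoryBuffer images plus sync queries
  kPixelBuffer,  // default transfer-buffer path
};

struct RasterConfig {
  bool has_context_provider;
  bool use_gpu_rasterization;
  bool use_zero_copy;                // settings_.use_zero_copy
  bool use_one_copy;                 // settings_.use_one_copy
  bool synchronous_single_threaded;  // !HasImplThread() && !single_thread_proxy_scheduler
  bool using_image;                  // GetRendererCapabilities().using_image
  bool use_sync_query;               // resource_provider_->use_sync_query()
};

RasterBackend SelectRasterBackend(const RasterConfig& c) {
  if (!c.has_context_provider)
    return RasterBackend::kBitmap;
  if (c.use_gpu_rasterization)
    return RasterBackend::kGpu;
  // The synchronous single-threaded case insists on zero-copy so that
  // SynchronouslyInitializeAllTiles() can drain its private TaskGraphRunner
  // with RunUntilIdle() instead of waiting on worker threads or uploads.
  if ((c.use_zero_copy || c.synchronous_single_threaded) && c.using_image)
    return RasterBackend::kZeroCopy;
  if (c.use_one_copy && c.using_image && c.use_sync_query)
    return RasterBackend::kOneCopy;
  return RasterBackend::kPixelBuffer;
}

int main() {
  RasterConfig sync_single_thread = {true, false, false, false, true, true, false};
  std::cout << (SelectRasterBackend(sync_single_thread) == RasterBackend::kZeroCopy)
            << "\n";  // Prints 1: the synchronous path picks zero-copy.
}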
diff --git a/cc/trees/layer_tree_host_impl.h b/cc/trees/layer_tree_host_impl.h
index c1e9d1b..157fe00 100644
--- a/cc/trees/layer_tree_host_impl.h
+++ b/cc/trees/layer_tree_host_impl.h
@@ -482,6 +482,16 @@
void ResetRequiresHighResToDraw() { requires_high_res_to_draw_ = false; }
bool RequiresHighResToDraw() const { return requires_high_res_to_draw_; }
+ // Only valid for synchronous (non-scheduled) single-threaded case.
+ void SynchronouslyInitializeAllTiles();
+
+ bool CanUseZeroCopyRasterizer() const;
+ bool CanUseOneCopyRasterizer() const;
+ virtual void CreateResourceAndRasterWorkerPool(
+ scoped_ptr<RasterWorkerPool>* raster_worker_pool,
+ scoped_ptr<ResourcePool>* resource_pool,
+ scoped_ptr<ResourcePool>* staging_resource_pool);
+
protected:
LayerTreeHostImpl(
const LayerTreeSettings& settings,
@@ -496,8 +506,6 @@
// Virtual for testing.
virtual void AnimateLayers(base::TimeTicks monotonic_time);
-
- // Virtual for testing.
virtual base::TimeDelta LowFrequencyAnimationInterval() const;
const AnimationRegistrar::AnimationControllerMap&
@@ -518,8 +526,7 @@
void EnforceZeroBudget(bool zero_budget);
bool UsePendingTreeForSync() const;
- bool UseZeroCopyRasterizer() const;
- bool UseOneCopyRasterizer() const;
+ bool IsSynchronousSingleThreaded() const;
// Scroll by preferring to move the outer viewport first, only moving the
// inner if the outer is at its scroll extents.
@@ -693,6 +700,7 @@
RenderingStatsInstrumentation* rendering_stats_instrumentation_;
MicroBenchmarkControllerImpl micro_benchmark_controller_;
+ scoped_ptr<TaskGraphRunner> single_thread_synchronous_task_graph_runner_;
bool need_to_update_visible_tiles_before_draw_;
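
CreateResourceAndRasterWorkerPool() is exposed as a public virtual so a test harness (for example the new layer_tree_pixel_resource_test) can substitute its own pools without touching the production selection logic. The sketch below shows the out-parameter factory-hook shape with hypothetical, self-contained types (HostImplLike, RasterPool); std::unique_ptr stands in for scoped_ptr, and the real override would hand back RasterWorkerPool/ResourcePool instances.

#include <iostream>
#include <memory>

// Hypothetical stand-ins for RasterWorkerPool and friends.
struct RasterPool {
  virtual ~RasterPool() {}
  virtual const char* Name() const { return "production"; }
};
struct TestRasterPool : RasterPool {
  const char* Name() const override { return "test"; }
};

class HostImplLike {
 public:
  virtual ~HostImplLike() {}
  // Out-parameter factory hook, mirroring the shape of
  // LayerTreeHostImpl::CreateResourceAndRasterWorkerPool().
  virtual void CreateResourceAndRasterWorkerPool(
      std::unique_ptr<RasterPool>* raster_worker_pool) {
    raster_worker_pool->reset(new RasterPool);
  }
  void CreateAndSetTileManager() {
    CreateResourceAndRasterWorkerPool(&raster_worker_pool_);
    std::cout << "using " << raster_worker_pool_->Name() << " pool\n";
  }

 private:
  std::unique_ptr<RasterPool> raster_worker_pool_;
};

// A pixel-resource test fixture would override the hook like this.
class TestHostImpl : public HostImplLike {
  void CreateResourceAndRasterWorkerPool(
      std::unique_ptr<RasterPool>* raster_worker_pool) override {
    raster_worker_pool->reset(new TestRasterPool);
  }
};

int main() {
  TestHostImpl host;
  host.CreateAndSetTileManager();  // Prints "using test pool".
}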
diff --git a/cc/trees/layer_tree_host_impl_unittest.cc b/cc/trees/layer_tree_host_impl_unittest.cc
index 8e7fa0f..7d39b9d 100644
--- a/cc/trees/layer_tree_host_impl_unittest.cc
+++ b/cc/trees/layer_tree_host_impl_unittest.cc
@@ -2370,28 +2370,6 @@
LayerTreeSettings settings_;
}; // class LayerTreeHostImplTopControlsTest
-TEST_F(LayerTreeHostImplTopControlsTest,
- TopControlsDeltaOnlySentWithRootLayer) {
- CreateHostImpl(settings_, CreateOutputSurface());
-
- host_impl_->active_tree()->set_top_controls_delta(-20.f);
-
- // Because LTH::ApplyScrollAndScale doesn't know what to do with a scroll
- // delta packet when the root layer doesn't exist yet, make sure not to set
- // sent_top_controls_delta either to avoid the delta getting clobbered on the
- // next commit.
- scoped_ptr<ScrollAndScaleSet> scroll_info = host_impl_->ProcessScrollDeltas();
- EXPECT_EQ(scroll_info->top_controls_delta, 0.f);
- EXPECT_EQ(host_impl_->active_tree()->sent_top_controls_delta(), 0.f);
-
- SetupTopControlsAndScrollLayer();
-
- // After the root layer exists, it should be set normally.
- scroll_info = host_impl_->ProcessScrollDeltas();
- EXPECT_EQ(scroll_info->top_controls_delta, -20.f);
- EXPECT_EQ(host_impl_->active_tree()->sent_top_controls_delta(), -20.f);
-}
-
TEST_F(LayerTreeHostImplTopControlsTest, ScrollTopControlsByFractionalAmount) {
SetupTopControlsAndScrollLayerWithVirtualViewport(
gfx::Size(10, 10), gfx::Size(10, 10), gfx::Size(10, 10));
@@ -4423,7 +4401,7 @@
size_t CountGutterQuads(const QuadList& quad_list) {
size_t num_gutter_quads = 0;
for (const auto& quad : quad_list) {
- num_gutter_quads += (quad.material == gutter_quad_material_) ? 1 : 0;
+ num_gutter_quads += (quad->material == gutter_quad_material_) ? 1 : 0;
}
return num_gutter_quads;
}
@@ -4436,10 +4414,9 @@
// Make sure that the texture coordinates match their expectations.
void ValidateTextureDrawQuads(const QuadList& quad_list) {
for (const auto& quad : quad_list) {
- if (quad.material != DrawQuad::TEXTURE_CONTENT)
+ if (quad->material != DrawQuad::TEXTURE_CONTENT)
continue;
- const TextureDrawQuad* texture_quad =
- TextureDrawQuad::MaterialCast(&quad);
+ const TextureDrawQuad* texture_quad = TextureDrawQuad::MaterialCast(quad);
gfx::SizeF gutter_texture_size_pixels = gfx::ScaleSize(
gutter_texture_size_, host_impl_->device_scale_factor());
EXPECT_EQ(texture_quad->uv_top_left.x(),
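
The unittest churn above (quad.material becoming quad->material, MaterialCast(&quad) becoming MaterialCast(quad)) follows from QuadList iteration now yielding DrawQuad pointers rather than references, so the tag-checked downcast takes the pointer directly. The standalone sketch below illustrates that pattern with hypothetical FakeQuad/FakeTextureQuad types; it is not cc's DrawQuad implementation.

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-ins for DrawQuad/TextureDrawQuad, only to illustrate the
// material-tagged downcast and the pointer-yielding iteration in the tests.
struct FakeQuad {
  enum Material { SOLID_COLOR, TEXTURE_CONTENT };
  explicit FakeQuad(Material m) : material(m) {}
  virtual ~FakeQuad() {}
  Material material;
};

struct FakeTextureQuad : FakeQuad {
  FakeTextureQuad(float u, float v)
      : FakeQuad(TEXTURE_CONTENT), uv_top_left_x(u), uv_top_left_y(v) {}
  // Tag-checked downcast, analogous in spirit to DrawQuad::MaterialCast.
  static const FakeTextureQuad* MaterialCast(const FakeQuad* quad) {
    assert(quad->material == TEXTURE_CONTENT);
    return static_cast<const FakeTextureQuad*>(quad);
  }
  float uv_top_left_x;
  float uv_top_left_y;
};

// Mirrors CountGutterQuads/ValidateTextureDrawQuads: the list hands out
// pointers, so members are reached with '->' and casts take the pointer.
size_t CountTextureQuads(const std::vector<const FakeQuad*>& quad_list) {
  size_t count = 0;
  for (const auto& quad : quad_list) {
    if (quad->material != FakeQuad::TEXTURE_CONTENT)
      continue;
    const FakeTextureQuad* texture_quad = FakeTextureQuad::MaterialCast(quad);
    (void)texture_quad;  // e.g. check texture_quad->uv_top_left_x here.
    ++count;
  }
  return count;
}

int main() {
  FakeTextureQuad tex(0.f, 0.f);
  FakeQuad solid(FakeQuad::SOLID_COLOR);
  std::vector<const FakeQuad*> quads = {&tex, &solid};
  return CountTextureQuads(quads) == 1 ? 0 : 1;
}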
diff --git a/cc/trees/layer_tree_host_pixeltest_masks.cc b/cc/trees/layer_tree_host_pixeltest_masks.cc
index 1b60eae..f4f174c 100644
--- a/cc/trees/layer_tree_host_pixeltest_masks.cc
+++ b/cc/trees/layer_tree_host_pixeltest_masks.cc
@@ -7,7 +7,7 @@
#include "cc/layers/picture_image_layer.h"
#include "cc/layers/picture_layer.h"
#include "cc/layers/solid_color_layer.h"
-#include "cc/test/layer_tree_pixel_test.h"
+#include "cc/test/layer_tree_pixel_resource_test.h"
#include "cc/test/pixel_comparator.h"
#if !defined(OS_ANDROID)
@@ -15,7 +15,9 @@
namespace cc {
namespace {
-class LayerTreeHostMasksPixelTest : public LayerTreePixelTest {};
+typedef ParameterizedPixelResourceTest LayerTreeHostMasksPixelTest;
+
+INSTANTIATE_PIXEL_RESOURCE_TEST_CASE_P(LayerTreeHostMasksPixelTest);
class MaskContentLayerClient : public ContentLayerClient {
public:
@@ -51,7 +53,7 @@
gfx::Size bounds_;
};
-TEST_F(LayerTreeHostMasksPixelTest, MaskOfLayer) {
+TEST_P(LayerTreeHostMasksPixelTest, MaskOfLayer) {
scoped_refptr<SolidColorLayer> background = CreateSolidColorLayer(
gfx::Rect(200, 200), SK_ColorWHITE);
@@ -67,12 +69,11 @@
mask->SetIsMask(true);
green->SetMaskLayer(mask.get());
- RunPixelTest(PIXEL_TEST_GL,
- background,
- base::FilePath(FILE_PATH_LITERAL("mask_of_layer.png")));
+ RunPixelResourceTest(background,
+ base::FilePath(FILE_PATH_LITERAL("mask_of_layer.png")));
}
-TEST_F(LayerTreeHostMasksPixelTest, ImageMaskOfLayer) {
+TEST_P(LayerTreeHostMasksPixelTest, ImageMaskOfLayer) {
scoped_refptr<SolidColorLayer> background = CreateSolidColorLayer(
gfx::Rect(200, 200), SK_ColorWHITE);
@@ -98,12 +99,11 @@
green->SetMaskLayer(mask.get());
background->AddChild(green);
- RunPixelTest(PIXEL_TEST_GL,
- background,
- base::FilePath(FILE_PATH_LITERAL("image_mask_of_layer.png")));
+ RunPixelResourceTest(
+ background, base::FilePath(FILE_PATH_LITERAL("image_mask_of_layer.png")));
}
-TEST_F(LayerTreeHostMasksPixelTest, MaskOfClippedLayer) {
+TEST_P(LayerTreeHostMasksPixelTest, MaskOfClippedLayer) {
scoped_refptr<SolidColorLayer> background = CreateSolidColorLayer(
gfx::Rect(200, 200), SK_ColorWHITE);
@@ -126,12 +126,12 @@
mask->SetIsMask(true);
green->SetMaskLayer(mask.get());
- RunPixelTest(PIXEL_TEST_GL,
- background,
- base::FilePath(FILE_PATH_LITERAL("mask_of_clipped_layer.png")));
+ RunPixelResourceTest(
+ background,
+ base::FilePath(FILE_PATH_LITERAL("mask_of_clipped_layer.png")));
}
-TEST_F(LayerTreeHostMasksPixelTest, MaskWithReplica) {
+TEST_P(LayerTreeHostMasksPixelTest, MaskWithReplica) {
scoped_refptr<SolidColorLayer> background = CreateSolidColorLayer(
gfx::Rect(200, 200), SK_ColorWHITE);
@@ -156,12 +156,11 @@
replica->SetTransform(replica_transform);
green->SetReplicaLayer(replica.get());
- RunPixelTest(PIXEL_TEST_GL,
- background,
- base::FilePath(FILE_PATH_LITERAL("mask_with_replica.png")));
+ RunPixelResourceTest(
+ background, base::FilePath(FILE_PATH_LITERAL("mask_with_replica.png")));
}
-TEST_F(LayerTreeHostMasksPixelTest, MaskWithReplicaOfClippedLayer) {
+TEST_P(LayerTreeHostMasksPixelTest, MaskWithReplicaOfClippedLayer) {
scoped_refptr<SolidColorLayer> background = CreateSolidColorLayer(
gfx::Rect(200, 200), SK_ColorWHITE);
@@ -194,13 +193,12 @@
replica->SetTransform(replica_transform);
green->SetReplicaLayer(replica.get());
- RunPixelTest(PIXEL_TEST_GL,
- background,
- base::FilePath(FILE_PATH_LITERAL(
- "mask_with_replica_of_clipped_layer.png")));
+ RunPixelResourceTest(background,
+ base::FilePath(FILE_PATH_LITERAL(
+ "mask_with_replica_of_clipped_layer.png")));
}
-TEST_F(LayerTreeHostMasksPixelTest, MaskOfReplica) {
+TEST_P(LayerTreeHostMasksPixelTest, MaskOfReplica) {
scoped_refptr<SolidColorLayer> background = CreateSolidColorLayer(
gfx::Rect(200, 200), SK_ColorWHITE);
@@ -230,12 +228,11 @@
replica->SetMaskLayer(mask.get());
green->SetReplicaLayer(replica.get());
- RunPixelTest(PIXEL_TEST_GL,
- background,
- base::FilePath(FILE_PATH_LITERAL("mask_of_replica.png")));
+ RunPixelResourceTest(
+ background, base::FilePath(FILE_PATH_LITERAL("mask_of_replica.png")));
}
-TEST_F(LayerTreeHostMasksPixelTest, MaskOfReplicaOfClippedLayer) {
+TEST_P(LayerTreeHostMasksPixelTest, MaskOfReplicaOfClippedLayer) {
scoped_refptr<SolidColorLayer> background = CreateSolidColorLayer(
gfx::Rect(200, 200), SK_ColorWHITE);
@@ -272,10 +269,9 @@
replica->SetMaskLayer(mask.get());
green->SetReplicaLayer(replica.get());
- RunPixelTest(PIXEL_TEST_GL,
- background,
- base::FilePath(
- FILE_PATH_LITERAL("mask_of_replica_of_clipped_layer.png")));
+ RunPixelResourceTest(background,
+ base::FilePath(FILE_PATH_LITERAL(
+ "mask_of_replica_of_clipped_layer.png")));
}
} // namespace
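
Switching the mask tests from TEST_F to TEST_P means each test body now runs once per resource/raster configuration supplied by INSTANTIATE_PIXEL_RESOURCE_TEST_CASE_P (defined in the new layer_tree_pixel_resource_test.h, which is not shown in this patch). Below is a plain gtest sketch of the same value-parameterization pattern; the FakeRasterBackend values are hypothetical, the real parameter set lives in the cc test header.

#include "testing/gtest/include/gtest/gtest.h"

// Hypothetical parameter set standing in for the pixel-resource configurations.
enum FakeRasterBackend { FAKE_ZERO_COPY, FAKE_ONE_COPY, FAKE_PIXEL_BUFFER };

class FakePixelResourceTest
    : public ::testing::TestWithParam<FakeRasterBackend> {};

// Runs once per value below, the way each RunPixelResourceTest() case above
// now runs once per raster/resource configuration.
TEST_P(FakePixelResourceTest, ProducesSameOutput) {
  FakeRasterBackend backend = GetParam();
  EXPECT_TRUE(backend == FAKE_ZERO_COPY || backend == FAKE_ONE_COPY ||
              backend == FAKE_PIXEL_BUFFER);
}

INSTANTIATE_TEST_CASE_P(Backends,
                        FakePixelResourceTest,
                        ::testing::Values(FAKE_ZERO_COPY,
                                          FAKE_ONE_COPY,
                                          FAKE_PIXEL_BUFFER));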
diff --git a/cc/trees/layer_tree_host_pixeltest_on_demand_raster.cc b/cc/trees/layer_tree_host_pixeltest_on_demand_raster.cc
index 74e20f5..d703e22 100644
--- a/cc/trees/layer_tree_host_pixeltest_on_demand_raster.cc
+++ b/cc/trees/layer_tree_host_pixeltest_on_demand_raster.cc
@@ -45,7 +45,7 @@
picture_layer->AppendQuads(render_pass.get(), Occlusion(), &data);
for (const auto& quad : render_pass->quad_list)
- EXPECT_EQ(quad.material, DrawQuad::PICTURE_CONTENT);
+ EXPECT_EQ(quad->material, DrawQuad::PICTURE_CONTENT);
// Triggers pixel readback and ends the test.
LayerTreePixelTest::SwapBuffersOnThread(host_impl, result);
diff --git a/cc/trees/layer_tree_host_pixeltest_synchronous.cc b/cc/trees/layer_tree_host_pixeltest_synchronous.cc
new file mode 100644
index 0000000..c22075f
--- /dev/null
+++ b/cc/trees/layer_tree_host_pixeltest_synchronous.cc
@@ -0,0 +1,50 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "build/build_config.h"
+#include "cc/layers/content_layer_client.h"
+#include "cc/layers/picture_image_layer.h"
+#include "cc/layers/picture_layer.h"
+#include "cc/layers/solid_color_layer.h"
+#include "cc/test/fake_content_layer_client.h"
+#include "cc/test/layer_tree_pixel_test.h"
+#include "cc/test/pixel_comparator.h"
+
+#if !defined(OS_ANDROID)
+
+namespace cc {
+namespace {
+
+class LayerTreeHostSynchronousPixelTest : public LayerTreePixelTest {
+ public:
+ void InitializeSettings(LayerTreeSettings* settings) override {
+ LayerTreePixelTest::InitializeSettings(settings);
+ settings->single_thread_proxy_scheduler = false;
+ }
+
+ void BeginTest() override {
+ LayerTreePixelTest::BeginTest();
+ PostCompositeImmediatelyToMainThread();
+ }
+};
+
+TEST_F(LayerTreeHostSynchronousPixelTest, OneContentLayer) {
+ gfx::Size bounds(200, 200);
+
+ FakeContentLayerClient client;
+ SkPaint green_paint;
+ green_paint.setColor(SkColorSetARGB(255, 0, 255, 0));
+ client.add_draw_rect(gfx::RectF(bounds), green_paint);
+ scoped_refptr<PictureLayer> root = PictureLayer::Create(&client);
+ root->SetBounds(bounds);
+ root->SetIsDrawable(true);
+
+ RunSingleThreadedPixelTest(
+ PIXEL_TEST_GL, root, base::FilePath(FILE_PATH_LITERAL("green.png")));
+}
+
+} // namespace
+} // namespace cc
+
+#endif // OS_ANDROID
diff --git a/cc/trees/layer_tree_host_unittest.cc b/cc/trees/layer_tree_host_unittest.cc
index d15c6a3..2063217 100644
--- a/cc/trees/layer_tree_host_unittest.cc
+++ b/cc/trees/layer_tree_host_unittest.cc
@@ -4160,6 +4160,8 @@
protected:
void InitializeSettings(LayerTreeSettings* settings) override {
settings->impl_side_painting = true;
+ settings->use_zero_copy = false;
+ settings->use_one_copy = false;
}
scoped_ptr<FakeOutputSurface> CreateFakeOutputSurface(
@@ -5128,4 +5130,55 @@
// Impl-side painting is not supported for synchronous compositing.
SINGLE_THREAD_NOIMPL_TEST_F(LayerTreeHostTestSynchronousCompositeSwapPromise);
+// Make sure the page scale and top controls deltas are applied to the client
+// even when the LayerTreeHost doesn't have a root layer.
+class LayerTreeHostAcceptsDeltasFromImplWithoutRootLayer
+ : public LayerTreeHostTest {
+ public:
+ LayerTreeHostAcceptsDeltasFromImplWithoutRootLayer()
+ : deltas_sent_to_client_(false) {}
+
+ void BeginTest() override {
+ layer_tree_host()->SetRootLayer(nullptr);
+ info_.page_scale_delta = 3.14f;
+ info_.top_controls_delta = 2.73f;
+
+ PostSetNeedsCommitToMainThread();
+ }
+
+ void BeginMainFrame(const BeginFrameArgs& args) override {
+ EXPECT_EQ(nullptr, layer_tree_host()->root_layer());
+
+ layer_tree_host()->ApplyScrollAndScale(&info_);
+ EndTest();
+ }
+
+ void ApplyViewportDeltas(
+ const gfx::Vector2d& inner,
+ const gfx::Vector2d& outer,
+ float scale_delta,
+ float top_controls_delta) override {
+ EXPECT_EQ(info_.page_scale_delta, scale_delta);
+ EXPECT_EQ(info_.top_controls_delta, top_controls_delta);
+ deltas_sent_to_client_ = true;
+ }
+
+ void ApplyViewportDeltas(
+ const gfx::Vector2d& scroll,
+ float scale_delta,
+ float top_controls_delta) override {
+ EXPECT_EQ(info_.page_scale_delta, scale_delta);
+ EXPECT_EQ(info_.top_controls_delta, top_controls_delta);
+ deltas_sent_to_client_ = true;
+ }
+
+ void AfterTest() override {
+ EXPECT_TRUE(deltas_sent_to_client_);
+ }
+
+ ScrollAndScaleSet info_;
+ bool deltas_sent_to_client_;
+};
+
+MULTI_THREAD_TEST_F(LayerTreeHostAcceptsDeltasFromImplWithoutRootLayer);
} // namespace cc
diff --git a/cc/trees/single_thread_proxy.cc b/cc/trees/single_thread_proxy.cc
index 344dfb9..1af657a 100644
--- a/cc/trees/single_thread_proxy.cc
+++ b/cc/trees/single_thread_proxy.cc
@@ -467,8 +467,6 @@
void SingleThreadProxy::CompositeImmediately(base::TimeTicks frame_begin_time) {
TRACE_EVENT0("cc", "SingleThreadProxy::CompositeImmediately");
DCHECK(Proxy::IsMainThread());
- DCHECK(!layer_tree_host_impl_->settings().impl_side_painting)
- << "Impl-side painting and synchronous compositing are not supported.";
base::AutoReset<bool> inside_composite(&inside_synchronous_composite_, true);
if (layer_tree_host_->output_surface_lost()) {
@@ -493,6 +491,13 @@
{
DebugScopedSetImplThread impl(const_cast<SingleThreadProxy*>(this));
+ if (layer_tree_host_impl_->settings().impl_side_painting) {
+ layer_tree_host_impl_->ActivateSyncTree();
+ layer_tree_host_impl_->active_tree()->UpdateDrawProperties();
+ layer_tree_host_impl_->ManageTiles();
+ layer_tree_host_impl_->SynchronouslyInitializeAllTiles();
+ }
+
LayerTreeHostImpl::FrameData frame;
DoComposite(frame_begin_time, &frame);
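
With the impl-side-painting DCHECK removed, CompositeImmediately now has to do the scheduler's job inline before DoComposite(): activate the just-committed sync tree, recompute draw properties, schedule tiles, and drain the raster work via SynchronouslyInitializeAllTiles(). The sketch below restates that ordering against a hypothetical interface (SyncCompositorSteps, not cc's LayerTreeHostImpl) purely to make the step dependencies explicit.

// Hypothetical interface; the method names mirror the calls added above.
class SyncCompositorSteps {
 public:
  virtual ~SyncCompositorSteps() {}
  virtual void ActivateSyncTree() = 0;      // adopt the committed tree
  virtual void UpdateDrawProperties() = 0;  // transforms, tilings, priorities
  virtual void ManageTiles() = 0;           // queue raster tasks for needed tiles
  virtual void RunRasterUntilIdle() = 0;    // SynchronouslyInitializeAllTiles()
  virtual void DoComposite() = 0;           // draw and swap
};

// Each step feeds the next: tiles can only be scheduled against up-to-date
// draw properties, and drawing is only correct once raster work has drained.
void CompositeImmediatelySketch(SyncCompositorSteps* impl,
                                bool impl_side_painting) {
  if (impl_side_painting) {
    impl->ActivateSyncTree();
    impl->UpdateDrawProperties();
    impl->ManageTiles();
    impl->RunRasterUntilIdle();
  }
  impl->DoComposite();
}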
diff --git a/cc/trees/thread_proxy.cc b/cc/trees/thread_proxy.cc
index f5496f8..14e2181 100644
--- a/cc/trees/thread_proxy.cc
+++ b/cc/trees/thread_proxy.cc
@@ -1217,12 +1217,10 @@
impl().scheduler = nullptr;
impl().layer_tree_host_impl = nullptr;
impl().weak_factory.InvalidateWeakPtrs();
- // We need to explicitly cancel the notifier, since it isn't using weak ptrs.
- // TODO(vmpstr): We should see if we can make it use weak ptrs and still keep
- // the convention of having a weak ptr factory initialized last. Alternatively
- // we should moved the notifier (and RenewTreePriority) to LTHI. See
- // crbug.com/411972
- impl().smoothness_priority_expiration_notifier.Cancel();
+ // We need to explicitly shut down the notifier to destroy any WeakPtrs it is
+ // holding while we are still on the compositor thread. This also ensures any
+ // callbacks holding a ThreadProxy pointer are cancelled.
+ impl().smoothness_priority_expiration_notifier.Shutdown();
impl().contents_texture_manager = NULL;
completion->Signal();
}